linux/kernel/sched/sched.h
   1
   2#include <linux/sched.h>
   3#include <linux/mutex.h>
   4#include <linux/spinlock.h>
   5#include <linux/stop_machine.h>
   6
   7#include "cpupri.h"
   8
   9extern __read_mostly int scheduler_running;
  10
  11/*
  12 * Convert user-nice values [ -20 ... 0 ... 19 ]
  13 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
  14 * and back.
  15 */
  16#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
  17#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
  18#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
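/*
 * Worked example (assuming the usual MAX_RT_PRIO == 100, MAX_PRIO == 140):
 * NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139,
 * and PRIO_TO_NICE() inverts the mapping, e.g. PRIO_TO_NICE(120) == 0.
 */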
  19
  20/*
  21 * 'User priority' is the nice value converted to something we
  22 * can work with better when scaling various scheduler parameters,
  23 * it's a [ 0 ... 39 ] range.
  24 */
  25#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
  26#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
  27#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
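/*
 * For illustration, again assuming MAX_RT_PRIO == 100: a nice-0 task has
 * static_prio 120, so TASK_USER_PRIO() yields 20, and MAX_USER_PRIO
 * evaluates to 40 - one slot per nice level from -20 to +19.
 */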
  28
  29/*
  30 * Helpers for converting nanosecond timing to jiffy resolution
  31 */
  32#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
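/*
 * E.g. with HZ == 1000 one jiffy is 10^6 ns, so NS_TO_JIFFIES(2500000) == 2;
 * note that the division truncates rather than rounds.
 */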
  33
  34#define NICE_0_LOAD             SCHED_LOAD_SCALE
  35#define NICE_0_SHIFT            SCHED_LOAD_SHIFT
  36
  37/*
  38 * These are the 'tuning knobs' of the scheduler:
  39 */
  40
  41/*
  42 * single value that denotes runtime == period, ie unlimited time.
  43 */
  44#define RUNTIME_INF     ((u64)~0ULL)
  45
  46static inline int rt_policy(int policy)
  47{
  48        if (policy == SCHED_FIFO || policy == SCHED_RR)
  49                return 1;
  50        return 0;
  51}
  52
  53static inline int task_has_rt_policy(struct task_struct *p)
  54{
  55        return rt_policy(p->policy);
  56}
  57
  58/*
  59 * This is the priority-queue data structure of the RT scheduling class:
  60 */
  61struct rt_prio_array {
  62        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
  63        struct list_head queue[MAX_RT_PRIO];
  64};
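/*
 * The extra delimiter bit (index MAX_RT_PRIO) is kept set so that bitmap
 * searches such as sched_find_first_bit() always terminate; the lowest set
 * bit below it identifies the highest-priority non-empty queue[] list.
 */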
  65
  66struct rt_bandwidth {
  67        /* nests inside the rq lock: */
  68        raw_spinlock_t          rt_runtime_lock;
  69        ktime_t                 rt_period;
  70        u64                     rt_runtime;
  71        struct hrtimer          rt_period_timer;
  72};
  73
  74extern struct mutex sched_domains_mutex;
  75
  76#ifdef CONFIG_CGROUP_SCHED
  77
  78#include <linux/cgroup.h>
  79
  80struct cfs_rq;
  81struct rt_rq;
  82
  83extern struct list_head task_groups;
  84
  85struct cfs_bandwidth {
  86#ifdef CONFIG_CFS_BANDWIDTH
  87        raw_spinlock_t lock;
  88        ktime_t period;
  89        u64 quota, runtime;
  90        s64 hierarchal_quota;
  91        u64 runtime_expires;
  92
  93        int idle, timer_active;
  94        struct hrtimer period_timer, slack_timer;
  95        struct list_head throttled_cfs_rq;
  96
  97        /* statistics */
  98        int nr_periods, nr_throttled;
  99        u64 throttled_time;
 100#endif
 101};
 102
 103/* task group related information */
 104struct task_group {
 105        struct cgroup_subsys_state css;
 106
 107#ifdef CONFIG_FAIR_GROUP_SCHED
 108        /* schedulable entities of this group on each cpu */
 109        struct sched_entity **se;
 110        /* runqueue "owned" by this group on each cpu */
 111        struct cfs_rq **cfs_rq;
 112        unsigned long shares;
 113
 114        atomic_t load_weight;
 115#endif
 116
 117#ifdef CONFIG_RT_GROUP_SCHED
 118        struct sched_rt_entity **rt_se;
 119        struct rt_rq **rt_rq;
 120
 121        struct rt_bandwidth rt_bandwidth;
 122#endif
 123
 124        struct rcu_head rcu;
 125        struct list_head list;
 126
 127        struct task_group *parent;
 128        struct list_head siblings;
 129        struct list_head children;
 130
 131#ifdef CONFIG_SCHED_AUTOGROUP
 132        struct autogroup *autogroup;
 133#endif
 134
 135        struct cfs_bandwidth cfs_bandwidth;
 136};
 137
 138#ifdef CONFIG_FAIR_GROUP_SCHED
 139#define ROOT_TASK_GROUP_LOAD    NICE_0_LOAD
 140
 141/*
  142 * A weight of 0 or 1 can cause arithmetic problems.
  143 * The weight of a cfs_rq is the sum of the weights of the entities
  144 * queued on it, so the weight of an entity should not be too large,
  145 * and neither should the shares value of a task group.
 146 * (The default weight is 1024 - so there's no practical
 147 *  limitation from this.)
 148 */
 149#define MIN_SHARES      (1UL <<  1)
 150#define MAX_SHARES      (1UL << 18)
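/*
 * I.e. group shares are clamped to [2, 262144]; sched_group_set_shares()
 * clamps requested values into (a possibly load-resolution-scaled version
 * of) this range.
 */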
 151#endif
 152
 153/* Default task group.
  154 *      Every task in the system belongs to this group at bootup.
 155 */
 156extern struct task_group root_task_group;
 157
 158typedef int (*tg_visitor)(struct task_group *, void *);
 159
 160extern int walk_tg_tree_from(struct task_group *from,
 161                             tg_visitor down, tg_visitor up, void *data);
 162
 163/*
 164 * Iterate the full tree, calling @down when first entering a node and @up when
 165 * leaving it for the final time.
 166 *
 167 * Caller must hold rcu_lock or sufficient equivalent.
 168 */
 169static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
 170{
 171        return walk_tg_tree_from(&root_task_group, down, up, data);
 172}
 173
 174extern int tg_nop(struct task_group *tg, void *data);
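/*
 * Typical usage (sketch): visit every group top-down and do nothing on the
 * way back up, e.g.
 *
 *	walk_tg_tree(my_down_visitor, tg_nop, &data);
 *
 * where my_down_visitor is a hypothetical tg_visitor that returns 0 to keep
 * walking or non-zero to abort the walk.
 */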
 175
 176extern void free_fair_sched_group(struct task_group *tg);
 177extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
 178extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
 179extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 180                        struct sched_entity *se, int cpu,
 181                        struct sched_entity *parent);
 182extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 183extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 184
 185extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
 186extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 187extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 188
 189extern void free_rt_sched_group(struct task_group *tg);
 190extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
 191extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 192                struct sched_rt_entity *rt_se, int cpu,
 193                struct sched_rt_entity *parent);
 194
 195#else /* CONFIG_CGROUP_SCHED */
 196
 197struct cfs_bandwidth { };
 198
 199#endif  /* CONFIG_CGROUP_SCHED */
 200
 201/* CFS-related fields in a runqueue */
 202struct cfs_rq {
 203        struct load_weight load;
 204        unsigned int nr_running, h_nr_running;
 205
 206        u64 exec_clock;
 207        u64 min_vruntime;
 208#ifndef CONFIG_64BIT
 209        u64 min_vruntime_copy;
 210#endif
 211
 212        struct rb_root tasks_timeline;
 213        struct rb_node *rb_leftmost;
 214
 215        /*
  216         * 'curr' points to the currently running entity on this cfs_rq.
  217         * It is set to NULL otherwise (i.e. when no entity is currently running).
 218         */
 219        struct sched_entity *curr, *next, *last, *skip;
 220
 221#ifdef  CONFIG_SCHED_DEBUG
 222        unsigned int nr_spread_over;
 223#endif
 224
 225#ifdef CONFIG_FAIR_GROUP_SCHED
 226        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */
 227
 228        /*
 229         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 230         * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 231         * (like users, containers etc.)
 232         *
 233         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 234         * list is used during load balance.
 235         */
 236        int on_list;
 237        struct list_head leaf_cfs_rq_list;
 238        struct task_group *tg;  /* group that "owns" this runqueue */
 239
 240#ifdef CONFIG_SMP
 241        /*
 242         *   h_load = weight * f(tg)
 243         *
 244         * Where f(tg) is the recursive weight fraction assigned to
 245         * this group.
 246         */
 247        unsigned long h_load;
 248
 249        /*
 250         * Maintaining per-cpu shares distribution for group scheduling
 251         *
 252         * load_stamp is the last time we updated the load average
 253         * load_last is the last time we updated the load average and saw load
 254         * load_unacc_exec_time is currently unaccounted execution time
 255         */
 256        u64 load_avg;
 257        u64 load_period;
 258        u64 load_stamp, load_last, load_unacc_exec_time;
 259
 260        unsigned long load_contribution;
 261#endif /* CONFIG_SMP */
 262#ifdef CONFIG_CFS_BANDWIDTH
 263        int runtime_enabled;
 264        u64 runtime_expires;
 265        s64 runtime_remaining;
 266
 267        u64 throttled_timestamp;
 268        int throttled, throttle_count;
 269        struct list_head throttled_list;
 270#endif /* CONFIG_CFS_BANDWIDTH */
 271#endif /* CONFIG_FAIR_GROUP_SCHED */
 272};
 273
 274static inline int rt_bandwidth_enabled(void)
 275{
 276        return sysctl_sched_rt_runtime >= 0;
 277}
 278
 279/* Real-Time classes' related field in a runqueue: */
 280struct rt_rq {
 281        struct rt_prio_array active;
 282        unsigned int rt_nr_running;
 283#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
 284        struct {
 285                int curr; /* highest queued rt task prio */
 286#ifdef CONFIG_SMP
 287                int next; /* next highest */
 288#endif
 289        } highest_prio;
 290#endif
 291#ifdef CONFIG_SMP
 292        unsigned long rt_nr_migratory;
 293        unsigned long rt_nr_total;
 294        int overloaded;
 295        struct plist_head pushable_tasks;
 296#endif
 297        int rt_throttled;
 298        u64 rt_time;
 299        u64 rt_runtime;
 300        /* Nests inside the rq lock: */
 301        raw_spinlock_t rt_runtime_lock;
 302
 303#ifdef CONFIG_RT_GROUP_SCHED
 304        unsigned long rt_nr_boosted;
 305
 306        struct rq *rq;
 307        struct list_head leaf_rt_rq_list;
 308        struct task_group *tg;
 309#endif
 310};
 311
 312#ifdef CONFIG_SMP
 313
 314/*
 315 * We add the notion of a root-domain which will be used to define per-domain
 316 * variables. Each exclusive cpuset essentially defines an island domain by
 317 * fully partitioning the member cpus from any other cpuset. Whenever a new
 318 * exclusive cpuset is created, we also create and attach a new root-domain
 319 * object.
 320 *
 321 */
 322struct root_domain {
 323        atomic_t refcount;
 324        atomic_t rto_count;
 325        struct rcu_head rcu;
 326        cpumask_var_t span;
 327        cpumask_var_t online;
 328
 329        /*
 330         * The "RT overload" flag: it gets set if a CPU has more than
 331         * one runnable RT task.
 332         */
 333        cpumask_var_t rto_mask;
 334        struct cpupri cpupri;
 335};
 336
 337extern struct root_domain def_root_domain;
 338
 339#endif /* CONFIG_SMP */
 340
 341/*
 342 * This is the main, per-CPU runqueue data structure.
 343 *
  344 * Locking rule: code that wants to lock multiple runqueues (such as
  345 * the load balancing or the thread migration code) must acquire the
  346 * locks in ascending &runqueue order.
 347 */
 348struct rq {
 349        /* runqueue lock: */
 350        raw_spinlock_t lock;
 351
 352        /*
 353         * nr_running and cpu_load should be in the same cacheline because
 354         * remote CPUs use both these fields when doing load calculation.
 355         */
 356        unsigned int nr_running;
 357        #define CPU_LOAD_IDX_MAX 5
 358        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 359        unsigned long last_load_update_tick;
 360#ifdef CONFIG_NO_HZ
 361        u64 nohz_stamp;
 362        unsigned long nohz_flags;
 363#endif
 364        int skip_clock_update;
 365
 366        /* capture load from *all* tasks on this cpu: */
 367        struct load_weight load;
 368        unsigned long nr_load_updates;
 369        u64 nr_switches;
 370
 371        struct cfs_rq cfs;
 372        struct rt_rq rt;
 373
 374#ifdef CONFIG_FAIR_GROUP_SCHED
 375        /* list of leaf cfs_rq on this cpu: */
 376        struct list_head leaf_cfs_rq_list;
 377#ifdef CONFIG_SMP
 378        unsigned long h_load_throttle;
 379#endif /* CONFIG_SMP */
 380#endif /* CONFIG_FAIR_GROUP_SCHED */
 381
 382#ifdef CONFIG_RT_GROUP_SCHED
 383        struct list_head leaf_rt_rq_list;
 384#endif
 385
 386        /*
 387         * This is part of a global counter where only the total sum
 388         * over all CPUs matters. A task can increase this counter on
 389         * one CPU and if it got migrated afterwards it may decrease
 390         * it on another CPU. Always updated under the runqueue lock:
 391         */
 392        unsigned long nr_uninterruptible;
 393
 394        struct task_struct *curr, *idle, *stop;
 395        unsigned long next_balance;
 396        struct mm_struct *prev_mm;
 397
 398        u64 clock;
 399        u64 clock_task;
 400
 401        atomic_t nr_iowait;
 402
 403#ifdef CONFIG_SMP
 404        struct root_domain *rd;
 405        struct sched_domain *sd;
 406
 407        unsigned long cpu_power;
 408
 409        unsigned char idle_balance;
 410        /* For active balancing */
 411        int post_schedule;
 412        int active_balance;
 413        int push_cpu;
 414        struct cpu_stop_work active_balance_work;
 415        /* cpu of this runqueue: */
 416        int cpu;
 417        int online;
 418
 419        struct list_head cfs_tasks;
 420
 421        u64 rt_avg;
 422        u64 age_stamp;
 423        u64 idle_stamp;
 424        u64 avg_idle;
 425#endif
 426
 427#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 428        u64 prev_irq_time;
 429#endif
 430#ifdef CONFIG_PARAVIRT
 431        u64 prev_steal_time;
 432#endif
 433#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
 434        u64 prev_steal_time_rq;
 435#endif
 436
 437        /* calc_load related fields */
 438        unsigned long calc_load_update;
 439        long calc_load_active;
 440
 441#ifdef CONFIG_SCHED_HRTICK
 442#ifdef CONFIG_SMP
 443        int hrtick_csd_pending;
 444        struct call_single_data hrtick_csd;
 445#endif
 446        struct hrtimer hrtick_timer;
 447#endif
 448
 449#ifdef CONFIG_SCHEDSTATS
 450        /* latency stats */
 451        struct sched_info rq_sched_info;
 452        unsigned long long rq_cpu_time;
 453        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
 454
 455        /* sys_sched_yield() stats */
 456        unsigned int yld_count;
 457
 458        /* schedule() stats */
 459        unsigned int sched_count;
 460        unsigned int sched_goidle;
 461
 462        /* try_to_wake_up() stats */
 463        unsigned int ttwu_count;
 464        unsigned int ttwu_local;
 465#endif
 466
 467#ifdef CONFIG_SMP
 468        struct llist_head wake_list;
 469#endif
 470};
 471
 472static inline int cpu_of(struct rq *rq)
 473{
 474#ifdef CONFIG_SMP
 475        return rq->cpu;
 476#else
 477        return 0;
 478#endif
 479}
 480
 481DECLARE_PER_CPU(struct rq, runqueues);
 482
 483#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
 484#define this_rq()               (&__get_cpu_var(runqueues))
 485#define task_rq(p)              cpu_rq(task_cpu(p))
 486#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
 487#define raw_rq()                (&__raw_get_cpu_var(runqueues))
 488
 489#ifdef CONFIG_SMP
 490
 491#define rcu_dereference_check_sched_domain(p) \
 492        rcu_dereference_check((p), \
 493                              lockdep_is_held(&sched_domains_mutex))
 494
 495/*
 496 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 497 * See detach_destroy_domains: synchronize_sched for details.
 498 *
 499 * The domain tree of any CPU may only be accessed from within
 500 * preempt-disabled sections.
 501 */
 502#define for_each_domain(cpu, __sd) \
 503        for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
 504                        __sd; __sd = __sd->parent)
 505
 506#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
 507
 508/**
 509 * highest_flag_domain - Return highest sched_domain containing flag.
 510 * @cpu:        The cpu whose highest level of sched domain is to
 511 *              be returned.
 512 * @flag:       The flag to check for the highest sched_domain
 513 *              for the given cpu.
 514 *
 515 * Returns the highest sched_domain of a cpu which contains the given flag.
 516 */
 517static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
 518{
 519        struct sched_domain *sd, *hsd = NULL;
 520
 521        for_each_domain(cpu, sd) {
 522                if (!(sd->flags & flag))
 523                        break;
 524                hsd = sd;
 525        }
 526
 527        return hsd;
 528}
 529
 530DECLARE_PER_CPU(struct sched_domain *, sd_llc);
 531DECLARE_PER_CPU(int, sd_llc_id);
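/*
 * Sketch of how sd_llc/sd_llc_id are typically derived from the topology
 * (cf. update_top_cache_domain()):
 *
 *	struct sched_domain *sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 *	per_cpu(sd_llc_id, cpu) = sd ? cpumask_first(sched_domain_span(sd)) : cpu;
 *
 * i.e. the widest domain whose CPUs still share a last-level cache.
 */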
 532
 533extern int group_balance_cpu(struct sched_group *sg);
 534
 535#endif /* CONFIG_SMP */
 536
 537#include "stats.h"
 538#include "auto_group.h"
 539
 540#ifdef CONFIG_CGROUP_SCHED
 541
 542/*
  543 * Return the group to which this task belongs.
 544 *
 545 * We cannot use task_subsys_state() and friends because the cgroup
 546 * subsystem changes that value before the cgroup_subsys::attach() method
 547 * is called, therefore we cannot pin it and might observe the wrong value.
 548 *
 549 * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
 550 * core changes this before calling sched_move_task().
 551 *
 552 * Instead we use a 'copy' which is updated from sched_move_task() while
 553 * holding both task_struct::pi_lock and rq::lock.
 554 */
 555static inline struct task_group *task_group(struct task_struct *p)
 556{
 557        return p->sched_task_group;
 558}
 559
 560/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
 561static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
 562{
 563#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
 564        struct task_group *tg = task_group(p);
 565#endif
 566
 567#ifdef CONFIG_FAIR_GROUP_SCHED
 568        p->se.cfs_rq = tg->cfs_rq[cpu];
 569        p->se.parent = tg->se[cpu];
 570#endif
 571
 572#ifdef CONFIG_RT_GROUP_SCHED
 573        p->rt.rt_rq  = tg->rt_rq[cpu];
 574        p->rt.parent = tg->rt_se[cpu];
 575#endif
 576}
 577
 578#else /* CONFIG_CGROUP_SCHED */
 579
 580static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
 581static inline struct task_group *task_group(struct task_struct *p)
 582{
 583        return NULL;
 584}
 585
 586#endif /* CONFIG_CGROUP_SCHED */
 587
 588static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 589{
 590        set_task_rq(p, cpu);
 591#ifdef CONFIG_SMP
 592        /*
  593         * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
  594         * successfully executed on another CPU. We must ensure that updates of
 595         * per-task data have been completed by this moment.
 596         */
 597        smp_wmb();
 598        task_thread_info(p)->cpu = cpu;
 599#endif
 600}
 601
 602/*
 603 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 604 */
 605#ifdef CONFIG_SCHED_DEBUG
 606# include <linux/static_key.h>
 607# define const_debug __read_mostly
 608#else
 609# define const_debug const
 610#endif
 611
 612extern const_debug unsigned int sysctl_sched_features;
 613
 614#define SCHED_FEAT(name, enabled)       \
 615        __SCHED_FEAT_##name ,
 616
 617enum {
 618#include "features.h"
 619        __SCHED_FEAT_NR,
 620};
 621
 622#undef SCHED_FEAT
 623
 624#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
 625static __always_inline bool static_branch__true(struct static_key *key)
 626{
 627        return static_key_true(key); /* Not out of line branch. */
 628}
 629
 630static __always_inline bool static_branch__false(struct static_key *key)
 631{
 632        return static_key_false(key); /* Out of line branch. */
 633}
 634
 635#define SCHED_FEAT(name, enabled)                                       \
 636static __always_inline bool static_branch_##name(struct static_key *key) \
 637{                                                                       \
 638        return static_branch__##enabled(key);                           \
 639}
 640
 641#include "features.h"
 642
 643#undef SCHED_FEAT
 644
 645extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 646#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 647#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 648#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 649#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
 650
 651static inline u64 global_rt_period(void)
 652{
 653        return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
 654}
 655
 656static inline u64 global_rt_runtime(void)
 657{
 658        if (sysctl_sched_rt_runtime < 0)
 659                return RUNTIME_INF;
 660
 661        return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
 662}
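/*
 * With the default sysctls (sched_rt_period_us == 1000000,
 * sched_rt_runtime_us == 950000) these evaluate to a 1s period with 0.95s
 * of runtime, i.e. RT tasks may consume at most 95% of each second.
 */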
 663
 664
 665
 666static inline int task_current(struct rq *rq, struct task_struct *p)
 667{
 668        return rq->curr == p;
 669}
 670
 671static inline int task_running(struct rq *rq, struct task_struct *p)
 672{
 673#ifdef CONFIG_SMP
 674        return p->on_cpu;
 675#else
 676        return task_current(rq, p);
 677#endif
 678}
 679
 680
 681#ifndef prepare_arch_switch
 682# define prepare_arch_switch(next)      do { } while (0)
 683#endif
 684#ifndef finish_arch_switch
 685# define finish_arch_switch(prev)       do { } while (0)
 686#endif
 687#ifndef finish_arch_post_lock_switch
 688# define finish_arch_post_lock_switch() do { } while (0)
 689#endif
 690
 691#ifndef __ARCH_WANT_UNLOCKED_CTXSW
 692static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 693{
 694#ifdef CONFIG_SMP
 695        /*
 696         * We can optimise this out completely for !SMP, because the
 697         * SMP rebalancing from interrupt is the only thing that cares
 698         * here.
 699         */
 700        next->on_cpu = 1;
 701#endif
 702}
 703
 704static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 705{
 706#ifdef CONFIG_SMP
 707        /*
 708         * After ->on_cpu is cleared, the task can be moved to a different CPU.
 709         * We must ensure this doesn't happen until the switch is completely
 710         * finished.
 711         */
 712        smp_wmb();
 713        prev->on_cpu = 0;
 714#endif
 715#ifdef CONFIG_DEBUG_SPINLOCK
 716        /* this is a valid case when another task releases the spinlock */
 717        rq->lock.owner = current;
 718#endif
 719        /*
 720         * If we are tracking spinlock dependencies then we have to
 721         * fix up the runqueue lock - which gets 'carried over' from
 722         * prev into current:
 723         */
 724        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
 725
 726        raw_spin_unlock_irq(&rq->lock);
 727}
 728
 729#else /* __ARCH_WANT_UNLOCKED_CTXSW */
 730static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
 731{
 732#ifdef CONFIG_SMP
 733        /*
 734         * We can optimise this out completely for !SMP, because the
 735         * SMP rebalancing from interrupt is the only thing that cares
 736         * here.
 737         */
 738        next->on_cpu = 1;
 739#endif
 740#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 741        raw_spin_unlock_irq(&rq->lock);
 742#else
 743        raw_spin_unlock(&rq->lock);
 744#endif
 745}
 746
 747static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 748{
 749#ifdef CONFIG_SMP
 750        /*
 751         * After ->on_cpu is cleared, the task can be moved to a different CPU.
 752         * We must ensure this doesn't happen until the switch is completely
 753         * finished.
 754         */
 755        smp_wmb();
 756        prev->on_cpu = 0;
 757#endif
 758#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 759        local_irq_enable();
 760#endif
 761}
 762#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 763
 764
 765static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 766{
 767        lw->weight += inc;
 768        lw->inv_weight = 0;
 769}
 770
 771static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
 772{
 773        lw->weight -= dec;
 774        lw->inv_weight = 0;
 775}
 776
 777static inline void update_load_set(struct load_weight *lw, unsigned long w)
 778{
 779        lw->weight = w;
 780        lw->inv_weight = 0;
 781}
 782
 783/*
 784 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 785 * of tasks with abnormal "nice" values across CPUs the contribution that
 786 * each task makes to its run queue's load is weighted according to its
 787 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 788 * scaled version of the new time slice allocation that they receive on time
 789 * slice expiry etc.
 790 */
 791
 792#define WEIGHT_IDLEPRIO                3
 793#define WMULT_IDLEPRIO         1431655765
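/*
 * WMULT_IDLEPRIO is the precomputed inverse of WEIGHT_IDLEPRIO,
 * i.e. 2^32 / 3 (cf. prio_to_wmult[] below).
 */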
 794
 795/*
 796 * Nice levels are multiplicative, with a gentle 10% change for every
 797 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 798 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 799 * that remained on nice 0.
 800 *
 801 * The "10% effect" is relative and cumulative: from _any_ nice level,
 802 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 803 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 804 * If a task goes up by ~10% and another task goes down by ~10% then
 805 * the relative distance between them is ~25%.)
 806 */
 807static const int prio_to_weight[40] = {
 808 /* -20 */     88761,     71755,     56483,     46273,     36291,
 809 /* -15 */     29154,     23254,     18705,     14949,     11916,
 810 /* -10 */      9548,      7620,      6100,      4904,      3906,
 811 /*  -5 */      3121,      2501,      1991,      1586,      1277,
 812 /*   0 */      1024,       820,       655,       526,       423,
 813 /*   5 */       335,       272,       215,       172,       137,
 814 /*  10 */       110,        87,        70,        56,        45,
 815 /*  15 */        36,        29,        23,        18,        15,
 816};
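/*
 * Worked example of the ~1.25 step: the nice-0 weight is 1024, one level
 * "nicer" is 1024 / 1.25 ~= 820 and one level "less nice" is
 * 1024 * 1.25 ~= 1277, matching the +1/0/-1 entries above.
 */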
 817
 818/*
 819 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 820 *
 821 * In cases where the weight does not change often, we can use the
 822 * precalculated inverse to speed up arithmetics by turning divisions
 823 * into multiplications:
 824 */
 825static const u32 prio_to_wmult[40] = {
 826 /* -20 */     48388,     59856,     76040,     92818,    118348,
 827 /* -15 */    147320,    184698,    229616,    287308,    360437,
 828 /* -10 */    449829,    563644,    704093,    875809,   1099582,
 829 /*  -5 */   1376151,   1717300,   2157191,   2708050,   3363326,
 830 /*   0 */   4194304,   5237765,   6557202,   8165337,  10153587,
 831 /*   5 */  12820798,  15790321,  19976592,  24970740,  31350126,
 832 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 833 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 834};
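/*
 * E.g. prio_to_wmult[20] (nice 0) is 4194304 == 2^32 / 1024, so dividing by
 * the nice-0 weight can be done as (x * 4194304) >> 32.
 */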
 835
 836/* Time spent by the tasks of the cpu accounting group executing in ... */
 837enum cpuacct_stat_index {
 838        CPUACCT_STAT_USER,      /* ... user mode */
 839        CPUACCT_STAT_SYSTEM,    /* ... kernel mode */
 840
 841        CPUACCT_STAT_NSTATS,
 842};
 843
 844
 845#define sched_class_highest (&stop_sched_class)
 846#define for_each_class(class) \
 847   for (class = sched_class_highest; class; class = class->next)
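/*
 * Usage sketch - walk the classes from highest (stop) to lowest (idle)
 * priority, e.g.
 *
 *	const struct sched_class *class;
 *
 *	for_each_class(class) {
 *		if (class == p->sched_class)
 *			break;
 *	}
 */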
 848
 849extern const struct sched_class stop_sched_class;
 850extern const struct sched_class rt_sched_class;
 851extern const struct sched_class fair_sched_class;
 852extern const struct sched_class idle_sched_class;
 853
 854
 855#ifdef CONFIG_SMP
 856
 857extern void trigger_load_balance(struct rq *rq, int cpu);
 858extern void idle_balance(int this_cpu, struct rq *this_rq);
 859
 860#else   /* CONFIG_SMP */
 861
 862static inline void idle_balance(int cpu, struct rq *rq)
 863{
 864}
 865
 866#endif
 867
 868extern void sysrq_sched_debug_show(void);
 869extern void sched_init_granularity(void);
 870extern void update_max_interval(void);
 871extern void update_group_power(struct sched_domain *sd, int cpu);
 872extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
 873extern void init_sched_rt_class(void);
 874extern void init_sched_fair_class(void);
 875
 876extern void resched_task(struct task_struct *p);
 877extern void resched_cpu(int cpu);
 878
 879extern struct rt_bandwidth def_rt_bandwidth;
 880extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
 881
 882extern void update_idle_cpu_load(struct rq *this_rq);
 883
 884#ifdef CONFIG_CGROUP_CPUACCT
 885#include <linux/cgroup.h>
 886/* track cpu usage of a group of tasks and its child groups */
 887struct cpuacct {
 888        struct cgroup_subsys_state css;
 889        /* cpuusage holds pointer to a u64-type object on every cpu */
 890        u64 __percpu *cpuusage;
 891        struct kernel_cpustat __percpu *cpustat;
 892};
 893
 894/* return cpu accounting group corresponding to this container */
 895static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
 896{
 897        return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
 898                            struct cpuacct, css);
 899}
 900
 901/* return cpu accounting group to which this task belongs */
 902static inline struct cpuacct *task_ca(struct task_struct *tsk)
 903{
 904        return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
 905                            struct cpuacct, css);
 906}
 907
 908static inline struct cpuacct *parent_ca(struct cpuacct *ca)
 909{
 910        if (!ca || !ca->css.cgroup->parent)
 911                return NULL;
 912        return cgroup_ca(ca->css.cgroup->parent);
 913}
 914
 915extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
 916#else
 917static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 918#endif
 919
 920static inline void inc_nr_running(struct rq *rq)
 921{
 922        rq->nr_running++;
 923}
 924
 925static inline void dec_nr_running(struct rq *rq)
 926{
 927        rq->nr_running--;
 928}
 929
 930extern void update_rq_clock(struct rq *rq);
 931
 932extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
 933extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
 934
 935extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 936
 937extern const_debug unsigned int sysctl_sched_time_avg;
 938extern const_debug unsigned int sysctl_sched_nr_migrate;
 939extern const_debug unsigned int sysctl_sched_migration_cost;
 940
 941static inline u64 sched_avg_period(void)
 942{
 943        return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
 944}
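/*
 * With the default sysctl_sched_time_avg of 1000 (ms) this evaluates to
 * 0.5s in nanoseconds; sched_avg_update() halves rq->rt_avg once per such
 * half-period.
 */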
 945
 946#ifdef CONFIG_SCHED_HRTICK
 947
 948/*
 949 * Use hrtick when:
 950 *  - enabled by features
 951 *  - hrtimer is actually high res
 952 */
 953static inline int hrtick_enabled(struct rq *rq)
 954{
 955        if (!sched_feat(HRTICK))
 956                return 0;
 957        if (!cpu_active(cpu_of(rq)))
 958                return 0;
 959        return hrtimer_is_hres_active(&rq->hrtick_timer);
 960}
 961
 962void hrtick_start(struct rq *rq, u64 delay);
 963
 964#else
 965
 966static inline int hrtick_enabled(struct rq *rq)
 967{
 968        return 0;
 969}
 970
 971#endif /* CONFIG_SCHED_HRTICK */
 972
 973#ifdef CONFIG_SMP
 974extern void sched_avg_update(struct rq *rq);
 975static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 976{
 977        rq->rt_avg += rt_delta;
 978        sched_avg_update(rq);
 979}
 980#else
 981static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 982static inline void sched_avg_update(struct rq *rq) { }
 983#endif
 984
 985extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 986
 987#ifdef CONFIG_SMP
 988#ifdef CONFIG_PREEMPT
 989
 990static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
 991
 992/*
 993 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 994 * way at the expense of forcing extra atomic operations in all
 995 * invocations.  This assures that the double_lock is acquired using the
 996 * same underlying policy as the spinlock_t on this architecture, which
 997 * reduces latency compared to the unfair variant below.  However, it
 998 * also adds more overhead and therefore may reduce throughput.
 999 */
1000static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1001        __releases(this_rq->lock)
1002        __acquires(busiest->lock)
1003        __acquires(this_rq->lock)
1004{
1005        raw_spin_unlock(&this_rq->lock);
1006        double_rq_lock(this_rq, busiest);
1007
1008        return 1;
1009}
1010
1011#else
1012/*
1013 * Unfair double_lock_balance: Optimizes throughput at the expense of
1014 * latency by eliminating extra atomic operations when the locks are
1015 * already in proper order on entry.  This favors lower cpu-ids and will
1016 * grant the double lock to lower cpus over higher ids under contention,
1017 * regardless of entry order into the function.
1018 */
1019static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1020        __releases(this_rq->lock)
1021        __acquires(busiest->lock)
1022        __acquires(this_rq->lock)
1023{
1024        int ret = 0;
1025
1026        if (unlikely(!raw_spin_trylock(&busiest->lock))) {
1027                if (busiest < this_rq) {
1028                        raw_spin_unlock(&this_rq->lock);
1029                        raw_spin_lock(&busiest->lock);
1030                        raw_spin_lock_nested(&this_rq->lock,
1031                                              SINGLE_DEPTH_NESTING);
1032                        ret = 1;
1033                } else
1034                        raw_spin_lock_nested(&busiest->lock,
1035                                              SINGLE_DEPTH_NESTING);
1036        }
1037        return ret;
1038}
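/*
 * Illustration of the ordering above: if one CPU holds the higher-address
 * rq->lock and wants the lower one, it drops its lock and re-acquires both
 * in ascending address order, while a CPU holding the lower-address lock
 * simply blocks on the higher one - so the classic ABBA deadlock cannot
 * occur.
 */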
1039
1040#endif /* CONFIG_PREEMPT */
1041
1042/*
1043 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1044 */
1045static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1046{
1047        if (unlikely(!irqs_disabled())) {
 1048                /* printk() doesn't work well under rq->lock */
1049                raw_spin_unlock(&this_rq->lock);
1050                BUG_ON(1);
1051        }
1052
1053        return _double_lock_balance(this_rq, busiest);
1054}
1055
1056static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1057        __releases(busiest->lock)
1058{
1059        raw_spin_unlock(&busiest->lock);
1060        lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
1061}
1062
1063/*
1064 * double_rq_lock - safely lock two runqueues
1065 *
1066 * Note this does not disable interrupts like task_rq_lock,
1067 * you need to do so manually before calling.
1068 */
1069static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1070        __acquires(rq1->lock)
1071        __acquires(rq2->lock)
1072{
1073        BUG_ON(!irqs_disabled());
1074        if (rq1 == rq2) {
1075                raw_spin_lock(&rq1->lock);
1076                __acquire(rq2->lock);   /* Fake it out ;) */
1077        } else {
1078                if (rq1 < rq2) {
1079                        raw_spin_lock(&rq1->lock);
1080                        raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
1081                } else {
1082                        raw_spin_lock(&rq2->lock);
1083                        raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1084                }
1085        }
1086}
1087
1088/*
1089 * double_rq_unlock - safely unlock two runqueues
1090 *
1091 * Note this does not restore interrupts like task_rq_unlock,
1092 * you need to do so manually after calling.
1093 */
1094static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1095        __releases(rq1->lock)
1096        __releases(rq2->lock)
1097{
1098        raw_spin_unlock(&rq1->lock);
1099        if (rq1 != rq2)
1100                raw_spin_unlock(&rq2->lock);
1101        else
1102                __release(rq2->lock);
1103}
1104
1105#else /* CONFIG_SMP */
1106
1107/*
1108 * double_rq_lock - safely lock two runqueues
1109 *
1110 * Note this does not disable interrupts like task_rq_lock,
1111 * you need to do so manually before calling.
1112 */
1113static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1114        __acquires(rq1->lock)
1115        __acquires(rq2->lock)
1116{
1117        BUG_ON(!irqs_disabled());
1118        BUG_ON(rq1 != rq2);
1119        raw_spin_lock(&rq1->lock);
1120        __acquire(rq2->lock);   /* Fake it out ;) */
1121}
1122
1123/*
1124 * double_rq_unlock - safely unlock two runqueues
1125 *
1126 * Note this does not restore interrupts like task_rq_unlock,
1127 * you need to do so manually after calling.
1128 */
1129static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1130        __releases(rq1->lock)
1131        __releases(rq2->lock)
1132{
1133        BUG_ON(rq1 != rq2);
1134        raw_spin_unlock(&rq1->lock);
1135        __release(rq2->lock);
1136}
1137
1138#endif
1139
1140extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1141extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1142extern void print_cfs_stats(struct seq_file *m, int cpu);
1143extern void print_rt_stats(struct seq_file *m, int cpu);
1144
1145extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1146extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1147
1148extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
1149
1150#ifdef CONFIG_NO_HZ
1151enum rq_nohz_flag_bits {
1152        NOHZ_TICK_STOPPED,
1153        NOHZ_BALANCE_KICK,
1154        NOHZ_IDLE,
1155};
1156
1157#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
1158#endif
1159