/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Scheduler internal types and methods:
 */
5#include <linux/sched.h>
6
7#include <linux/sched/autogroup.h>
8#include <linux/sched/clock.h>
9#include <linux/sched/coredump.h>
10#include <linux/sched/cpufreq.h>
11#include <linux/sched/cputime.h>
12#include <linux/sched/deadline.h>
13#include <linux/sched/debug.h>
14#include <linux/sched/hotplug.h>
15#include <linux/sched/idle.h>
16#include <linux/sched/init.h>
17#include <linux/sched/isolation.h>
18#include <linux/sched/jobctl.h>
19#include <linux/sched/loadavg.h>
20#include <linux/sched/mm.h>
21#include <linux/sched/nohz.h>
22#include <linux/sched/numa_balancing.h>
23#include <linux/sched/prio.h>
24#include <linux/sched/rt.h>
25#include <linux/sched/signal.h>
26#include <linux/sched/smt.h>
27#include <linux/sched/stat.h>
28#include <linux/sched/sysctl.h>
29#include <linux/sched/task.h>
30#include <linux/sched/task_stack.h>
31#include <linux/sched/topology.h>
32#include <linux/sched/user.h>
33#include <linux/sched/wake_q.h>
34#include <linux/sched/xacct.h>
35
36#include <uapi/linux/sched/types.h>
37
38#include <linux/binfmts.h>
39#include <linux/bitops.h>
40#include <linux/blkdev.h>
41#include <linux/compat.h>
42#include <linux/context_tracking.h>
43#include <linux/cpufreq.h>
44#include <linux/cpuidle.h>
45#include <linux/cpuset.h>
46#include <linux/ctype.h>
47#include <linux/debugfs.h>
48#include <linux/delayacct.h>
49#include <linux/energy_model.h>
50#include <linux/init_task.h>
51#include <linux/kprobes.h>
52#include <linux/kthread.h>
53#include <linux/membarrier.h>
54#include <linux/migrate.h>
55#include <linux/mmu_context.h>
56#include <linux/nmi.h>
57#include <linux/proc_fs.h>
58#include <linux/prefetch.h>
59#include <linux/profile.h>
60#include <linux/psi.h>
61#include <linux/ratelimit.h>
62#include <linux/rcupdate_wait.h>
63#include <linux/security.h>
64#include <linux/stop_machine.h>
65#include <linux/suspend.h>
66#include <linux/swait.h>
67#include <linux/syscalls.h>
68#include <linux/task_work.h>
69#include <linux/tsacct_kern.h>
70
71#include <asm/tlb.h>
72
73#ifdef CONFIG_PARAVIRT
74# include <asm/paravirt.h>
75#endif
76
77#include "cpupri.h"
78#include "cpudeadline.h"
79
80#include <trace/events/sched.h>
81
82#ifdef CONFIG_SCHED_DEBUG
83# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
84#else
85# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
86#endif
87
88struct rq;
89struct cpuidle_state;
90
91
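/* task_struct::on_rq states: */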
92#define TASK_ON_RQ_QUEUED 1
93#define TASK_ON_RQ_MIGRATING 2
94
95extern __read_mostly int scheduler_running;
96
97extern unsigned long calc_load_update;
98extern atomic_long_t calc_load_tasks;
99
100extern void calc_global_load_tick(struct rq *this_rq);
101extern long calc_load_fold_active(struct rq *this_rq, long adjust);
102
103extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
104
105
106
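/* Helpers for converting nanosecond timing to jiffy resolution: */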
107#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
108
109
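/*
 * Increase resolution of nice-level calculations for 64-bit architectures:
 * task weights are expressed at SCHED_FIXEDPOINT_SHIFT resolution, and on
 * 64-bit the internal load values carry one extra SCHED_FIXEDPOINT_SHIFT of
 * resolution so that share distribution for low-weight task groups loses
 * less precision. scale_load() and scale_load_down() convert between the
 * user-visible weight and the internal, higher-resolution load value.
 */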
123#ifdef CONFIG_64BIT
124# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
125# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
126# define scale_load_down(w) \
127({ \
128 unsigned long __w = (w); \
129 if (__w) \
130 __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
131 __w; \
132})
133#else
134# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
135# define scale_load(w) (w)
136# define scale_load_down(w) (w)
137#endif
138
139
140
141
142
143
144
145
146
147
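/*
 * NICE_0_LOAD is the load weight of a nice-0 SCHED_NORMAL task expressed at
 * the internal (scaled) resolution defined above.
 */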
148#define NICE_0_LOAD (1L << NICE_0_LOAD_SHIFT)
149
150
151
152
153
154
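/*
 * Single value that decides SCHED_DEADLINE internal math precision:
 * 10 -> just above 1us
 *  9 -> just above 0.5us
 */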
155#define DL_SCALE 10
156
157
158
159
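/* Single value that denotes runtime == period, ie unlimited time. */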
160#define RUNTIME_INF ((u64)~0ULL)
161
162static inline int idle_policy(int policy)
163{
164 return policy == SCHED_IDLE;
165}
166static inline int fair_policy(int policy)
167{
168 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
169}
170
171static inline int rt_policy(int policy)
172{
173 return policy == SCHED_FIFO || policy == SCHED_RR;
174}
175
176static inline int dl_policy(int policy)
177{
178 return policy == SCHED_DEADLINE;
179}
180static inline bool valid_policy(int policy)
181{
182 return idle_policy(policy) || fair_policy(policy) ||
183 rt_policy(policy) || dl_policy(policy);
184}
185
186static inline int task_has_idle_policy(struct task_struct *p)
187{
188 return idle_policy(p->policy);
189}
190
191static inline int task_has_rt_policy(struct task_struct *p)
192{
193 return rt_policy(p->policy);
194}
195
196static inline int task_has_dl_policy(struct task_struct *p)
197{
198 return dl_policy(p->policy);
199}
200
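/*
 * Scale @v by CPU capacity @s, where @s is relative to SCHED_CAPACITY_SCALE
 * (1024); e.g. cap_scale(100, 512) == 50.
 */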
201#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
202
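/*
 * Exponentially weighted moving average with a 1/8 weight for the new
 * sample: *avg += (sample - *avg) / 8.
 */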
203static inline void update_avg(u64 *avg, u64 sample)
204{
205 s64 diff = sample - *avg;
206 *avg += diff / 8;
207}
208
209
210
211
212
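/*
 * Shifting a value by an exponent greater *or equal* to the size of said
 * value is undefined behaviour; cap the shift at BITS_PER_TYPE() - 1.
 */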
213#define shr_bound(val, shift) \
214 (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
215
216
217
218
219
220
221
222
223
224
225
226
227
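/*
 * !! For sched_setattr_nocheck() (kernel) only !!
 *
 * Marks the DL entities of the schedutil governor kthreads as "special":
 * they must be able to preempt SCHED_DEADLINE tasks (to change the CPU
 * frequency on their behalf) without being subject to deadline bandwidth
 * accounting.
 */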
228#define SCHED_FLAG_SUGOV 0x10000000
229
230static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
231{
232#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
233 return unlikely(dl_se->flags & SCHED_FLAG_SUGOV);
234#else
235 return false;
236#endif
237}
238
239
240
241
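/* Tells if entity @a should preempt entity @b. */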
242static inline bool
243dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
244{
245 return dl_entity_is_special(a) ||
246 dl_time_before(a->deadline, b->deadline);
247}
248
249
250
251
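/*
 * This is the priority-queue data structure of the RT scheduling class:
 */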
252struct rt_prio_array {
253 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1);
254 struct list_head queue[MAX_RT_PRIO];
255};
256
257struct rt_bandwidth {
258
259 raw_spinlock_t rt_runtime_lock;
260 ktime_t rt_period;
261 u64 rt_runtime;
262 struct hrtimer rt_period_timer;
263 unsigned int rt_period_active;
264};
265
266void __dl_clear_params(struct task_struct *p);
267
268struct dl_bandwidth {
269 raw_spinlock_t dl_runtime_lock;
270 u64 dl_runtime;
271 u64 dl_period;
272};
273
274static inline int dl_bandwidth_enabled(void)
275{
276 return sysctl_sched_rt_runtime >= 0;
277}
278
279
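/*
 * Per root-domain (or per-rq on !SMP) deadline-bandwidth accounting:
 * @bw is the maximum fraction of CPU time usable by -deadline tasks,
 * scaled by BW_UNIT (i.e. << BW_SHIFT); @total_bw is the bandwidth
 * currently allocated to admitted -deadline tasks.
 */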
297struct dl_bw {
298 raw_spinlock_t lock;
299 u64 bw;
300 u64 total_bw;
301};
302
303static inline void __dl_update(struct dl_bw *dl_b, s64 bw);
304
305static inline
306void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
307{
308 dl_b->total_bw -= tsk_bw;
309 __dl_update(dl_b, (s32)tsk_bw / cpus);
310}
311
312static inline
313void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
314{
315 dl_b->total_bw += tsk_bw;
316 __dl_update(dl_b, -((s32)tsk_bw / cpus));
317}
318
319static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
320 u64 old_bw, u64 new_bw)
321{
322 return dl_b->bw != -1 &&
323 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
324}
325
326
327
328
329
330
331
332
333
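/*
 * Verify the fitness of task @p to run on @cpu taking into account the
 * original CPU capacity: the task fits if
 *
 *    p->dl.dl_runtime / p->dl.dl_deadline <= capacity / SCHED_CAPACITY_SCALE
 */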
334static inline bool dl_task_fits_capacity(struct task_struct *p, int cpu)
335{
336 unsigned long cap = arch_scale_cpu_capacity(cpu);
337
338 return cap_scale(p->dl.dl_deadline, cap) >= p->dl.dl_runtime;
339}
340
341extern void init_dl_bw(struct dl_bw *dl_b);
342extern int sched_dl_global_validate(void);
343extern void sched_dl_do_global(void);
344extern int sched_dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr);
345extern void __setparam_dl(struct task_struct *p, const struct sched_attr *attr);
346extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr);
347extern bool __checkparam_dl(const struct sched_attr *attr);
348extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr);
349extern int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
350extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
351extern bool dl_cpu_busy(unsigned int cpu);
352
353#ifdef CONFIG_CGROUP_SCHED
354
355#include <linux/cgroup.h>
356#include <linux/psi.h>
357
358struct cfs_rq;
359struct rt_rq;
360
361extern struct list_head task_groups;
362
363struct cfs_bandwidth {
364#ifdef CONFIG_CFS_BANDWIDTH
365 raw_spinlock_t lock;
366 ktime_t period;
367 u64 quota;
368 u64 runtime;
369 u64 burst;
370 s64 hierarchical_quota;
371
372 u8 idle;
373 u8 period_active;
374 u8 slack_started;
375 struct hrtimer period_timer;
376 struct hrtimer slack_timer;
377 struct list_head throttled_cfs_rq;
378
379
380 int nr_periods;
381 int nr_throttled;
382 u64 throttled_time;
383#endif
384};
385
386
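/* Task group related information */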
387struct task_group {
388 struct cgroup_subsys_state css;
389
390#ifdef CONFIG_FAIR_GROUP_SCHED
391
392 struct sched_entity **se;
393
394 struct cfs_rq **cfs_rq;
395 unsigned long shares;
396
397#ifdef CONFIG_SMP
398
399
400
401
402
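	/*
	 * load_avg can be heavily contended at clock tick time, so put
	 * it in its own cacheline separated from the fields above which
	 * will also be accessed at each tick.
	 */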
403 atomic_long_t load_avg ____cacheline_aligned;
404#endif
405#endif
406
407#ifdef CONFIG_RT_GROUP_SCHED
408 struct sched_rt_entity **rt_se;
409 struct rt_rq **rt_rq;
410
411 struct rt_bandwidth rt_bandwidth;
412#endif
413
414 struct rcu_head rcu;
415 struct list_head list;
416
417 struct task_group *parent;
418 struct list_head siblings;
419 struct list_head children;
420
421#ifdef CONFIG_SCHED_AUTOGROUP
422 struct autogroup *autogroup;
423#endif
424
425 struct cfs_bandwidth cfs_bandwidth;
426
427#ifdef CONFIG_UCLAMP_TASK_GROUP
428
429 unsigned int uclamp_pct[UCLAMP_CNT];
430
431 struct uclamp_se uclamp_req[UCLAMP_CNT];
432
433 struct uclamp_se uclamp[UCLAMP_CNT];
434#endif
435
436};
437
438#ifdef CONFIG_FAIR_GROUP_SCHED
439#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
440
441
442
443
444
445
446
447
448
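/*
 * A weight of 0 or 1 can cause arithmetic problems, so group shares are
 * clamped to the [MIN_SHARES, MAX_SHARES] range (at scale_load() resolution).
 */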
449#define MIN_SHARES (1UL << 1)
450#define MAX_SHARES (1UL << 18)
451#endif
452
453typedef int (*tg_visitor)(struct task_group *, void *);
454
455extern int walk_tg_tree_from(struct task_group *from,
456 tg_visitor down, tg_visitor up, void *data);
457
458
459
460
461
462
463
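/*
 * Iterate the full task_group tree, calling @down when first entering a
 * node and @up when leaving it for the final time.
 *
 * Caller must hold rcu_lock or sufficient equivalent.
 */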
464static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
465{
466 return walk_tg_tree_from(&root_task_group, down, up, data);
467}
468
469extern int tg_nop(struct task_group *tg, void *data);
470
471extern void free_fair_sched_group(struct task_group *tg);
472extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
473extern void online_fair_sched_group(struct task_group *tg);
474extern void unregister_fair_sched_group(struct task_group *tg);
475extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
476 struct sched_entity *se, int cpu,
477 struct sched_entity *parent);
478extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
479
480extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
481extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
482extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
483
484extern void free_rt_sched_group(struct task_group *tg);
485extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
486extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
487 struct sched_rt_entity *rt_se, int cpu,
488 struct sched_rt_entity *parent);
489extern int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us);
490extern int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us);
491extern long sched_group_rt_runtime(struct task_group *tg);
492extern long sched_group_rt_period(struct task_group *tg);
493extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
494
495extern struct task_group *sched_create_group(struct task_group *parent);
496extern void sched_online_group(struct task_group *tg,
497 struct task_group *parent);
498extern void sched_destroy_group(struct task_group *tg);
499extern void sched_offline_group(struct task_group *tg);
500
501extern void sched_move_task(struct task_struct *tsk);
502
503#ifdef CONFIG_FAIR_GROUP_SCHED
504extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
505
506#ifdef CONFIG_SMP
507extern void set_task_rq_fair(struct sched_entity *se,
508 struct cfs_rq *prev, struct cfs_rq *next);
509#else
510static inline void set_task_rq_fair(struct sched_entity *se,
511 struct cfs_rq *prev, struct cfs_rq *next) { }
512#endif
513#endif
514
515#else
516
517struct cfs_bandwidth { };
518
519#endif
520
521
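/* CFS-related fields in a runqueue: */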
522struct cfs_rq {
523 struct load_weight load;
524 unsigned int nr_running;
525 unsigned int h_nr_running;
526 unsigned int idle_h_nr_running;
527
528 u64 exec_clock;
529 u64 min_vruntime;
530#ifdef CONFIG_SCHED_CORE
531 unsigned int forceidle_seq;
532 u64 min_vruntime_fi;
533#endif
534
535#ifndef CONFIG_64BIT
536 u64 min_vruntime_copy;
537#endif
538
539 struct rb_root_cached tasks_timeline;
540
541
542
543
544
545 struct sched_entity *curr;
546 struct sched_entity *next;
547 struct sched_entity *last;
548 struct sched_entity *skip;
549
550#ifdef CONFIG_SCHED_DEBUG
551 unsigned int nr_spread_over;
552#endif
553
554#ifdef CONFIG_SMP
555
556
557
558 struct sched_avg avg;
559#ifndef CONFIG_64BIT
560 u64 load_last_update_time_copy;
561#endif
562 struct {
563 raw_spinlock_t lock ____cacheline_aligned;
564 int nr;
565 unsigned long load_avg;
566 unsigned long util_avg;
567 unsigned long runnable_avg;
568 } removed;
569
570#ifdef CONFIG_FAIR_GROUP_SCHED
571 unsigned long tg_load_avg_contrib;
572 long propagate;
573 long prop_runnable_sum;
574
575
576
577
578
579
580
581 unsigned long h_load;
582 u64 last_h_load_update;
583 struct sched_entity *h_load_next;
584#endif
585#endif
586
587#ifdef CONFIG_FAIR_GROUP_SCHED
588 struct rq *rq;
589
590
591
592
593
594
595
596
597
598 int on_list;
599 struct list_head leaf_cfs_rq_list;
600 struct task_group *tg;
601
602#ifdef CONFIG_CFS_BANDWIDTH
603 int runtime_enabled;
604 s64 runtime_remaining;
605
606 u64 throttled_clock;
607 u64 throttled_clock_task;
608 u64 throttled_clock_task_time;
609 int throttled;
610 int throttle_count;
611 struct list_head throttled_list;
612#endif
613#endif
614};
615
616static inline int rt_bandwidth_enabled(void)
617{
618 return sysctl_sched_rt_runtime >= 0;
619}
620
621
622#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
623# define HAVE_RT_PUSH_IPI
624#endif
625
626
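/* Real-Time classes' related field in a runqueue: */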
627struct rt_rq {
628 struct rt_prio_array active;
629 unsigned int rt_nr_running;
630 unsigned int rr_nr_running;
631#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
632 struct {
633 int curr;
634#ifdef CONFIG_SMP
635 int next;
636#endif
637 } highest_prio;
638#endif
639#ifdef CONFIG_SMP
640 unsigned int rt_nr_migratory;
641 unsigned int rt_nr_total;
642 int overloaded;
643 struct plist_head pushable_tasks;
644
645#endif
646 int rt_queued;
647
648 int rt_throttled;
649 u64 rt_time;
650 u64 rt_runtime;
651
652 raw_spinlock_t rt_runtime_lock;
653
654#ifdef CONFIG_RT_GROUP_SCHED
655 unsigned int rt_nr_boosted;
656
657 struct rq *rq;
658 struct task_group *tg;
659#endif
660};
661
662static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
663{
664 return rt_rq->rt_queued && rt_rq->rt_nr_running;
665}
666
667
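/* Deadline class' related fields in a runqueue: */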
668struct dl_rq {
669
670 struct rb_root_cached root;
671
672 unsigned int dl_nr_running;
673
674#ifdef CONFIG_SMP
675
676
677
678
679
680
681 struct {
682 u64 curr;
683 u64 next;
684 } earliest_dl;
685
686 unsigned int dl_nr_migratory;
687 int overloaded;
688
689
690
691
692
693
694 struct rb_root_cached pushable_dl_tasks_root;
695#else
696 struct dl_bw dl_bw;
697#endif
698
699
700
701
702
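	/*
	 * "Active utilization" for this runqueue: increased when a task
	 * wakes up (becomes TASK_RUNNING) and decreased when a task blocks.
	 */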
703 u64 running_bw;
704
705
706
707
708
709
710
711
712
713
714 u64 this_bw;
715 u64 extra_bw;
716
717
718
719
720
721 u64 bw_ratio;
722};
723
724#ifdef CONFIG_FAIR_GROUP_SCHED
725
726#define entity_is_task(se) (!se->my_q)
727
728static inline void se_update_runnable(struct sched_entity *se)
729{
730 if (!entity_is_task(se))
731 se->runnable_weight = se->my_q->h_nr_running;
732}
733
734static inline long se_runnable(struct sched_entity *se)
735{
736 if (entity_is_task(se))
737 return !!se->on_rq;
738 else
739 return se->runnable_weight;
740}
741
742#else
743#define entity_is_task(se) 1
744
745static inline void se_update_runnable(struct sched_entity *se) {}
746
747static inline long se_runnable(struct sched_entity *se)
748{
749 return !!se->on_rq;
750}
751#endif
752
753#ifdef CONFIG_SMP
754
755
756
757static inline long se_weight(struct sched_entity *se)
758{
759 return scale_load_down(se->load.weight);
760}
761
762
763static inline bool sched_asym_prefer(int a, int b)
764{
765 return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
766}
767
768struct perf_domain {
769 struct em_perf_domain *em_pd;
770 struct perf_domain *next;
771 struct rcu_head rcu;
772};
773
774
775#define SG_OVERLOAD 0x1
776#define SG_OVERUTILIZED 0x2
777
778
779
780
781
782
783
784
785
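/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member CPUs from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */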
786struct root_domain {
787 atomic_t refcount;
788 atomic_t rto_count;
789 struct rcu_head rcu;
790 cpumask_var_t span;
791 cpumask_var_t online;
792
793
794
795
796
797
798 int overload;
799
800
801 int overutilized;
802
803
804
805
806
807 cpumask_var_t dlo_mask;
808 atomic_t dlo_count;
809 struct dl_bw dl_bw;
810 struct cpudl cpudl;
811
812
813
814
815
816
817
818
819 u64 visit_gen;
820
821#ifdef HAVE_RT_PUSH_IPI
822
823
824
825 struct irq_work rto_push_work;
826 raw_spinlock_t rto_lock;
827
828 int rto_loop;
829 int rto_cpu;
830
831 atomic_t rto_loop_next;
832 atomic_t rto_loop_start;
833#endif
834
835
836
837
838 cpumask_var_t rto_mask;
839 struct cpupri cpupri;
840
841 unsigned long max_cpu_capacity;
842
843
844
845
846
847 struct perf_domain __rcu *pd;
848};
849
850extern void init_defrootdomain(void);
851extern int sched_init_domains(const struct cpumask *cpu_map);
852extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
853extern void sched_get_rd(struct root_domain *rd);
854extern void sched_put_rd(struct root_domain *rd);
855
856#ifdef HAVE_RT_PUSH_IPI
857extern void rto_push_irq_work_func(struct irq_work *work);
858#endif
859#endif
860
861#ifdef CONFIG_UCLAMP_TASK
862
863
864
865
866
867
868
869
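/*
 * struct uclamp_bucket - utilization clamp bucket
 * @value: clamp value for the tasks refcounted in this bucket
 * @tasks: number of RUNNABLE tasks refcounted in this bucket
 */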
870struct uclamp_bucket {
871 unsigned long value : bits_per(SCHED_CAPACITY_SCALE);
872 unsigned long tasks : BITS_PER_LONG - bits_per(SCHED_CAPACITY_SCALE);
873};
874
875
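/*
 * struct uclamp_rq - rq's utilization clamp
 * @value:  currently active clamp value for the rq
 * @bucket: utilization clamp buckets affecting the rq
 *
 * The rq-wide clamp value is the maximum of the clamp values of the
 * currently RUNNABLE tasks, refcounted per bucket.
 */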
897struct uclamp_rq {
898 unsigned int value;
899 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
900};
901
902DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
903#endif
904
905
906
907
908
909
910
911
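/*
 * This is the main, per-CPU runqueue data structure.
 */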
912struct rq {
913
914 raw_spinlock_t __lock;
915
916
917
918
919
920 unsigned int nr_running;
921#ifdef CONFIG_NUMA_BALANCING
922 unsigned int nr_numa_running;
923 unsigned int nr_preferred_running;
924 unsigned int numa_migrate_on;
925#endif
926#ifdef CONFIG_NO_HZ_COMMON
927#ifdef CONFIG_SMP
928 unsigned long last_blocked_load_update_tick;
929 unsigned int has_blocked_load;
930 call_single_data_t nohz_csd;
931#endif
932 unsigned int nohz_tick_stopped;
933 atomic_t nohz_flags;
934#endif
935
936#ifdef CONFIG_SMP
937 unsigned int ttwu_pending;
938#endif
939 u64 nr_switches;
940
941#ifdef CONFIG_UCLAMP_TASK
942
943 struct uclamp_rq uclamp[UCLAMP_CNT] ____cacheline_aligned;
944 unsigned int uclamp_flags;
945#define UCLAMP_FLAG_IDLE 0x01
946#endif
947
948 struct cfs_rq cfs;
949 struct rt_rq rt;
950 struct dl_rq dl;
951
952#ifdef CONFIG_FAIR_GROUP_SCHED
953
954 struct list_head leaf_cfs_rq_list;
955 struct list_head *tmp_alone_branch;
956#endif
957
958
959
960
961
962
963
964 unsigned int nr_uninterruptible;
965
966 struct task_struct __rcu *curr;
967 struct task_struct *idle;
968 struct task_struct *stop;
969 unsigned long next_balance;
970 struct mm_struct *prev_mm;
971
972 unsigned int clock_update_flags;
973 u64 clock;
974
975 u64 clock_task ____cacheline_aligned;
976 u64 clock_pelt;
977 unsigned long lost_idle_time;
978
979 atomic_t nr_iowait;
980
981#ifdef CONFIG_SCHED_DEBUG
982 u64 last_seen_need_resched_ns;
983 int ticks_without_resched;
984#endif
985
986#ifdef CONFIG_MEMBARRIER
987 int membarrier_state;
988#endif
989
990#ifdef CONFIG_SMP
991 struct root_domain *rd;
992 struct sched_domain __rcu *sd;
993
994 unsigned long cpu_capacity;
995 unsigned long cpu_capacity_orig;
996
997 struct callback_head *balance_callback;
998
999 unsigned char nohz_idle_balance;
1000 unsigned char idle_balance;
1001
1002 unsigned long misfit_task_load;
1003
1004
1005 int active_balance;
1006 int push_cpu;
1007 struct cpu_stop_work active_balance_work;
1008
1009
1010 int cpu;
1011 int online;
1012
1013 struct list_head cfs_tasks;
1014
1015 struct sched_avg avg_rt;
1016 struct sched_avg avg_dl;
1017#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
1018 struct sched_avg avg_irq;
1019#endif
1020#ifdef CONFIG_SCHED_THERMAL_PRESSURE
1021 struct sched_avg avg_thermal;
1022#endif
1023 u64 idle_stamp;
1024 u64 avg_idle;
1025
1026 unsigned long wake_stamp;
1027 u64 wake_avg_idle;
1028
1029
1030 u64 max_idle_balance_cost;
1031
1032#ifdef CONFIG_HOTPLUG_CPU
1033 struct rcuwait hotplug_wait;
1034#endif
1035#endif
1036
1037#ifdef CONFIG_IRQ_TIME_ACCOUNTING
1038 u64 prev_irq_time;
1039#endif
1040#ifdef CONFIG_PARAVIRT
1041 u64 prev_steal_time;
1042#endif
1043#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
1044 u64 prev_steal_time_rq;
1045#endif
1046
1047
1048 unsigned long calc_load_update;
1049 long calc_load_active;
1050
1051#ifdef CONFIG_SCHED_HRTICK
1052#ifdef CONFIG_SMP
1053 call_single_data_t hrtick_csd;
1054#endif
1055 struct hrtimer hrtick_timer;
1056 ktime_t hrtick_time;
1057#endif
1058
1059#ifdef CONFIG_SCHEDSTATS
1060
1061 struct sched_info rq_sched_info;
1062 unsigned long long rq_cpu_time;
1063
1064
1065
1066 unsigned int yld_count;
1067
1068
1069 unsigned int sched_count;
1070 unsigned int sched_goidle;
1071
1072
1073 unsigned int ttwu_count;
1074 unsigned int ttwu_local;
1075#endif
1076
1077#ifdef CONFIG_CPU_IDLE
1078
1079 struct cpuidle_state *idle_state;
1080#endif
1081
1082#ifdef CONFIG_SMP
1083 unsigned int nr_pinned;
1084#endif
1085 unsigned int push_busy;
1086 struct cpu_stop_work push_work;
1087
1088#ifdef CONFIG_SCHED_CORE
1089
1090 struct rq *core;
1091 struct task_struct *core_pick;
1092 unsigned int core_enabled;
1093 unsigned int core_sched_seq;
1094 struct rb_root core_tree;
1095
1096
1097 unsigned int core_task_seq;
1098 unsigned int core_pick_seq;
1099 unsigned long core_cookie;
1100 unsigned char core_forceidle;
1101 unsigned int core_forceidle_seq;
1102#endif
1103};
1104
1105#ifdef CONFIG_FAIR_GROUP_SCHED
1106
1107
1108static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1109{
1110 return cfs_rq->rq;
1111}
1112
1113#else
1114
1115static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
1116{
1117 return container_of(cfs_rq, struct rq, cfs);
1118}
1119#endif
1120
1121static inline int cpu_of(struct rq *rq)
1122{
1123#ifdef CONFIG_SMP
1124 return rq->cpu;
1125#else
1126 return 0;
1127#endif
1128}
1129
1130#define MDF_PUSH 0x01
1131
1132static inline bool is_migration_disabled(struct task_struct *p)
1133{
1134#ifdef CONFIG_SMP
1135 return p->migration_disabled;
1136#else
1137 return false;
1138#endif
1139}
1140
1141struct sched_group;
1142#ifdef CONFIG_SCHED_CORE
1143static inline struct cpumask *sched_group_span(struct sched_group *sg);
1144
1145DECLARE_STATIC_KEY_FALSE(__sched_core_enabled);
1146
1147static inline bool sched_core_enabled(struct rq *rq)
1148{
1149 return static_branch_unlikely(&__sched_core_enabled) && rq->core_enabled;
1150}
1151
1152static inline bool sched_core_disabled(void)
1153{
1154 return !static_branch_unlikely(&__sched_core_enabled);
1155}
1156
1157
1158
1159
1160
1161static inline raw_spinlock_t *rq_lockp(struct rq *rq)
1162{
1163 if (sched_core_enabled(rq))
1164 return &rq->core->__lock;
1165
1166 return &rq->__lock;
1167}
1168
1169static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
1170{
1171 if (rq->core_enabled)
1172 return &rq->core->__lock;
1173
1174 return &rq->__lock;
1175}
1176
1177bool cfs_prio_less(struct task_struct *a, struct task_struct *b, bool fi);
1178
1179
1180
1181
1182
1183
1184
1185static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
1186{
1187
1188 if (!sched_core_enabled(rq))
1189 return true;
1190
1191 return rq->core->core_cookie == p->core_cookie;
1192}
1193
1194static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
1195{
1196 bool idle_core = true;
1197 int cpu;
1198
1199
1200 if (!sched_core_enabled(rq))
1201 return true;
1202
1203 for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
1204 if (!available_idle_cpu(cpu)) {
1205 idle_core = false;
1206 break;
1207 }
1208 }
1209
1210
1211
1212
1213
1214 return idle_core || rq->core->core_cookie == p->core_cookie;
1215}
1216
1217static inline bool sched_group_cookie_match(struct rq *rq,
1218 struct task_struct *p,
1219 struct sched_group *group)
1220{
1221 int cpu;
1222
1223
1224 if (!sched_core_enabled(rq))
1225 return true;
1226
1227 for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
1228 if (sched_core_cookie_match(rq, p))
1229 return true;
1230 }
1231 return false;
1232}
1233
1234extern void queue_core_balance(struct rq *rq);
1235
1236static inline bool sched_core_enqueued(struct task_struct *p)
1237{
1238 return !RB_EMPTY_NODE(&p->core_node);
1239}
1240
1241extern void sched_core_enqueue(struct rq *rq, struct task_struct *p);
1242extern void sched_core_dequeue(struct rq *rq, struct task_struct *p);
1243
1244extern void sched_core_get(void);
1245extern void sched_core_put(void);
1246
1247extern unsigned long sched_core_alloc_cookie(void);
1248extern void sched_core_put_cookie(unsigned long cookie);
1249extern unsigned long sched_core_get_cookie(unsigned long cookie);
1250extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie);
1251
1252#else
1253
1254static inline bool sched_core_enabled(struct rq *rq)
1255{
1256 return false;
1257}
1258
1259static inline bool sched_core_disabled(void)
1260{
1261 return true;
1262}
1263
1264static inline raw_spinlock_t *rq_lockp(struct rq *rq)
1265{
1266 return &rq->__lock;
1267}
1268
1269static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
1270{
1271 return &rq->__lock;
1272}
1273
1274static inline void queue_core_balance(struct rq *rq)
1275{
1276}
1277
1278static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
1279{
1280 return true;
1281}
1282
1283static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
1284{
1285 return true;
1286}
1287
1288static inline bool sched_group_cookie_match(struct rq *rq,
1289 struct task_struct *p,
1290 struct sched_group *group)
1291{
1292 return true;
1293}
1294#endif
1295
1296static inline void lockdep_assert_rq_held(struct rq *rq)
1297{
1298 lockdep_assert_held(__rq_lockp(rq));
1299}
1300
1301extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
1302extern bool raw_spin_rq_trylock(struct rq *rq);
1303extern void raw_spin_rq_unlock(struct rq *rq);
1304
1305static inline void raw_spin_rq_lock(struct rq *rq)
1306{
1307 raw_spin_rq_lock_nested(rq, 0);
1308}
1309
1310static inline void raw_spin_rq_lock_irq(struct rq *rq)
1311{
1312 local_irq_disable();
1313 raw_spin_rq_lock(rq);
1314}
1315
1316static inline void raw_spin_rq_unlock_irq(struct rq *rq)
1317{
1318 raw_spin_rq_unlock(rq);
1319 local_irq_enable();
1320}
1321
1322static inline unsigned long _raw_spin_rq_lock_irqsave(struct rq *rq)
1323{
1324 unsigned long flags;
1325 local_irq_save(flags);
1326 raw_spin_rq_lock(rq);
1327 return flags;
1328}
1329
1330static inline void raw_spin_rq_unlock_irqrestore(struct rq *rq, unsigned long flags)
1331{
1332 raw_spin_rq_unlock(rq);
1333 local_irq_restore(flags);
1334}
1335
1336#define raw_spin_rq_lock_irqsave(rq, flags) \
1337do { \
1338 flags = _raw_spin_rq_lock_irqsave(rq); \
1339} while (0)
1340
1341#ifdef CONFIG_SCHED_SMT
1342extern void __update_idle_core(struct rq *rq);
1343
1344static inline void update_idle_core(struct rq *rq)
1345{
1346 if (static_branch_unlikely(&sched_smt_present))
1347 __update_idle_core(rq);
1348}
1349
1350#else
1351static inline void update_idle_core(struct rq *rq) { }
1352#endif
1353
1354DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1355
1356#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
1357#define this_rq() this_cpu_ptr(&runqueues)
1358#define task_rq(p) cpu_rq(task_cpu(p))
1359#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1360#define raw_rq() raw_cpu_ptr(&runqueues)
1361
1362#ifdef CONFIG_FAIR_GROUP_SCHED
1363static inline struct task_struct *task_of(struct sched_entity *se)
1364{
1365 SCHED_WARN_ON(!entity_is_task(se));
1366 return container_of(se, struct task_struct, se);
1367}
1368
1369static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
1370{
1371 return p->se.cfs_rq;
1372}
1373
1374
1375static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
1376{
1377 return se->cfs_rq;
1378}
1379
1380
1381static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
1382{
1383 return grp->my_q;
1384}
1385
1386#else
1387
1388static inline struct task_struct *task_of(struct sched_entity *se)
1389{
1390 return container_of(se, struct task_struct, se);
1391}
1392
1393static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
1394{
1395 return &task_rq(p)->cfs;
1396}
1397
1398static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
1399{
1400 struct task_struct *p = task_of(se);
1401 struct rq *rq = task_rq(p);
1402
1403 return &rq->cfs;
1404}
1405
1406
1407static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
1408{
1409 return NULL;
1410}
1411#endif
1412
1413extern void update_rq_clock(struct rq *rq);
1414
1415static inline u64 __rq_clock_broken(struct rq *rq)
1416{
1417 return READ_ONCE(rq->clock);
1418}
1419
1420
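/*
 * rq::clock_update_flags bits:
 *
 * %RQCF_REQ_SKIP - will request skipping of clock update on the next
 *   call to __schedule(). This is an optimisation to avoid
 *   neighbouring rq clock updates.
 *
 * %RQCF_ACT_SKIP - is set from inside of __schedule() when skipping is
 *   in effect and calls to update_rq_clock() are being ignored.
 *
 * %RQCF_UPDATED - is a debug flag that indicates whether a call has been
 *   made to update_rq_clock() since the last time rq::lock was pinned.
 */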
1443#define RQCF_REQ_SKIP 0x01
1444#define RQCF_ACT_SKIP 0x02
1445#define RQCF_UPDATED 0x04
1446
1447static inline void assert_clock_updated(struct rq *rq)
1448{
1449
1450
1451
1452
1453 SCHED_WARN_ON(rq->clock_update_flags < RQCF_ACT_SKIP);
1454}
1455
1456static inline u64 rq_clock(struct rq *rq)
1457{
1458 lockdep_assert_rq_held(rq);
1459 assert_clock_updated(rq);
1460
1461 return rq->clock;
1462}
1463
1464static inline u64 rq_clock_task(struct rq *rq)
1465{
1466 lockdep_assert_rq_held(rq);
1467 assert_clock_updated(rq);
1468
1469 return rq->clock_task;
1470}
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483extern int sched_thermal_decay_shift;
1484
1485static inline u64 rq_clock_thermal(struct rq *rq)
1486{
1487 return rq_clock_task(rq) >> sched_thermal_decay_shift;
1488}
1489
1490static inline void rq_clock_skip_update(struct rq *rq)
1491{
1492 lockdep_assert_rq_held(rq);
1493 rq->clock_update_flags |= RQCF_REQ_SKIP;
1494}
1495
1496
1497
1498
1499
1500static inline void rq_clock_cancel_skipupdate(struct rq *rq)
1501{
1502 lockdep_assert_rq_held(rq);
1503 rq->clock_update_flags &= ~RQCF_REQ_SKIP;
1504}
1505
1506struct rq_flags {
1507 unsigned long flags;
1508 struct pin_cookie cookie;
1509#ifdef CONFIG_SCHED_DEBUG
1510
1511
1512
1513
1514
1515 unsigned int clock_update_flags;
1516#endif
1517};
1518
1519extern struct callback_head balance_push_callback;
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
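/*
 * Lockdep annotation that keeps rq->lock "pinned": code that has access to
 * the rq cannot accidentally unlock it while a caller relies on it staying
 * held; rq_unpin_lock()/rq_repin_lock() end and resume the pinned section.
 */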
1531static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
1532{
1533 rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
1534
1535#ifdef CONFIG_SCHED_DEBUG
1536 rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
1537 rf->clock_update_flags = 0;
1538#ifdef CONFIG_SMP
1539 SCHED_WARN_ON(rq->balance_callback && rq->balance_callback != &balance_push_callback);
1540#endif
1541#endif
1542}
1543
1544static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
1545{
1546#ifdef CONFIG_SCHED_DEBUG
1547 if (rq->clock_update_flags > RQCF_ACT_SKIP)
1548 rf->clock_update_flags = RQCF_UPDATED;
1549#endif
1550
1551 lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
1552}
1553
1554static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
1555{
1556 lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
1557
1558#ifdef CONFIG_SCHED_DEBUG
1559
1560
1561
1562 rq->clock_update_flags |= rf->clock_update_flags;
1563#endif
1564}
1565
1566struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1567 __acquires(rq->lock);
1568
1569struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
1570 __acquires(p->pi_lock)
1571 __acquires(rq->lock);
1572
1573static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
1574 __releases(rq->lock)
1575{
1576 rq_unpin_lock(rq, rf);
1577 raw_spin_rq_unlock(rq);
1578}
1579
1580static inline void
1581task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1582 __releases(rq->lock)
1583 __releases(p->pi_lock)
1584{
1585 rq_unpin_lock(rq, rf);
1586 raw_spin_rq_unlock(rq);
1587 raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
1588}
1589
1590static inline void
1591rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
1592 __acquires(rq->lock)
1593{
1594 raw_spin_rq_lock_irqsave(rq, rf->flags);
1595 rq_pin_lock(rq, rf);
1596}
1597
1598static inline void
1599rq_lock_irq(struct rq *rq, struct rq_flags *rf)
1600 __acquires(rq->lock)
1601{
1602 raw_spin_rq_lock_irq(rq);
1603 rq_pin_lock(rq, rf);
1604}
1605
1606static inline void
1607rq_lock(struct rq *rq, struct rq_flags *rf)
1608 __acquires(rq->lock)
1609{
1610 raw_spin_rq_lock(rq);
1611 rq_pin_lock(rq, rf);
1612}
1613
1614static inline void
1615rq_relock(struct rq *rq, struct rq_flags *rf)
1616 __acquires(rq->lock)
1617{
1618 raw_spin_rq_lock(rq);
1619 rq_repin_lock(rq, rf);
1620}
1621
1622static inline void
1623rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
1624 __releases(rq->lock)
1625{
1626 rq_unpin_lock(rq, rf);
1627 raw_spin_rq_unlock_irqrestore(rq, rf->flags);
1628}
1629
1630static inline void
1631rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
1632 __releases(rq->lock)
1633{
1634 rq_unpin_lock(rq, rf);
1635 raw_spin_rq_unlock_irq(rq);
1636}
1637
1638static inline void
1639rq_unlock(struct rq *rq, struct rq_flags *rf)
1640 __releases(rq->lock)
1641{
1642 rq_unpin_lock(rq, rf);
1643 raw_spin_rq_unlock(rq);
1644}
1645
1646static inline struct rq *
1647this_rq_lock_irq(struct rq_flags *rf)
1648 __acquires(rq->lock)
1649{
1650 struct rq *rq;
1651
1652 local_irq_disable();
1653 rq = this_rq();
1654 rq_lock(rq, rf);
1655 return rq;
1656}
1657
1658#ifdef CONFIG_NUMA
1659enum numa_topology_type {
1660 NUMA_DIRECT,
1661 NUMA_GLUELESS_MESH,
1662 NUMA_BACKPLANE,
1663};
1664extern enum numa_topology_type sched_numa_topology_type;
1665extern int sched_max_numa_distance;
1666extern bool find_numa_distance(int distance);
1667extern void sched_init_numa(void);
1668extern void sched_domains_numa_masks_set(unsigned int cpu);
1669extern void sched_domains_numa_masks_clear(unsigned int cpu);
1670extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
1671#else
1672static inline void sched_init_numa(void) { }
1673static inline void sched_domains_numa_masks_set(unsigned int cpu) { }
1674static inline void sched_domains_numa_masks_clear(unsigned int cpu) { }
1675static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
1676{
1677 return nr_cpu_ids;
1678}
1679#endif
1680
1681#ifdef CONFIG_NUMA_BALANCING
1682
1683enum numa_faults_stats {
1684 NUMA_MEM = 0,
1685 NUMA_CPU,
1686 NUMA_MEMBUF,
1687 NUMA_CPUBUF
1688};
1689extern void sched_setnuma(struct task_struct *p, int node);
1690extern int migrate_task_to(struct task_struct *p, int cpu);
1691extern int migrate_swap(struct task_struct *p, struct task_struct *t,
1692 int cpu, int scpu);
1693extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
1694#else
1695static inline void
1696init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
1697{
1698}
1699#endif
1700
1701#ifdef CONFIG_SMP
1702
1703static inline void
1704queue_balance_callback(struct rq *rq,
1705 struct callback_head *head,
1706 void (*func)(struct rq *rq))
1707{
1708 lockdep_assert_rq_held(rq);
1709
1710 if (unlikely(head->next || rq->balance_callback == &balance_push_callback))
1711 return;
1712
1713 head->func = (void (*)(struct callback_head *))func;
1714 head->next = rq->balance_callback;
1715 rq->balance_callback = head;
1716}
1717
1718#define rcu_dereference_check_sched_domain(p) \
1719 rcu_dereference_check((p), \
1720 lockdep_is_held(&sched_domains_mutex))
1721
1722
1723
1724
1725
1726
1727
1728
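/*
 * The domain tree (rq->sd) is protected by RCU; it may only be accessed
 * from within RCU / preempt-disabled sections.
 */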
1729#define for_each_domain(cpu, __sd) \
1730 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
1731 __sd; __sd = __sd->parent)
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
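/*
 * highest_flag_domain - Return the highest sched_domain of @cpu that has
 * @flag set, or NULL if no domain has it.
 */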
1742static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
1743{
1744 struct sched_domain *sd, *hsd = NULL;
1745
1746 for_each_domain(cpu, sd) {
1747 if (!(sd->flags & flag))
1748 break;
1749 hsd = sd;
1750 }
1751
1752 return hsd;
1753}
1754
1755static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
1756{
1757 struct sched_domain *sd;
1758
1759 for_each_domain(cpu, sd) {
1760 if (sd->flags & flag)
1761 break;
1762 }
1763
1764 return sd;
1765}
1766
1767DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc);
1768DECLARE_PER_CPU(int, sd_llc_size);
1769DECLARE_PER_CPU(int, sd_llc_id);
1770DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
1771DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
1772DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
1773DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity);
1774extern struct static_key_false sched_asym_cpucapacity;
1775
1776struct sched_group_capacity {
1777 atomic_t ref;
1778
1779
1780
1781
1782 unsigned long capacity;
1783 unsigned long min_capacity;
1784 unsigned long max_capacity;
1785 unsigned long next_update;
1786 int imbalance;
1787
1788#ifdef CONFIG_SCHED_DEBUG
1789 int id;
1790#endif
1791
1792 unsigned long cpumask[];
1793};
1794
1795struct sched_group {
1796 struct sched_group *next;
1797 atomic_t ref;
1798
1799 unsigned int group_weight;
1800 struct sched_group_capacity *sgc;
1801 int asym_prefer_cpu;
1802
1803
1804
1805
1806
1807
1808
1809
1810 unsigned long cpumask[];
1811};
1812
1813static inline struct cpumask *sched_group_span(struct sched_group *sg)
1814{
1815 return to_cpumask(sg->cpumask);
1816}
1817
1818
1819
1820
1821static inline struct cpumask *group_balance_mask(struct sched_group *sg)
1822{
1823 return to_cpumask(sg->sgc->cpumask);
1824}
1825
1826
1827
1828
1829
1830static inline unsigned int group_first_cpu(struct sched_group *group)
1831{
1832 return cpumask_first(sched_group_span(group));
1833}
1834
1835extern int group_balance_cpu(struct sched_group *sg);
1836
1837#ifdef CONFIG_SCHED_DEBUG
1838void update_sched_domain_debugfs(void);
1839void dirty_sched_domain_sysctl(int cpu);
1840#else
1841static inline void update_sched_domain_debugfs(void)
1842{
1843}
1844static inline void dirty_sched_domain_sysctl(int cpu)
1845{
1846}
1847#endif
1848
1849extern int sched_update_scaling(void);
1850
1851extern void flush_smp_call_function_from_idle(void);
1852
1853#else
1854static inline void flush_smp_call_function_from_idle(void) { }
1855#endif
1856
1857#include "stats.h"
1858#include "autogroup.h"
1859
1860#ifdef CONFIG_CGROUP_SCHED
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
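/*
 * Return the group to which this task belongs. p->sched_task_group is used
 * rather than task_css(), because the cgroup core updates the css pointer
 * before cgroup_subsys::attach() is called, so it could be observed out of
 * sync with the scheduler's own view.
 */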
1875static inline struct task_group *task_group(struct task_struct *p)
1876{
1877 return p->sched_task_group;
1878}
1879
1880
1881static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
1882{
1883#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
1884 struct task_group *tg = task_group(p);
1885#endif
1886
1887#ifdef CONFIG_FAIR_GROUP_SCHED
1888 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
1889 p->se.cfs_rq = tg->cfs_rq[cpu];
1890 p->se.parent = tg->se[cpu];
1891#endif
1892
1893#ifdef CONFIG_RT_GROUP_SCHED
1894 p->rt.rt_rq = tg->rt_rq[cpu];
1895 p->rt.parent = tg->rt_se[cpu];
1896#endif
1897}
1898
1899#else
1900
1901static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
1902static inline struct task_group *task_group(struct task_struct *p)
1903{
1904 return NULL;
1905}
1906
1907#endif
1908
1909static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
1910{
1911 set_task_rq(p, cpu);
1912#ifdef CONFIG_SMP
1913
1914
1915
1916
1917
1918 smp_wmb();
1919#ifdef CONFIG_THREAD_INFO_IN_TASK
1920 WRITE_ONCE(p->cpu, cpu);
1921#else
1922 WRITE_ONCE(task_thread_info(p)->cpu, cpu);
1923#endif
1924 p->wake_cpu = cpu;
1925#endif
1926}
1927
1928
1929
1930
1931#ifdef CONFIG_SCHED_DEBUG
1932# include <linux/static_key.h>
1933# define const_debug __read_mostly
1934#else
1935# define const_debug const
1936#endif
1937
1938#define SCHED_FEAT(name, enabled) \
1939 __SCHED_FEAT_##name ,
1940
1941enum {
1942#include "features.h"
1943 __SCHED_FEAT_NR,
1944};
1945
1946#undef SCHED_FEAT
1947
1948#ifdef CONFIG_SCHED_DEBUG
1949
1950
1951
1952
1953
1954extern const_debug unsigned int sysctl_sched_features;
1955
1956#ifdef CONFIG_JUMP_LABEL
1957#define SCHED_FEAT(name, enabled) \
1958static __always_inline bool static_branch_##name(struct static_key *key) \
1959{ \
1960 return static_key_##enabled(key); \
1961}
1962
1963#include "features.h"
1964#undef SCHED_FEAT
1965
1966extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
1967#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1968
1969#else
1970
1971#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1972
1973#endif
1974
1975#else
1976
1977
1978
1979
1980
1981
1982#define SCHED_FEAT(name, enabled) \
1983 (1UL << __SCHED_FEAT_##name) * enabled |
1984static const_debug __maybe_unused unsigned int sysctl_sched_features =
1985#include "features.h"
1986 0;
1987#undef SCHED_FEAT
1988
1989#define sched_feat(x) !!(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
1990
1991#endif
1992
1993extern struct static_key_false sched_numa_balancing;
1994extern struct static_key_false sched_schedstats;
1995
1996static inline u64 global_rt_period(void)
1997{
1998 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1999}
2000
2001static inline u64 global_rt_runtime(void)
2002{
2003 if (sysctl_sched_rt_runtime < 0)
2004 return RUNTIME_INF;
2005
2006 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
2007}
2008
2009static inline int task_current(struct rq *rq, struct task_struct *p)
2010{
2011 return rq->curr == p;
2012}
2013
2014static inline int task_running(struct rq *rq, struct task_struct *p)
2015{
2016#ifdef CONFIG_SMP
2017 return p->on_cpu;
2018#else
2019 return task_current(rq, p);
2020#endif
2021}
2022
2023static inline int task_on_rq_queued(struct task_struct *p)
2024{
2025 return p->on_rq == TASK_ON_RQ_QUEUED;
2026}
2027
2028static inline int task_on_rq_migrating(struct task_struct *p)
2029{
2030 return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
2031}
2032
2033
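/* Wake flags. The first three directly map to some SD flag values: */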
2034#define WF_EXEC 0x02
2035#define WF_FORK 0x04
2036#define WF_TTWU 0x08
2037
2038#define WF_SYNC 0x10
2039#define WF_MIGRATED 0x20
2040#define WF_ON_CPU 0x40
2041
2042#ifdef CONFIG_SMP
2043static_assert(WF_EXEC == SD_BALANCE_EXEC);
2044static_assert(WF_FORK == SD_BALANCE_FORK);
2045static_assert(WF_TTWU == SD_BALANCE_WAKE);
2046#endif
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
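/*
 * Load weight and inverse weight (2^32 / weight) used for SCHED_IDLE tasks.
 * Nice levels map to weights via sched_prio_to_weight[] and to inverse
 * weights via sched_prio_to_wmult[]; e.g. WMULT_IDLEPRIO == 2^32 / 3,
 * rounded down.
 */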
2057#define WEIGHT_IDLEPRIO 3
2058#define WMULT_IDLEPRIO 1431655765
2059
2060extern const int sched_prio_to_weight[40];
2061extern const u32 sched_prio_to_wmult[40];
2062
2063
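/*
 * {de,en}queue flags:
 *
 * DEQUEUE_SLEEP  - task is no longer runnable
 * ENQUEUE_WAKEUP - task just became runnable
 *
 * SAVE/RESTORE   - an otherwise spurious dequeue/enqueue, done to ensure tasks
 *                  are in a known state which allows modification. Such pairs
 *                  should preserve as much state as possible.
 *
 * MOVE           - paired with SAVE/RESTORE, explicitly does not preserve the
 *                  location of the task within its runqueue.
 *
 * NOCLOCK        - skip the update_rq_clock() (avoids double updates)
 *
 * ENQUEUE_HEAD      - place at the head of the runqueue (tail if not specified)
 * ENQUEUE_REPLENISH - CBS (replenish runtime and postpone deadline)
 * ENQUEUE_MIGRATED  - the task was migrated during wakeup
 */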
2082#define DEQUEUE_SLEEP 0x01
2083#define DEQUEUE_SAVE 0x02
2084#define DEQUEUE_MOVE 0x04
2085#define DEQUEUE_NOCLOCK 0x08
2086
2087#define ENQUEUE_WAKEUP 0x01
2088#define ENQUEUE_RESTORE 0x02
2089#define ENQUEUE_MOVE 0x04
2090#define ENQUEUE_NOCLOCK 0x08
2091
2092#define ENQUEUE_HEAD 0x10
2093#define ENQUEUE_REPLENISH 0x20
2094#ifdef CONFIG_SMP
2095#define ENQUEUE_MIGRATED 0x40
2096#else
2097#define ENQUEUE_MIGRATED 0x00
2098#endif
2099
2100#define RETRY_TASK ((void *)-1UL)
2101
2102struct sched_class {
2103
2104#ifdef CONFIG_UCLAMP_TASK
2105 int uclamp_enabled;
2106#endif
2107
2108 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
2109 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
2110 void (*yield_task) (struct rq *rq);
2111 bool (*yield_to_task)(struct rq *rq, struct task_struct *p);
2112
2113 void (*check_preempt_curr)(struct rq *rq, struct task_struct *p, int flags);
2114
2115 struct task_struct *(*pick_next_task)(struct rq *rq);
2116
2117 void (*put_prev_task)(struct rq *rq, struct task_struct *p);
2118 void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first);
2119
2120#ifdef CONFIG_SMP
2121 int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2122 int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags);
2123
2124 struct task_struct * (*pick_task)(struct rq *rq);
2125
2126 void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
2127
2128 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
2129
2130 void (*set_cpus_allowed)(struct task_struct *p,
2131 const struct cpumask *newmask,
2132 u32 flags);
2133
2134 void (*rq_online)(struct rq *rq);
2135 void (*rq_offline)(struct rq *rq);
2136
2137 struct rq *(*find_lock_rq)(struct task_struct *p, struct rq *rq);
2138#endif
2139
2140 void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
2141 void (*task_fork)(struct task_struct *p);
2142 void (*task_dead)(struct task_struct *p);
2143
2144
2145
2146
2147
2148
2149 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
2150 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
2151 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
2152 int oldprio);
2153
2154 unsigned int (*get_rr_interval)(struct rq *rq,
2155 struct task_struct *task);
2156
2157 void (*update_curr)(struct rq *rq);
2158
2159#define TASK_SET_GROUP 0
2160#define TASK_MOVE_GROUP 1
2161
2162#ifdef CONFIG_FAIR_GROUP_SCHED
2163 void (*task_change_group)(struct task_struct *p, int type);
2164#endif
2165};
2166
2167static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
2168{
2169 WARN_ON_ONCE(rq->curr != prev);
2170 prev->sched_class->put_prev_task(rq, prev);
2171}
2172
2173static inline void set_next_task(struct rq *rq, struct task_struct *next)
2174{
2175 next->sched_class->set_next_task(rq, next, false);
2176}
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
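/*
 * Helper to define a sched_class instance; each one is placed in its own,
 * named section so that the linker script can order them by priority and
 * for_each_class() below can iterate them by address.
 */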
2187#define DEFINE_SCHED_CLASS(name) \
2188const struct sched_class name##_sched_class \
2189 __aligned(__alignof__(struct sched_class)) \
2190 __section("__" #name "_sched_class")
2191
2192
2193extern struct sched_class __begin_sched_classes[];
2194extern struct sched_class __end_sched_classes[];
2195
2196#define sched_class_highest (__end_sched_classes - 1)
2197#define sched_class_lowest (__begin_sched_classes - 1)
2198
2199#define for_class_range(class, _from, _to) \
2200 for (class = (_from); class != (_to); class--)
2201
2202#define for_each_class(class) \
2203 for_class_range(class, sched_class_highest, sched_class_lowest)
2204
2205extern const struct sched_class stop_sched_class;
2206extern const struct sched_class dl_sched_class;
2207extern const struct sched_class rt_sched_class;
2208extern const struct sched_class fair_sched_class;
2209extern const struct sched_class idle_sched_class;
2210
2211static inline bool sched_stop_runnable(struct rq *rq)
2212{
2213 return rq->stop && task_on_rq_queued(rq->stop);
2214}
2215
2216static inline bool sched_dl_runnable(struct rq *rq)
2217{
2218 return rq->dl.dl_nr_running > 0;
2219}
2220
2221static inline bool sched_rt_runnable(struct rq *rq)
2222{
2223 return rq->rt.rt_queued > 0;
2224}
2225
2226static inline bool sched_fair_runnable(struct rq *rq)
2227{
2228 return rq->cfs.nr_running > 0;
2229}
2230
2231extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
2232extern struct task_struct *pick_next_task_idle(struct rq *rq);
2233
2234#define SCA_CHECK 0x01
2235#define SCA_MIGRATE_DISABLE 0x02
2236#define SCA_MIGRATE_ENABLE 0x04
2237
2238#ifdef CONFIG_SMP
2239
2240extern void update_group_capacity(struct sched_domain *sd, int cpu);
2241
2242extern void trigger_load_balance(struct rq *rq);
2243
2244extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
2245
2246static inline struct task_struct *get_push_task(struct rq *rq)
2247{
2248 struct task_struct *p = rq->curr;
2249
2250 lockdep_assert_rq_held(rq);
2251
2252 if (rq->push_busy)
2253 return NULL;
2254
2255 if (p->nr_cpus_allowed == 1)
2256 return NULL;
2257
2258 if (p->migration_disabled)
2259 return NULL;
2260
2261 rq->push_busy = true;
2262 return get_task_struct(p);
2263}
2264
2265extern int push_cpu_stop(void *arg);
2266
2267#endif
2268
2269#ifdef CONFIG_CPU_IDLE
2270static inline void idle_set_state(struct rq *rq,
2271 struct cpuidle_state *idle_state)
2272{
2273 rq->idle_state = idle_state;
2274}
2275
2276static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2277{
2278 SCHED_WARN_ON(!rcu_read_lock_held());
2279
2280 return rq->idle_state;
2281}
2282#else
2283static inline void idle_set_state(struct rq *rq,
2284 struct cpuidle_state *idle_state)
2285{
2286}
2287
2288static inline struct cpuidle_state *idle_get_state(struct rq *rq)
2289{
2290 return NULL;
2291}
2292#endif
2293
2294extern void schedule_idle(void);
2295
2296extern void sysrq_sched_debug_show(void);
2297extern void sched_init_granularity(void);
2298extern void update_max_interval(void);
2299
2300extern void init_sched_dl_class(void);
2301extern void init_sched_rt_class(void);
2302extern void init_sched_fair_class(void);
2303
2304extern void reweight_task(struct task_struct *p, int prio);
2305
2306extern void resched_curr(struct rq *rq);
2307extern void resched_cpu(int cpu);
2308
2309extern struct rt_bandwidth def_rt_bandwidth;
2310extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
2311
2312extern struct dl_bandwidth def_dl_bandwidth;
2313extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
2314extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
2315extern void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se);
2316
2317#define BW_SHIFT 20
2318#define BW_UNIT (1 << BW_SHIFT)
2319#define RATIO_SHIFT 8
2320#define MAX_BW_BITS (64 - BW_SHIFT)
2321#define MAX_BW ((1ULL << MAX_BW_BITS) - 1)
2322unsigned long to_ratio(u64 period, u64 runtime);
2323
2324extern void init_entity_runnable_average(struct sched_entity *se);
2325extern void post_init_entity_util_avg(struct task_struct *p);
2326
2327#ifdef CONFIG_NO_HZ_FULL
2328extern bool sched_can_stop_tick(struct rq *rq);
2329extern int __init sched_tick_offload_init(void);
2330
2331
2332
2333
2334
2335
2336static inline void sched_update_tick_dependency(struct rq *rq)
2337{
2338 int cpu = cpu_of(rq);
2339
2340 if (!tick_nohz_full_cpu(cpu))
2341 return;
2342
2343 if (sched_can_stop_tick(rq))
2344 tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
2345 else
2346 tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
2347}
2348#else
2349static inline int sched_tick_offload_init(void) { return 0; }
2350static inline void sched_update_tick_dependency(struct rq *rq) { }
2351#endif
2352
2353static inline void add_nr_running(struct rq *rq, unsigned count)
2354{
2355 unsigned prev_nr = rq->nr_running;
2356
2357 rq->nr_running = prev_nr + count;
2358 if (trace_sched_update_nr_running_tp_enabled()) {
2359 call_trace_sched_update_nr_running(rq, count);
2360 }
2361
2362#ifdef CONFIG_SMP
2363 if (prev_nr < 2 && rq->nr_running >= 2) {
2364 if (!READ_ONCE(rq->rd->overload))
2365 WRITE_ONCE(rq->rd->overload, 1);
2366 }
2367#endif
2368
2369 sched_update_tick_dependency(rq);
2370}
2371
2372static inline void sub_nr_running(struct rq *rq, unsigned count)
2373{
2374 rq->nr_running -= count;
2375 if (trace_sched_update_nr_running_tp_enabled()) {
2376 call_trace_sched_update_nr_running(rq, -count);
2377 }
2378
2379
2380 sched_update_tick_dependency(rq);
2381}
2382
2383extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
2384extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
2385
2386extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
2387
2388extern const_debug unsigned int sysctl_sched_nr_migrate;
2389extern const_debug unsigned int sysctl_sched_migration_cost;
2390
2391#ifdef CONFIG_SCHED_HRTICK
2392
2393
2394
2395
2396
2397
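/*
 * hrtick is only used when the CPU is active and hrtimers actually run in
 * high-resolution mode; the fair and deadline classes additionally gate it
 * behind the HRTICK / HRTICK_DL scheduler features.
 */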
2398static inline int hrtick_enabled(struct rq *rq)
2399{
2400 if (!cpu_active(cpu_of(rq)))
2401 return 0;
2402 return hrtimer_is_hres_active(&rq->hrtick_timer);
2403}
2404
2405static inline int hrtick_enabled_fair(struct rq *rq)
2406{
2407 if (!sched_feat(HRTICK))
2408 return 0;
2409 return hrtick_enabled(rq);
2410}
2411
2412static inline int hrtick_enabled_dl(struct rq *rq)
2413{
2414 if (!sched_feat(HRTICK_DL))
2415 return 0;
2416 return hrtick_enabled(rq);
2417}
2418
2419void hrtick_start(struct rq *rq, u64 delay);
2420
2421#else
2422
2423static inline int hrtick_enabled_fair(struct rq *rq)
2424{
2425 return 0;
2426}
2427
2428static inline int hrtick_enabled_dl(struct rq *rq)
2429{
2430 return 0;
2431}
2432
2433static inline int hrtick_enabled(struct rq *rq)
2434{
2435 return 0;
2436}
2437
2438#endif
2439
2440#ifndef arch_scale_freq_tick
2441static __always_inline
2442void arch_scale_freq_tick(void)
2443{
2444}
2445#endif
2446
2447#ifndef arch_scale_freq_capacity
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
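/*
 * arch_scale_freq_capacity - get the frequency scale factor of a given CPU,
 * normalized against SCHED_CAPACITY_SCALE, i.e.
 * f_curr / f_max * SCHED_CAPACITY_SCALE.
 */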
2458static __always_inline
2459unsigned long arch_scale_freq_capacity(int cpu)
2460{
2461 return SCHED_CAPACITY_SCALE;
2462}
2463#endif
2464
2465
2466#ifdef CONFIG_SMP
2467
2468static inline bool rq_order_less(struct rq *rq1, struct rq *rq2)
2469{
2470#ifdef CONFIG_SCHED_CORE
2471
2472
2473
2474
2475
2476
2477
2478
2479
2480
2481
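	/*
	 * When core scheduling is enabled the SMT siblings of a core share a
	 * core-wide rq lock, so order by the core's leading CPU first to keep
	 * a single, consistent lock order across siblings.
	 */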
2482 if (rq1->core->cpu < rq2->core->cpu)
2483 return true;
2484 if (rq1->core->cpu > rq2->core->cpu)
2485 return false;
2486
2487
2488
2489
2490#endif
2491 return rq1->cpu < rq2->cpu;
2492}
2493
2494extern void double_rq_lock(struct rq *rq1, struct rq *rq2);
2495
2496#ifdef CONFIG_PREEMPTION
2497
2498
2499
2500
2501
2502
2503
2504
2505
2506static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2507 __releases(this_rq->lock)
2508 __acquires(busiest->lock)
2509 __acquires(this_rq->lock)
2510{
2511 raw_spin_rq_unlock(this_rq);
2512 double_rq_lock(this_rq, busiest);
2513
2514 return 1;
2515}
2516
2517#else
2518
2519
2520
2521
2522
2523
2524
2525static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
2526 __releases(this_rq->lock)
2527 __acquires(busiest->lock)
2528 __acquires(this_rq->lock)
2529{
2530 if (__rq_lockp(this_rq) == __rq_lockp(busiest))
2531 return 0;
2532
2533 if (likely(raw_spin_rq_trylock(busiest)))
2534 return 0;
2535
2536 if (rq_order_less(this_rq, busiest)) {
2537 raw_spin_rq_lock_nested(busiest, SINGLE_DEPTH_NESTING);
2538 return 0;
2539 }
2540
2541 raw_spin_rq_unlock(this_rq);
2542 double_rq_lock(this_rq, busiest);
2543
2544 return 1;
2545}
2546
2547#endif
2548
2549
2550
2551
2552static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
2553{
2554 lockdep_assert_irqs_disabled();
2555
2556 return _double_lock_balance(this_rq, busiest);
2557}
2558
2559static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
2560 __releases(busiest->lock)
2561{
2562 if (__rq_lockp(this_rq) != __rq_lockp(busiest))
2563 raw_spin_rq_unlock(busiest);
2564 lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
2565}
2566
2567static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
2568{
2569 if (l1 > l2)
2570 swap(l1, l2);
2571
2572 spin_lock(l1);
2573 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2574}
2575
2576static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
2577{
2578 if (l1 > l2)
2579 swap(l1, l2);
2580
2581 spin_lock_irq(l1);
2582 spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2583}
2584
2585static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
2586{
2587 if (l1 > l2)
2588 swap(l1, l2);
2589
2590 raw_spin_lock(l1);
2591 raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
2592}
2593
2594
2595
2596
2597
2598
2599
2600static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2601 __releases(rq1->lock)
2602 __releases(rq2->lock)
2603{
2604 if (__rq_lockp(rq1) != __rq_lockp(rq2))
2605 raw_spin_rq_unlock(rq2);
2606 else
2607 __release(rq2->lock);
2608 raw_spin_rq_unlock(rq1);
2609}
2610
2611extern void set_rq_online (struct rq *rq);
2612extern void set_rq_offline(struct rq *rq);
2613extern bool sched_smp_initialized;
2614
2615#else
2616
2617
2618
2619
2620
2621
2622
2623static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
2624 __acquires(rq1->lock)
2625 __acquires(rq2->lock)
2626{
2627 BUG_ON(!irqs_disabled());
2628 BUG_ON(rq1 != rq2);
2629 raw_spin_rq_lock(rq1);
2630 __acquire(rq2->lock);
2631}
2632
2633
2634
2635
2636
2637
2638
2639static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
2640 __releases(rq1->lock)
2641 __releases(rq2->lock)
2642{
2643 BUG_ON(rq1 != rq2);
2644 raw_spin_rq_unlock(rq1);
2645 __release(rq2->lock);
2646}
2647
2648#endif
2649
2650extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
2651extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
2652
2653#ifdef CONFIG_SCHED_DEBUG
2654extern bool sched_debug_verbose;
2655
2656extern void print_cfs_stats(struct seq_file *m, int cpu);
2657extern void print_rt_stats(struct seq_file *m, int cpu);
2658extern void print_dl_stats(struct seq_file *m, int cpu);
2659extern void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
2660extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2661extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
2662
2663extern void resched_latency_warn(int cpu, u64 latency);
2664#ifdef CONFIG_NUMA_BALANCING
2665extern void
2666show_numa_stats(struct task_struct *p, struct seq_file *m);
2667extern void
2668print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
2669 unsigned long tpf, unsigned long gsf, unsigned long gpf);
2670#endif
2671#else
2672static inline void resched_latency_warn(int cpu, u64 latency) {}
2673#endif
2674
2675extern void init_cfs_rq(struct cfs_rq *cfs_rq);
2676extern void init_rt_rq(struct rt_rq *rt_rq);
2677extern void init_dl_rq(struct dl_rq *dl_rq);
2678
2679extern void cfs_bandwidth_usage_inc(void);
2680extern void cfs_bandwidth_usage_dec(void);
2681
2682#ifdef CONFIG_NO_HZ_COMMON
2683#define NOHZ_BALANCE_KICK_BIT 0
2684#define NOHZ_STATS_KICK_BIT 1
2685#define NOHZ_NEWILB_KICK_BIT 2
2686
2687#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
2688#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
2689#define NOHZ_NEWILB_KICK BIT(NOHZ_NEWILB_KICK_BIT)
2690
2691#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
2692
2693#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
2694
2695extern void nohz_balance_exit_idle(struct rq *rq);
2696#else
2697static inline void nohz_balance_exit_idle(struct rq *rq) { }
2698#endif
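
/*
 * A sketch of how the kick bits are typically requested (mirroring, but not
 * copied from, the nohz idle-balance kick in fair.c): the requester ORs the
 * wanted bits into the target CPU's nohz_flags and only sends the IPI when
 * no NOHZ_KICK_MASK bit was already pending:
 *
 *	unsigned int flags;
 *
 *	flags = atomic_fetch_or(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK,
 *				nohz_flags(cpu));
 *	if (!(flags & NOHZ_KICK_MASK))
 *		... send the idle-balance IPI to cpu ...
 */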
2699
2700#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
2701extern void nohz_run_idle_balance(int cpu);
2702#else
2703static inline void nohz_run_idle_balance(int cpu) { }
2704#endif
2705
2706#ifdef CONFIG_SMP
2707static inline
2708void __dl_update(struct dl_bw *dl_b, s64 bw)
2709{
2710 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
2711 int i;
2712
2713 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
2714 "sched RCU must be held");
2715 for_each_cpu_and(i, rd->span, cpu_active_mask) {
2716 struct rq *rq = cpu_rq(i);
2717
2718 rq->dl.extra_bw += bw;
2719 }
2720}
2721#else
2722static inline
2723void __dl_update(struct dl_bw *dl_b, s64 bw)
2724{
2725 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
2726
2727 dl->extra_bw += bw;
2728}
2729#endif
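
/*
 * Worked example (illustrative; the actual call sites live in the deadline
 * class code): when a DL task with bandwidth tsk_bw is admitted on a root
 * domain spanning N active CPUs, admission ends up doing roughly
 *
 *	__dl_update(dl_b, -(tsk_bw / N));
 *
 * so every rq in the span sees its dl.extra_bw shrink by the per-CPU share
 * of the new reservation; releasing the bandwidth calls it with the opposite
 * sign and gives that share back.
 */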
2730
2731
2732#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2733struct irqtime {
2734 u64 total;
2735 u64 tick_delta;
2736 u64 irq_start_time;
2737 struct u64_stats_sync sync;
2738};
2739
2740DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
2741
/*
 * Return a consistent snapshot of the per-CPU accumulated IRQ time.  The
 * u64_stats seqcount retry loop guards against torn reads of the 64-bit
 * total on 32-bit architectures while a concurrent writer updates it.
 */
2747static inline u64 irq_time_read(int cpu)
2748{
2749 struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
2750 unsigned int seq;
2751 u64 total;
2752
2753 do {
2754 seq = __u64_stats_fetch_begin(&irqtime->sync);
2755 total = irqtime->total;
2756 } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
2757
2758 return total;
2759}
2760#endif
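
/*
 * The writer side of the snapshot above (the irqtime accounting code in
 * cputime.c) is expected to bracket its updates with the matching u64_stats
 * helpers; a sketch, assuming delta nanoseconds of IRQ time on this CPU:
 *
 *	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 *
 *	u64_stats_update_begin(&irqtime->sync);
 *	irqtime->total += delta;
 *	irqtime->tick_delta += delta;
 *	u64_stats_update_end(&irqtime->sync);
 */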
2761
2762#ifdef CONFIG_CPU_FREQ
2763DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
2764
/**
 * cpufreq_update_util - Take a note about CPU utilization changes.
 * @rq: Runqueue to carry out the update for.
 * @flags: Update reason flags.
 *
 * This function is called by the scheduler on the CPU whose utilization is
 * being updated.
 *
 * It can only be called from RCU-sched read-side critical sections.
 *
 * The way cpufreq is currently arranged requires it to evaluate the CPU
 * performance state (frequency/voltage) on a regular basis to prevent it from
 * being stuck in a completely inadequate performance level for too long.
 * That is not guaranteed to happen if the updates are only triggered from CFS
 * and DL, though, because they may not be coming in if only RT tasks are
 * active all the time (or there are RT tasks only).
 *
 * As a workaround for that issue, this function is called periodically by the
 * RT sched class to trigger extra cpufreq updates to prevent it from
 * stalling, but that really is a band-aid.  Going forward it should be
 * replaced with solutions targeted more specifically at RT tasks.
 */
2787static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
2788{
2789 struct update_util_data *data;
2790
2791 data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
2792 cpu_of(rq)));
2793 if (data)
2794 data->func(data, rq_clock(rq), flags);
2795}
2796#else
2797static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
2798#endif
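
/*
 * Minimal usage sketch: SCHED_CPUFREQ_IOWAIT (from <linux/sched/cpufreq.h>)
 * is the flag the fair class passes when a task wakes from I/O wait so the
 * governor can apply its iowait boost immediately; whether and where a given
 * path passes it is up to the caller:
 *
 *	cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 *
 * Passing 0 as flags requests a plain utilization re-evaluation.
 */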
2799
2800#ifdef CONFIG_UCLAMP_TASK
2801unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
2802
/**
 * uclamp_rq_util_with - clamp @util with @rq and @p effective uclamp values.
 * @rq:		The rq to clamp against. Must not be NULL.
 * @util:	The util value to clamp.
 * @p:		The task to clamp against. Can be NULL if you want to clamp
 *		against @rq only.
 *
 * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
 *
 * If sched_uclamp_used static key is disabled, then just return the util
 * without any clamping since uclamp aggregation at the rq level in the fast
 * path is disabled, rendering this operation a NOP.
 *
 * Use uclamp_eff_value() if you don't care about uclamp values at rq level.
 * It will return the correct effective uclamp value of the task even if the
 * static key is disabled.
 */
2820static __always_inline
2821unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
2822 struct task_struct *p)
2823{
2824 unsigned long min_util = 0;
2825 unsigned long max_util = 0;
2826
2827 if (!static_branch_likely(&sched_uclamp_used))
2828 return util;
2829
2830 if (p) {
2831 min_util = uclamp_eff_value(p, UCLAMP_MIN);
2832 max_util = uclamp_eff_value(p, UCLAMP_MAX);
2833
		/*
		 * Ignore last runnable task's max clamp, as this task will
		 * reset it. Similarly, no need to read the rq's min clamp.
		 */
2838 if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
2839 goto out;
2840 }
2841
2842 min_util = max_t(unsigned long, min_util, READ_ONCE(rq->uclamp[UCLAMP_MIN].value));
2843 max_util = max_t(unsigned long, max_util, READ_ONCE(rq->uclamp[UCLAMP_MAX].value));
2844out:
	/*
	 * Since CPU's {min,max}_util clamps are MAX aggregated considering
	 * RUNNABLE tasks with _different_ clamps, we can end up with an
	 * inversion. Fix it now when the clamps are applied.
	 */
2850 if (unlikely(min_util >= max_util))
2851 return min_util;
2852
2853 return clamp(util, min_util, max_util);
2854}
2855
/*
 * When uclamp is compiled in, the aggregation at rq level is 'turned off'
 * by default in the fast path and only gets turned on once userspace
 * performs an operation that requires it.
 *
 * Returns true if userspace opted-in to use uclamp and aggregation at rq
 * level is enabled.
 */
2864static inline bool uclamp_is_used(void)
2865{
2866 return static_branch_likely(&sched_uclamp_used);
2867}
2868#else
2869static inline
2870unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
2871 struct task_struct *p)
2872{
2873 return util;
2874}
2875
2876static inline bool uclamp_is_used(void)
2877{
2878 return false;
2879}
2880#endif
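
/*
 * Worked example with illustrative numbers, assuming the sched_uclamp_used
 * static key is enabled: with no task passed in, a raw util of 300 on an rq
 * whose aggregated clamps are UCLAMP_MIN = 512 and UCLAMP_MAX = 800 yields
 *
 *	uclamp_rq_util_with(rq, 300, NULL) == clamp(300, 512, 800) == 512
 *
 * i.e. the boosted floor wins, while a raw util of 900 would be capped to
 * 800.  With uclamp unused the value passes through unmodified.
 */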
2881
2882#ifdef arch_scale_freq_capacity
2883# ifndef arch_scale_freq_invariant
2884# define arch_scale_freq_invariant() true
2885# endif
2886#else
2887# define arch_scale_freq_invariant() false
2888#endif
2889
2890#ifdef CONFIG_SMP
2891static inline unsigned long capacity_orig_of(int cpu)
2892{
2893 return cpu_rq(cpu)->cpu_capacity_orig;
2894}
2895
/**
 * enum cpu_util_type - CPU utilization type
 * @FREQUENCY_UTIL:	Utilization used to select frequency
 * @ENERGY_UTIL:	Utilization used during energy calculation
 *
 * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
 * need to be aggregated differently depending on the usage made of them.
 * This enum is used within effective_cpu_util() to differentiate the types
 * of utilization.
 */
2906enum cpu_util_type {
2907 FREQUENCY_UTIL,
2908 ENERGY_UTIL,
2909};
2910
2911unsigned long effective_cpu_util(int cpu, unsigned long util_cfs,
2912 unsigned long max, enum cpu_util_type type,
2913 struct task_struct *p);
2914
2915static inline unsigned long cpu_bw_dl(struct rq *rq)
2916{
2917 return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
2918}
2919
2920static inline unsigned long cpu_util_dl(struct rq *rq)
2921{
2922 return READ_ONCE(rq->avg_dl.util_avg);
2923}
2924
2925static inline unsigned long cpu_util_cfs(struct rq *rq)
2926{
2927 unsigned long util = READ_ONCE(rq->cfs.avg.util_avg);
2928
2929 if (sched_feat(UTIL_EST)) {
2930 util = max_t(unsigned long, util,
2931 READ_ONCE(rq->cfs.avg.util_est.enqueued));
2932 }
2933
2934 return util;
2935}
2936
2937static inline unsigned long cpu_util_rt(struct rq *rq)
2938{
2939 return READ_ONCE(rq->avg_rt.util_avg);
2940}
2941#endif
2942
2943#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
2944static inline unsigned long cpu_util_irq(struct rq *rq)
2945{
2946 return rq->avg_irq.util_avg;
2947}
2948
2949static inline
2950unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2951{
2952 util *= (max - irq);
2953 util /= max;
2954
2955 return util;
}
2958#else
2959static inline unsigned long cpu_util_irq(struct rq *rq)
2960{
2961 return 0;
2962}
2963
2964static inline
2965unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned long max)
2966{
2967 return util;
2968}
2969#endif
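
/*
 * Worked example (illustrative numbers): with max = 1024 and an IRQ
 * utilization of 128, a class utilization of 800 is scaled to
 *
 *	800 * (1024 - 128) / 1024 = 700
 *
 * reflecting that only (max - irq) / max of the CPU's time remains for
 * tasks; with CONFIG_HAVE_SCHED_AVG_IRQ disabled the value is untouched.
 */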
2970
2971#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
2972
2973#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
2974
2975DECLARE_STATIC_KEY_FALSE(sched_energy_present);
2976
2977static inline bool sched_energy_enabled(void)
2978{
2979 return static_branch_unlikely(&sched_energy_present);
2980}
2981
2982#else
2983
2984#define perf_domain_span(pd) NULL
2985static inline bool sched_energy_enabled(void) { return false; }
2986
2987#endif
2988
2989#ifdef CONFIG_MEMBARRIER
/*
 * The scheduler provides memory barriers required by membarrier between:
 * - prior user-space memory accesses and store to rq->membarrier_state,
 * - store to rq->membarrier_state and following user-space memory accesses.
 * In the same way it provides those guarantees around store to rq->curr.
 */
2996static inline void membarrier_switch_mm(struct rq *rq,
2997 struct mm_struct *prev_mm,
2998 struct mm_struct *next_mm)
2999{
3000 int membarrier_state;
3001
3002 if (prev_mm == next_mm)
3003 return;
3004
3005 membarrier_state = atomic_read(&next_mm->membarrier_state);
3006 if (READ_ONCE(rq->membarrier_state) == membarrier_state)
3007 return;
3008
3009 WRITE_ONCE(rq->membarrier_state, membarrier_state);
3010}
3011#else
3012static inline void membarrier_switch_mm(struct rq *rq,
3013 struct mm_struct *prev_mm,
3014 struct mm_struct *next_mm)
3015{
3016}
3017#endif
3018
3019#ifdef CONFIG_SMP
3020static inline bool is_per_cpu_kthread(struct task_struct *p)
3021{
3022 if (!(p->flags & PF_KTHREAD))
3023 return false;
3024
3025 if (p->nr_cpus_allowed != 1)
3026 return false;
3027
3028 return true;
3029}
3030#endif
3031
3032extern void swake_up_all_locked(struct swait_queue_head *q);
3033extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
3034
3035#ifdef CONFIG_PREEMPT_DYNAMIC
3036extern int preempt_dynamic_mode;
3037extern int sched_dynamic_mode(const char *str);
3038extern void sched_dynamic_update(int mode);
3039#endif
3040
3041