1
2#ifndef _LINUX_SCHED_H
3#define _LINUX_SCHED_H
4
5
6
7
8
9
10#include <uapi/linux/sched.h>
11
12#include <asm/current.h>
13
14#include <linux/pid.h>
15#include <linux/sem.h>
16#include <linux/shm.h>
17#include <linux/mutex.h>
18#include <linux/plist.h>
19#include <linux/hrtimer.h>
20#include <linux/irqflags.h>
21#include <linux/seccomp.h>
22#include <linux/nodemask.h>
23#include <linux/rcupdate.h>
24#include <linux/refcount.h>
25#include <linux/resource.h>
26#include <linux/latencytop.h>
27#include <linux/sched/prio.h>
28#include <linux/sched/types.h>
29#include <linux/signal_types.h>
30#include <linux/syscall_user_dispatch.h>
31#include <linux/mm_types_task.h>
32#include <linux/task_io_accounting.h>
33#include <linux/posix-timers.h>
34#include <linux/rseq.h>
35#include <linux/seqlock.h>
36#include <linux/kcsan.h>
37#include <asm/kmap_size.h>
38
39
40struct audit_context;
41struct backing_dev_info;
42struct bio_list;
43struct blk_plug;
44struct bpf_local_storage;
45struct capture_control;
46struct cfs_rq;
47struct fs_struct;
48struct futex_pi_state;
49struct io_context;
50struct io_uring_task;
51struct mempolicy;
52struct nameidata;
53struct nsproxy;
54struct perf_event_context;
55struct pid_namespace;
56struct pipe_inode_info;
57struct rcu_node;
58struct reclaim_state;
59struct robust_list_head;
60struct root_domain;
61struct rq;
62struct sched_attr;
63struct sched_param;
64struct seq_file;
65struct sighand_struct;
66struct signal_struct;
67struct task_delay_info;
68struct task_group;
69
70
71
72
73
74
75
76
77
78
79
80
81
82#define TASK_RUNNING 0x0000
83#define TASK_INTERRUPTIBLE 0x0001
84#define TASK_UNINTERRUPTIBLE 0x0002
85#define __TASK_STOPPED 0x0004
86#define __TASK_TRACED 0x0008
87
88#define EXIT_DEAD 0x0010
89#define EXIT_ZOMBIE 0x0020
90#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
91
92#define TASK_PARKED 0x0040
93#define TASK_DEAD 0x0080
94#define TASK_WAKEKILL 0x0100
95#define TASK_WAKING 0x0200
96#define TASK_NOLOAD 0x0400
97#define TASK_NEW 0x0800
98#define TASK_STATE_MAX 0x1000
99
100
101#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
102#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
103#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
104
105#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
106
107
108#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
109
110
111#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
112 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
113 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
114 TASK_PARKED)
115
116#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
117
118#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
119
120#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
121
122#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
123
124
125
126
127
128#define is_special_task_state(state) \
129 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
130
131#define __set_current_state(state_value) \
132 do { \
133 WARN_ON_ONCE(is_special_task_state(state_value));\
134 current->task_state_change = _THIS_IP_; \
135 current->state = (state_value); \
136 } while (0)
137
138#define set_current_state(state_value) \
139 do { \
140 WARN_ON_ONCE(is_special_task_state(state_value));\
141 current->task_state_change = _THIS_IP_; \
142 smp_store_mb(current->state, (state_value)); \
143 } while (0)
144
145#define set_special_state(state_value) \
146 do { \
147 unsigned long flags; \
148 WARN_ON_ONCE(!is_special_task_state(state_value)); \
149 raw_spin_lock_irqsave(&current->pi_lock, flags); \
150 current->task_state_change = _THIS_IP_; \
151 current->state = (state_value); \
152 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
153 } while (0)
154#else
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192#define __set_current_state(state_value) \
193 current->state = (state_value)
194
195#define set_current_state(state_value) \
196 smp_store_mb(current->state, (state_value))
197
198
199
200
201
202
203
204#define set_special_state(state_value) \
205 do { \
206 unsigned long flags; \
207 raw_spin_lock_irqsave(&current->pi_lock, flags); \
208 current->state = (state_value); \
209 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
210 } while (0)
211
212#endif
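
/*
 * Illustrative sketch (not part of this header's API): the canonical
 * sleep/wake pattern the helpers above are meant for.  CONDITION and
 * the waiter task below are hypothetical placeholders.
 *
 *   for (;;) {
 *           set_current_state(TASK_UNINTERRUPTIBLE);
 *           if (CONDITION)
 *                   break;
 *           schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * set_current_state() uses smp_store_mb(), so the state write is
 * ordered against the subsequent CONDITION load; the waker must store
 * CONDITION before calling wake_up_process() (see below).
 */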
213
214
215#define TASK_COMM_LEN 16
216
217extern void scheduler_tick(void);
218
219#define MAX_SCHEDULE_TIMEOUT LONG_MAX
220
221extern long schedule_timeout(long timeout);
222extern long schedule_timeout_interruptible(long timeout);
223extern long schedule_timeout_killable(long timeout);
224extern long schedule_timeout_uninterruptible(long timeout);
225extern long schedule_timeout_idle(long timeout);
226asmlinkage void schedule(void);
227extern void schedule_preempt_disabled(void);
228asmlinkage void preempt_schedule_irq(void);
229
230extern int __must_check io_schedule_prepare(void);
231extern void io_schedule_finish(int token);
232extern long io_schedule_timeout(long timeout);
233extern void io_schedule(void);
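
/*
 * Usage sketch (illustrative only): sleep for up to 100ms unless woken
 * earlier.  msecs_to_jiffies() comes from <linux/jiffies.h>.
 *
 *   long remaining;
 *
 *   set_current_state(TASK_UNINTERRUPTIBLE);
 *   remaining = schedule_timeout(msecs_to_jiffies(100));
 *
 * schedule_timeout() returns 0 if the timeout elapsed, otherwise the
 * number of jiffies left when the task was woken earlier.  The
 * _interruptible/_killable/_uninterruptible/_idle variants set the
 * task state for you, and MAX_SCHEDULE_TIMEOUT means "no time limit".
 */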
234
235
236
237
238
239
240
241
242
243
244struct prev_cputime {
245#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
246 u64 utime;
247 u64 stime;
248 raw_spinlock_t lock;
249#endif
250};
251
252enum vtime_state {
253
254 VTIME_INACTIVE = 0,
255
256 VTIME_IDLE,
257
258 VTIME_SYS,
259
260 VTIME_USER,
261
262 VTIME_GUEST,
263};
264
265struct vtime {
266 seqcount_t seqcount;
267 unsigned long long starttime;
268 enum vtime_state state;
269 unsigned int cpu;
270 u64 utime;
271 u64 stime;
272 u64 gtime;
273};
274
275
276
277
278
279
280
281enum uclamp_id {
282 UCLAMP_MIN = 0,
283 UCLAMP_MAX,
284 UCLAMP_CNT
285};
286
287#ifdef CONFIG_SMP
288extern struct root_domain def_root_domain;
289extern struct mutex sched_domains_mutex;
290#endif
291
292struct sched_info {
293#ifdef CONFIG_SCHED_INFO
294
295
296
297 unsigned long pcount;
298
299
300 unsigned long long run_delay;
301
302
303
304
305 unsigned long long last_arrival;
306
307
308 unsigned long long last_queued;
309
310#endif
311};
312
313
314
315
316
317
318
319
320# define SCHED_FIXEDPOINT_SHIFT 10
321# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
322
323
324# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
325# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
326
327struct load_weight {
328 unsigned long weight;
329 u32 inv_weight;
330};
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361struct util_est {
362 unsigned int enqueued;
363 unsigned int ewma;
364#define UTIL_EST_WEIGHT_SHIFT 2
365#define UTIL_AVG_UNCHANGED 0x80000000
366} __attribute__((__aligned__(sizeof(u64))));
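
/*
 * Rough sketch of how these fields relate (the authoritative update
 * code lives in the fair scheduler): 'ewma' tracks an exponentially
 * weighted moving average of the task's utilization sampled at dequeue
 * time, with weight 1/2^UTIL_EST_WEIGHT_SHIFT, i.e. roughly:
 *
 *   ewma = ewma + (last_util - ewma) / 4;
 *
 * 'enqueued' caches the estimate used at enqueue time, and the
 * UTIL_AVG_UNCHANGED bit in it is used to flag that util_avg has not
 * changed since the last util_est update.
 */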
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413struct sched_avg {
414 u64 last_update_time;
415 u64 load_sum;
416 u64 runnable_sum;
417 u32 util_sum;
418 u32 period_contrib;
419 unsigned long load_avg;
420 unsigned long runnable_avg;
421 unsigned long util_avg;
422 struct util_est util_est;
423} ____cacheline_aligned;
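
/*
 * Informal summary (the precise maths lives in kernel/sched/pelt.c):
 * the *_sum fields accumulate a geometric series over ~1ms segments,
 * each older segment decayed by y with y^32 == 0.5, so history older
 * than about 32ms contributes less than half.  The *_avg fields are
 * those sums scaled into [0..SCHED_CAPACITY_SCALE] (and by the load
 * weight for load_avg), which is the form consumed by load balancing
 * and by schedutil.
 */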
424
425struct sched_statistics {
426#ifdef CONFIG_SCHEDSTATS
427 u64 wait_start;
428 u64 wait_max;
429 u64 wait_count;
430 u64 wait_sum;
431 u64 iowait_count;
432 u64 iowait_sum;
433
434 u64 sleep_start;
435 u64 sleep_max;
436 s64 sum_sleep_runtime;
437
438 u64 block_start;
439 u64 block_max;
440 u64 exec_max;
441 u64 slice_max;
442
443 u64 nr_migrations_cold;
444 u64 nr_failed_migrations_affine;
445 u64 nr_failed_migrations_running;
446 u64 nr_failed_migrations_hot;
447 u64 nr_forced_migrations;
448
449 u64 nr_wakeups;
450 u64 nr_wakeups_sync;
451 u64 nr_wakeups_migrate;
452 u64 nr_wakeups_local;
453 u64 nr_wakeups_remote;
454 u64 nr_wakeups_affine;
455 u64 nr_wakeups_affine_attempts;
456 u64 nr_wakeups_passive;
457 u64 nr_wakeups_idle;
458#endif
459};
460
461struct sched_entity {
462
463 struct load_weight load;
464 struct rb_node run_node;
465 struct list_head group_node;
466 unsigned int on_rq;
467
468 u64 exec_start;
469 u64 sum_exec_runtime;
470 u64 vruntime;
471 u64 prev_sum_exec_runtime;
472
473 u64 nr_migrations;
474
475 struct sched_statistics statistics;
476
477#ifdef CONFIG_FAIR_GROUP_SCHED
478 int depth;
479 struct sched_entity *parent;
480
481 struct cfs_rq *cfs_rq;
482
483 struct cfs_rq *my_q;
484
485 unsigned long runnable_weight;
486#endif
487
488#ifdef CONFIG_SMP
489
490
491
492
493
494
495 struct sched_avg avg;
496#endif
497};
498
499struct sched_rt_entity {
500 struct list_head run_list;
501 unsigned long timeout;
502 unsigned long watchdog_stamp;
503 unsigned int time_slice;
504 unsigned short on_rq;
505 unsigned short on_list;
506
507 struct sched_rt_entity *back;
508#ifdef CONFIG_RT_GROUP_SCHED
509 struct sched_rt_entity *parent;
510
511 struct rt_rq *rt_rq;
512
513 struct rt_rq *my_q;
514#endif
515} __randomize_layout;
516
517struct sched_dl_entity {
518 struct rb_node rb_node;
519
520
521
522
523
524
525 u64 dl_runtime;
526 u64 dl_deadline;
527 u64 dl_period;
528 u64 dl_bw;
529 u64 dl_density;
530
531
532
533
534
535
536 s64 runtime;
537 u64 deadline;
538 unsigned int flags;
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564 unsigned int dl_throttled : 1;
565 unsigned int dl_yielded : 1;
566 unsigned int dl_non_contending : 1;
567 unsigned int dl_overrun : 1;
568
569
570
571
572
573 struct hrtimer dl_timer;
574
575
576
577
578
579
580
581
582 struct hrtimer inactive_timer;
583
584#ifdef CONFIG_RT_MUTEXES
585
586
587
588
589
590 struct sched_dl_entity *pi_se;
591#endif
592};
593
594#ifdef CONFIG_UCLAMP_TASK
595
596#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621struct uclamp_se {
622 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
623 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
624 unsigned int active : 1;
625 unsigned int user_defined : 1;
626};
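
/*
 * Illustrative mapping only (the real helper lives in
 * kernel/sched/core.c): a clamp value in [0..SCHED_CAPACITY_SCALE] is
 * assumed to land in a bucket roughly as:
 *
 *   delta = DIV_ROUND_UP(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS);
 *   bucket_id = min_t(unsigned int, value / delta, UCLAMP_BUCKETS - 1);
 *
 * 'user_defined' records whether the value came from user space rather
 * than the system/cgroup defaults; 'active' whether the clamp is
 * currently accounted in a runqueue bucket.
 */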
627#endif
628
629union rcu_special {
630 struct {
631 u8 blocked;
632 u8 need_qs;
633 u8 exp_hint;
634 u8 need_mb;
635 } b;
636 u32 s;
637};
638
639enum perf_event_task_context {
640 perf_invalid_context = -1,
641 perf_hw_context = 0,
642 perf_sw_context,
643 perf_nr_task_contexts,
644};
645
646struct wake_q_node {
647 struct wake_q_node *next;
648};
649
650struct kmap_ctrl {
651#ifdef CONFIG_KMAP_LOCAL
652 int idx;
653 pte_t pteval[KM_MAX_IDX];
654#endif
655};
656
657struct task_struct {
658#ifdef CONFIG_THREAD_INFO_IN_TASK
659
660
661
662
663 struct thread_info thread_info;
664#endif
665
666 volatile long state;
667
668
669
670
671
672 randomized_struct_fields_start
673
674 void *stack;
675 refcount_t usage;
676
677 unsigned int flags;
678 unsigned int ptrace;
679
680#ifdef CONFIG_SMP
681 int on_cpu;
682 struct __call_single_node wake_entry;
683#ifdef CONFIG_THREAD_INFO_IN_TASK
684
685 unsigned int cpu;
686#endif
687 unsigned int wakee_flips;
688 unsigned long wakee_flip_decay_ts;
689 struct task_struct *last_wakee;
690
691
692
693
694
695
696
697
698 int recent_used_cpu;
699 int wake_cpu;
700#endif
701 int on_rq;
702
703 int prio;
704 int static_prio;
705 int normal_prio;
706 unsigned int rt_priority;
707
708 const struct sched_class *sched_class;
709 struct sched_entity se;
710 struct sched_rt_entity rt;
711#ifdef CONFIG_CGROUP_SCHED
712 struct task_group *sched_task_group;
713#endif
714 struct sched_dl_entity dl;
715
716#ifdef CONFIG_UCLAMP_TASK
717
718
719
720
721 struct uclamp_se uclamp_req[UCLAMP_CNT];
722
723
724
725
726 struct uclamp_se uclamp[UCLAMP_CNT];
727#endif
728
729#ifdef CONFIG_PREEMPT_NOTIFIERS
730
731 struct hlist_head preempt_notifiers;
732#endif
733
734#ifdef CONFIG_BLK_DEV_IO_TRACE
735 unsigned int btrace_seq;
736#endif
737
738 unsigned int policy;
739 int nr_cpus_allowed;
740 const cpumask_t *cpus_ptr;
741 cpumask_t cpus_mask;
742 void *migration_pending;
743#ifdef CONFIG_SMP
744 unsigned short migration_disabled;
745#endif
746 unsigned short migration_flags;
747
748#ifdef CONFIG_PREEMPT_RCU
749 int rcu_read_lock_nesting;
750 union rcu_special rcu_read_unlock_special;
751 struct list_head rcu_node_entry;
752 struct rcu_node *rcu_blocked_node;
753#endif
754
755#ifdef CONFIG_TASKS_RCU
756 unsigned long rcu_tasks_nvcsw;
757 u8 rcu_tasks_holdout;
758 u8 rcu_tasks_idx;
759 int rcu_tasks_idle_cpu;
760 struct list_head rcu_tasks_holdout_list;
761#endif
762
763#ifdef CONFIG_TASKS_TRACE_RCU
764 int trc_reader_nesting;
765 int trc_ipi_to_cpu;
766 union rcu_special trc_reader_special;
767 bool trc_reader_checked;
768 struct list_head trc_holdout_list;
769#endif
770
771 struct sched_info sched_info;
772
773 struct list_head tasks;
774#ifdef CONFIG_SMP
775 struct plist_node pushable_tasks;
776 struct rb_node pushable_dl_tasks;
777#endif
778
779 struct mm_struct *mm;
780 struct mm_struct *active_mm;
781
782
783 struct vmacache vmacache;
784
785#ifdef SPLIT_RSS_COUNTING
786 struct task_rss_stat rss_stat;
787#endif
788 int exit_state;
789 int exit_code;
790 int exit_signal;
791
792 int pdeath_signal;
793
794 unsigned long jobctl;
795
796
797 unsigned int personality;
798
799
800 unsigned sched_reset_on_fork:1;
801 unsigned sched_contributes_to_load:1;
802 unsigned sched_migrated:1;
803#ifdef CONFIG_PSI
804 unsigned sched_psi_wake_requeue:1;
805#endif
806
807
808 unsigned :0;
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825 unsigned sched_remote_wakeup:1;
826
827
828 unsigned in_execve:1;
829 unsigned in_iowait:1;
830#ifndef TIF_RESTORE_SIGMASK
831 unsigned restore_sigmask:1;
832#endif
833#ifdef CONFIG_MEMCG
834 unsigned in_user_fault:1;
835#endif
836#ifdef CONFIG_COMPAT_BRK
837 unsigned brk_randomized:1;
838#endif
839#ifdef CONFIG_CGROUPS
840
841 unsigned no_cgroup_migration:1;
842
843 unsigned frozen:1;
844#endif
845#ifdef CONFIG_BLK_CGROUP
846 unsigned use_memdelay:1;
847#endif
848#ifdef CONFIG_PSI
849
850 unsigned in_memstall:1;
851#endif
852#ifdef CONFIG_PAGE_OWNER
853
854 unsigned in_page_owner:1;
855#endif
856
857 unsigned long atomic_flags;
858
859 struct restart_block restart_block;
860
861 pid_t pid;
862 pid_t tgid;
863
864#ifdef CONFIG_STACKPROTECTOR
865
866 unsigned long stack_canary;
867#endif
868
869
870
871
872
873
874
875 struct task_struct __rcu *real_parent;
876
877
878 struct task_struct __rcu *parent;
879
880
881
882
883 struct list_head children;
884 struct list_head sibling;
885 struct task_struct *group_leader;
886
887
888
889
890
891
892
893 struct list_head ptraced;
894 struct list_head ptrace_entry;
895
896
897 struct pid *thread_pid;
898 struct hlist_node pid_links[PIDTYPE_MAX];
899 struct list_head thread_group;
900 struct list_head thread_node;
901
902 struct completion *vfork_done;
903
904
905 int __user *set_child_tid;
906
907
908 int __user *clear_child_tid;
909
910
911 void *pf_io_worker;
912
913 u64 utime;
914 u64 stime;
915#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
916 u64 utimescaled;
917 u64 stimescaled;
918#endif
919 u64 gtime;
920 struct prev_cputime prev_cputime;
921#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
922 struct vtime vtime;
923#endif
924
925#ifdef CONFIG_NO_HZ_FULL
926 atomic_t tick_dep_mask;
927#endif
928
929 unsigned long nvcsw;
930 unsigned long nivcsw;
931
932
933 u64 start_time;
934
935
936 u64 start_boottime;
937
938
939 unsigned long min_flt;
940 unsigned long maj_flt;
941
942
943 struct posix_cputimers posix_cputimers;
944
945#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
946 struct posix_cputimers_work posix_cputimers_work;
947#endif
948
949
950
951
952 const struct cred __rcu *ptracer_cred;
953
954
955 const struct cred __rcu *real_cred;
956
957
958 const struct cred __rcu *cred;
959
960#ifdef CONFIG_KEYS
961
962 struct key *cached_requested_key;
963#endif
964
965
966
967
968
969
970
971
972 char comm[TASK_COMM_LEN];
973
974 struct nameidata *nameidata;
975
976#ifdef CONFIG_SYSVIPC
977 struct sysv_sem sysvsem;
978 struct sysv_shm sysvshm;
979#endif
980#ifdef CONFIG_DETECT_HUNG_TASK
981 unsigned long last_switch_count;
982 unsigned long last_switch_time;
983#endif
984
985 struct fs_struct *fs;
986
987
988 struct files_struct *files;
989
990#ifdef CONFIG_IO_URING
991 struct io_uring_task *io_uring;
992#endif
993
994
995 struct nsproxy *nsproxy;
996
997
998 struct signal_struct *signal;
999 struct sighand_struct __rcu *sighand;
1000 sigset_t blocked;
1001 sigset_t real_blocked;
1002
1003 sigset_t saved_sigmask;
1004 struct sigpending pending;
1005 unsigned long sas_ss_sp;
1006 size_t sas_ss_size;
1007 unsigned int sas_ss_flags;
1008
1009 struct callback_head *task_works;
1010
1011#ifdef CONFIG_AUDIT
1012#ifdef CONFIG_AUDITSYSCALL
1013 struct audit_context *audit_context;
1014#endif
1015 kuid_t loginuid;
1016 unsigned int sessionid;
1017#endif
1018 struct seccomp seccomp;
1019 struct syscall_user_dispatch syscall_dispatch;
1020
1021
1022 u64 parent_exec_id;
1023 u64 self_exec_id;
1024
1025
1026 spinlock_t alloc_lock;
1027
1028
1029 raw_spinlock_t pi_lock;
1030
1031 struct wake_q_node wake_q;
1032
1033#ifdef CONFIG_RT_MUTEXES
1034
1035 struct rb_root_cached pi_waiters;
1036
1037 struct task_struct *pi_top_task;
1038
1039 struct rt_mutex_waiter *pi_blocked_on;
1040#endif
1041
1042#ifdef CONFIG_DEBUG_MUTEXES
1043
1044 struct mutex_waiter *blocked_on;
1045#endif
1046
1047#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1048 int non_block_count;
1049#endif
1050
1051#ifdef CONFIG_TRACE_IRQFLAGS
1052 struct irqtrace_events irqtrace;
1053 unsigned int hardirq_threaded;
1054 u64 hardirq_chain_key;
1055 int softirqs_enabled;
1056 int softirq_context;
1057 int irq_config;
1058#endif
1059#ifdef CONFIG_PREEMPT_RT
1060 int softirq_disable_cnt;
1061#endif
1062
1063#ifdef CONFIG_LOCKDEP
1064# define MAX_LOCK_DEPTH 48UL
1065 u64 curr_chain_key;
1066 int lockdep_depth;
1067 unsigned int lockdep_recursion;
1068 struct held_lock held_locks[MAX_LOCK_DEPTH];
1069#endif
1070
1071#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1072 unsigned int in_ubsan;
1073#endif
1074
1075
1076 void *journal_info;
1077
1078
1079 struct bio_list *bio_list;
1080
1081#ifdef CONFIG_BLOCK
1082
1083 struct blk_plug *plug;
1084#endif
1085
1086
1087 struct reclaim_state *reclaim_state;
1088
1089 struct backing_dev_info *backing_dev_info;
1090
1091 struct io_context *io_context;
1092
1093#ifdef CONFIG_COMPACTION
1094 struct capture_control *capture_control;
1095#endif
1096
1097 unsigned long ptrace_message;
1098 kernel_siginfo_t *last_siginfo;
1099
1100 struct task_io_accounting ioac;
1101#ifdef CONFIG_PSI
1102
1103 unsigned int psi_flags;
1104#endif
1105#ifdef CONFIG_TASK_XACCT
1106
1107 u64 acct_rss_mem1;
1108
1109 u64 acct_vm_mem1;
1110
1111 u64 acct_timexpd;
1112#endif
1113#ifdef CONFIG_CPUSETS
1114
1115 nodemask_t mems_allowed;
1116
1117 seqcount_spinlock_t mems_allowed_seq;
1118 int cpuset_mem_spread_rotor;
1119 int cpuset_slab_spread_rotor;
1120#endif
1121#ifdef CONFIG_CGROUPS
1122
1123 struct css_set __rcu *cgroups;
1124
1125 struct list_head cg_list;
1126#endif
1127#ifdef CONFIG_X86_CPU_RESCTRL
1128 u32 closid;
1129 u32 rmid;
1130#endif
1131#ifdef CONFIG_FUTEX
1132 struct robust_list_head __user *robust_list;
1133#ifdef CONFIG_COMPAT
1134 struct compat_robust_list_head __user *compat_robust_list;
1135#endif
1136 struct list_head pi_state_list;
1137 struct futex_pi_state *pi_state_cache;
1138 struct mutex futex_exit_mutex;
1139 unsigned int futex_state;
1140#endif
1141#ifdef CONFIG_PERF_EVENTS
1142 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1143 struct mutex perf_event_mutex;
1144 struct list_head perf_event_list;
1145#endif
1146#ifdef CONFIG_DEBUG_PREEMPT
1147 unsigned long preempt_disable_ip;
1148#endif
1149#ifdef CONFIG_NUMA
1150
1151 struct mempolicy *mempolicy;
1152 short il_prev;
1153 short pref_node_fork;
1154#endif
1155#ifdef CONFIG_NUMA_BALANCING
1156 int numa_scan_seq;
1157 unsigned int numa_scan_period;
1158 unsigned int numa_scan_period_max;
1159 int numa_preferred_nid;
1160 unsigned long numa_migrate_retry;
1161
1162 u64 node_stamp;
1163 u64 last_task_numa_placement;
1164 u64 last_sum_exec_runtime;
1165 struct callback_head numa_work;
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175 struct numa_group __rcu *numa_group;
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191 unsigned long *numa_faults;
1192 unsigned long total_numa_faults;
1193
1194
1195
1196
1197
1198
1199
1200 unsigned long numa_faults_locality[3];
1201
1202 unsigned long numa_pages_migrated;
1203#endif
1204
1205#ifdef CONFIG_RSEQ
1206 struct rseq __user *rseq;
1207 u32 rseq_sig;
1208
1209
1210
1211
1212 unsigned long rseq_event_mask;
1213#endif
1214
1215 struct tlbflush_unmap_batch tlb_ubc;
1216
1217 union {
1218 refcount_t rcu_users;
1219 struct rcu_head rcu;
1220 };
1221
1222
1223 struct pipe_inode_info *splice_pipe;
1224
1225 struct page_frag task_frag;
1226
1227#ifdef CONFIG_TASK_DELAY_ACCT
1228 struct task_delay_info *delays;
1229#endif
1230
1231#ifdef CONFIG_FAULT_INJECTION
1232 int make_it_fail;
1233 unsigned int fail_nth;
1234#endif
1235
1236
1237
1238
1239 int nr_dirtied;
1240 int nr_dirtied_pause;
1241
1242 unsigned long dirty_paused_when;
1243
1244#ifdef CONFIG_LATENCYTOP
1245 int latency_record_count;
1246 struct latency_record latency_record[LT_SAVECOUNT];
1247#endif
1248
1249
1250
1251
1252 u64 timer_slack_ns;
1253 u64 default_timer_slack_ns;
1254
1255#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1256 unsigned int kasan_depth;
1257#endif
1258
1259#ifdef CONFIG_KCSAN
1260 struct kcsan_ctx kcsan_ctx;
1261#ifdef CONFIG_TRACE_IRQFLAGS
1262 struct irqtrace_events kcsan_save_irqtrace;
1263#endif
1264#endif
1265
1266#if IS_ENABLED(CONFIG_KUNIT)
1267 struct kunit *kunit_test;
1268#endif
1269
1270#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1271
1272 int curr_ret_stack;
1273 int curr_ret_depth;
1274
1275
1276 struct ftrace_ret_stack *ret_stack;
1277
1278
1279 unsigned long long ftrace_timestamp;
1280
1281
1282
1283
1284
1285 atomic_t trace_overrun;
1286
1287
1288 atomic_t tracing_graph_pause;
1289#endif
1290
1291#ifdef CONFIG_TRACING
1292
1293 unsigned long trace;
1294
1295
1296 unsigned long trace_recursion;
1297#endif
1298
1299#ifdef CONFIG_KCOV
1300
1301
1302
1303 unsigned int kcov_mode;
1304
1305
1306 unsigned int kcov_size;
1307
1308
1309 void *kcov_area;
1310
1311
1312 struct kcov *kcov;
1313
1314
1315 u64 kcov_handle;
1316
1317
1318 int kcov_sequence;
1319
1320
1321 unsigned int kcov_softirq;
1322#endif
1323
1324#ifdef CONFIG_MEMCG
1325 struct mem_cgroup *memcg_in_oom;
1326 gfp_t memcg_oom_gfp_mask;
1327 int memcg_oom_order;
1328
1329
1330 unsigned int memcg_nr_pages_over_high;
1331
1332
1333 struct mem_cgroup *active_memcg;
1334#endif
1335
1336#ifdef CONFIG_BLK_CGROUP
1337 struct request_queue *throttle_queue;
1338#endif
1339
1340#ifdef CONFIG_UPROBES
1341 struct uprobe_task *utask;
1342#endif
1343#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1344 unsigned int sequential_io;
1345 unsigned int sequential_io_avg;
1346#endif
1347 struct kmap_ctrl kmap_ctrl;
1348#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1349 unsigned long task_state_change;
1350#endif
1351 int pagefault_disabled;
1352#ifdef CONFIG_MMU
1353 struct task_struct *oom_reaper_list;
1354#endif
1355#ifdef CONFIG_VMAP_STACK
1356 struct vm_struct *stack_vm_area;
1357#endif
1358#ifdef CONFIG_THREAD_INFO_IN_TASK
1359
1360 refcount_t stack_refcount;
1361#endif
1362#ifdef CONFIG_LIVEPATCH
1363 int patch_state;
1364#endif
1365#ifdef CONFIG_SECURITY
1366
1367 void *security;
1368#endif
1369#ifdef CONFIG_BPF_SYSCALL
1370
1371 struct bpf_local_storage __rcu *bpf_storage;
1372#endif
1373
1374#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1375 unsigned long lowest_stack;
1376 unsigned long prev_lowest_stack;
1377#endif
1378
1379#ifdef CONFIG_X86_MCE
1380 void __user *mce_vaddr;
1381 __u64 mce_kflags;
1382 u64 mce_addr;
1383 __u64 mce_ripv : 1,
1384 mce_whole_page : 1,
1385 __mce_reserved : 62;
1386 struct callback_head mce_kill_me;
1387#endif
1388
1389#ifdef CONFIG_KRETPROBES
1390 struct llist_head kretprobe_instances;
1391#endif
1392
1393
1394
1395
1396
1397 randomized_struct_fields_end
1398
1399
1400 struct thread_struct thread;
1401
1402
1403
1404
1405
1406
1407
1408};
1409
1410static inline struct pid *task_pid(struct task_struct *task)
1411{
1412 return task->thread_pid;
1413}
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1427
1428static inline pid_t task_pid_nr(struct task_struct *tsk)
1429{
1430 return tsk->pid;
1431}
1432
1433static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1434{
1435 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1436}
1437
1438static inline pid_t task_pid_vnr(struct task_struct *tsk)
1439{
1440 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1441}
1442
1443
1444static inline pid_t task_tgid_nr(struct task_struct *tsk)
1445{
1446 return tsk->tgid;
1447}
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459static inline int pid_alive(const struct task_struct *p)
1460{
1461 return p->thread_pid != NULL;
1462}
1463
1464static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1465{
1466 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1467}
1468
1469static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1470{
1471 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1472}
1473
1474
1475static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1476{
1477 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1478}
1479
1480static inline pid_t task_session_vnr(struct task_struct *tsk)
1481{
1482 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1483}
1484
1485static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1486{
1487 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1488}
1489
1490static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1491{
1492 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1493}
1494
1495static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1496{
1497 pid_t pid = 0;
1498
1499 rcu_read_lock();
1500 if (pid_alive(tsk))
1501 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1502 rcu_read_unlock();
1503
1504 return pid;
1505}
1506
1507static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1508{
1509 return task_ppid_nr_ns(tsk, &init_pid_ns);
1510}
1511
1512
1513static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1514{
1515 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1516}
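
/*
 * Usage sketch (hypothetical caller): report the current task's IDs as
 * seen from its own pid namespace versus the initial namespace.
 *
 *   pid_t vpid = task_pid_vnr(current);   (as seen inside current's ns)
 *   pid_t gpid = task_pid_nr(current);    (global, init_pid_ns value)
 *   pid_t vtgid = task_tgid_vnr(current); (thread group id in that ns)
 *
 * The *_nr_ns() variants take an explicit namespace and return 0 if the
 * task has no pid there (e.g. because it is exiting).
 */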
1517
1518#define TASK_REPORT_IDLE (TASK_REPORT + 1)
1519#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1520
1521static inline unsigned int task_state_index(struct task_struct *tsk)
1522{
1523 unsigned int tsk_state = READ_ONCE(tsk->state);
1524 unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1525
1526 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1527
1528 if (tsk_state == TASK_IDLE)
1529 state = TASK_REPORT_IDLE;
1530
1531 return fls(state);
1532}
1533
1534static inline char task_index_to_char(unsigned int state)
1535{
1536 static const char state_char[] = "RSDTtXZPI";
1537
1538 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1539
1540 return state_char[state];
1541}
1542
1543static inline char task_state_to_char(struct task_struct *tsk)
1544{
1545 return task_index_to_char(task_state_index(tsk));
1546}
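
/*
 * Sketch of how the helpers above compose (roughly what procfs and
 * sched_show_task() do):
 *
 *   char c = task_state_to_char(p);
 *
 * yields one of 'R', 'S', 'D', 'T', 't', 'X', 'Z', 'P' or 'I':
 * task_state_index() folds ->state and ->exit_state into a TASK_REPORT
 * bit and returns its index via fls(), and task_index_to_char() maps
 * that index into the "RSDTtXZPI" table.
 */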
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557static inline int is_global_init(struct task_struct *tsk)
1558{
1559 return task_tgid_nr(tsk) == 1;
1560}
1561
1562extern struct pid *cad_pid;
1563
1564
1565
1566
1567#define PF_VCPU 0x00000001
1568#define PF_IDLE 0x00000002
1569#define PF_EXITING 0x00000004
1570#define PF_IO_WORKER 0x00000010
1571#define PF_WQ_WORKER 0x00000020
1572#define PF_FORKNOEXEC 0x00000040
1573#define PF_MCE_PROCESS 0x00000080
1574#define PF_SUPERPRIV 0x00000100
1575#define PF_DUMPCORE 0x00000200
1576#define PF_SIGNALED 0x00000400
1577#define PF_MEMALLOC 0x00000800
1578#define PF_NPROC_EXCEEDED 0x00001000
1579#define PF_USED_MATH 0x00002000
1580#define PF_USED_ASYNC 0x00004000
1581#define PF_NOFREEZE 0x00008000
1582#define PF_FROZEN 0x00010000
1583#define PF_KSWAPD 0x00020000
1584#define PF_MEMALLOC_NOFS 0x00040000
1585#define PF_MEMALLOC_NOIO 0x00080000
1586#define PF_LOCAL_THROTTLE 0x00100000
1587
1588#define PF_KTHREAD 0x00200000
1589#define PF_RANDOMIZE 0x00400000
1590#define PF_SWAPWRITE 0x00800000
1591#define PF_NO_SETAFFINITY 0x04000000
1592#define PF_MCE_EARLY 0x08000000
1593#define PF_MEMALLOC_PIN 0x10000000
1594#define PF_FREEZER_SKIP 0x40000000
1595#define PF_SUSPEND_TASK 0x80000000
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1609#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1610#define clear_used_math() clear_stopped_child_used_math(current)
1611#define set_used_math() set_stopped_child_used_math(current)
1612
1613#define conditional_stopped_child_used_math(condition, child) \
1614 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1615
1616#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1617
1618#define copy_to_stopped_child_used_math(child) \
1619 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1620
1621
1622#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1623#define used_math() tsk_used_math(current)
1624
1625static inline bool is_percpu_thread(void)
1626{
1627#ifdef CONFIG_SMP
1628 return (current->flags & PF_NO_SETAFFINITY) &&
1629 (current->nr_cpus_allowed == 1);
1630#else
1631 return true;
1632#endif
1633}
1634
1635
1636#define PFA_NO_NEW_PRIVS 0
1637#define PFA_SPREAD_PAGE 1
1638#define PFA_SPREAD_SLAB 2
1639#define PFA_SPEC_SSB_DISABLE 3
1640#define PFA_SPEC_SSB_FORCE_DISABLE 4
1641#define PFA_SPEC_IB_DISABLE 5
1642#define PFA_SPEC_IB_FORCE_DISABLE 6
1643#define PFA_SPEC_SSB_NOEXEC 7
1644
1645#define TASK_PFA_TEST(name, func) \
1646 static inline bool task_##func(struct task_struct *p) \
1647 { return test_bit(PFA_##name, &p->atomic_flags); }
1648
1649#define TASK_PFA_SET(name, func) \
1650 static inline void task_set_##func(struct task_struct *p) \
1651 { set_bit(PFA_##name, &p->atomic_flags); }
1652
1653#define TASK_PFA_CLEAR(name, func) \
1654 static inline void task_clear_##func(struct task_struct *p) \
1655 { clear_bit(PFA_##name, &p->atomic_flags); }
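
/*
 * For illustration, the first TASK_PFA_TEST() invocation below expands
 * to a static inline such as:
 *
 *   static inline bool task_no_new_privs(struct task_struct *p)
 *   { return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * i.e. each PFA_* bit gets test/set/clear accessors operating on
 * p->atomic_flags.  Some flags deliberately omit the CLEAR accessor
 * (e.g. NO_NEW_PRIVS) because they must never be cleared once set.
 */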
1656
1657TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1658TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1659
1660TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1661TASK_PFA_SET(SPREAD_PAGE, spread_page)
1662TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1663
1664TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1665TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1666TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1667
1668TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1669TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1670TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1671
1672TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1673TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1674TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1675
1676TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1677TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1678
1679TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1680TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1681TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1682
1683TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1684TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1685
1686static inline void
1687current_restore_flags(unsigned long orig_flags, unsigned long flags)
1688{
1689 current->flags &= ~flags;
1690 current->flags |= orig_flags & flags;
1691}
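
/*
 * Typical save/restore pattern (sketch; the real users are the
 * memalloc_no{fs,io}_save()/restore() helpers in <linux/sched/mm.h>):
 *
 *   unsigned int pflags = current->flags & PF_MEMALLOC_NOIO;
 *
 *   current->flags |= PF_MEMALLOC_NOIO;
 *   ...allocate without initiating I/O...
 *   current_restore_flags(pflags, PF_MEMALLOC_NOIO);
 *
 * Restoring the saved value rather than unconditionally clearing the
 * bit keeps nested scopes correct.
 */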
1692
1693extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1694extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1695#ifdef CONFIG_SMP
1696extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1697extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1698#else
1699static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1700{
1701}
1702static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1703{
1704 if (!cpumask_test_cpu(0, new_mask))
1705 return -EINVAL;
1706 return 0;
1707}
1708#endif
1709
1710extern int yield_to(struct task_struct *p, bool preempt);
1711extern void set_user_nice(struct task_struct *p, long nice);
1712extern int task_prio(const struct task_struct *p);
1713
1714
1715
1716
1717
1718
1719
1720static inline int task_nice(const struct task_struct *p)
1721{
1722 return PRIO_TO_NICE((p)->static_prio);
1723}
1724
1725extern int can_nice(const struct task_struct *p, const int nice);
1726extern int task_curr(const struct task_struct *p);
1727extern int idle_cpu(int cpu);
1728extern int available_idle_cpu(int cpu);
1729extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1730extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1731extern void sched_set_fifo(struct task_struct *p);
1732extern void sched_set_fifo_low(struct task_struct *p);
1733extern void sched_set_normal(struct task_struct *p, int nice);
1734extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1735extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1736extern struct task_struct *idle_task(int cpu);
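
/*
 * Note for in-kernel users: kernel threads needing elevated priority
 * are expected to use sched_set_fifo()/sched_set_fifo_low() (which pick
 * a priority for you) or sched_set_normal(), rather than calling
 * sched_setscheduler() with a hand-picked RT priority.  Sketch, with
 * "example"/fn/data as placeholders and error handling omitted:
 *
 *   struct task_struct *tsk = kthread_create(fn, data, "example");
 *   sched_set_fifo(tsk);
 *   wake_up_process(tsk);
 */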
1737
1738
1739
1740
1741
1742
1743
1744static __always_inline bool is_idle_task(const struct task_struct *p)
1745{
1746 return !!(p->flags & PF_IDLE);
1747}
1748
1749extern struct task_struct *curr_task(int cpu);
1750extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1751
1752void yield(void);
1753
1754union thread_union {
1755#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1756 struct task_struct task;
1757#endif
1758#ifndef CONFIG_THREAD_INFO_IN_TASK
1759 struct thread_info thread_info;
1760#endif
1761 unsigned long stack[THREAD_SIZE/sizeof(long)];
1762};
1763
1764#ifndef CONFIG_THREAD_INFO_IN_TASK
1765extern struct thread_info init_thread_info;
1766#endif
1767
1768extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1769
1770#ifdef CONFIG_THREAD_INFO_IN_TASK
1771static inline struct thread_info *task_thread_info(struct task_struct *task)
1772{
1773 return &task->thread_info;
1774}
1775#elif !defined(__HAVE_THREAD_FUNCTIONS)
1776# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1777#endif
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790extern struct task_struct *find_task_by_vpid(pid_t nr);
1791extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1792
1793
1794
1795
1796extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1797
1798extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1799extern int wake_up_process(struct task_struct *tsk);
1800extern void wake_up_new_task(struct task_struct *tsk);
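
/*
 * Waker-side counterpart to the sleep pattern sketched near
 * set_current_state() above (CONDITION and waiter_task are again
 * placeholders):
 *
 *   CONDITION = 1;
 *   wake_up_process(waiter_task);
 *
 * wake_up_process() provides the ordering that pairs with the barrier
 * in set_current_state(), so the CONDITION store is visible before the
 * waiter can observe itself woken.  wake_up_state() is the same but
 * only wakes tasks currently in one of the given states.
 */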
1801
1802#ifdef CONFIG_SMP
1803extern void kick_process(struct task_struct *tsk);
1804#else
1805static inline void kick_process(struct task_struct *tsk) { }
1806#endif
1807
1808extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1809
1810static inline void set_task_comm(struct task_struct *tsk, const char *from)
1811{
1812 __set_task_comm(tsk, from, false);
1813}
1814
1815extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1816#define get_task_comm(buf, tsk) ({ \
1817 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1818 __get_task_comm(buf, sizeof(buf), tsk); \
1819})
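
/*
 * Usage sketch: the buffer must be a real array of TASK_COMM_LEN bytes,
 * since get_task_comm() checks sizeof(buf) at compile time ('task' is a
 * placeholder):
 *
 *   char comm[TASK_COMM_LEN];
 *
 *   get_task_comm(comm, task);
 *   pr_debug("running on behalf of %s\n", comm);
 *
 * Passing a pointer instead of an array trips the BUILD_BUG_ON().
 */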
1820
1821#ifdef CONFIG_SMP
1822static __always_inline void scheduler_ipi(void)
1823{
1824
1825
1826
1827
1828
1829 preempt_fold_need_resched();
1830}
1831extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1832#else
1833static inline void scheduler_ipi(void) { }
1834static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1835{
1836 return 1;
1837}
1838#endif
1839
1840
1841
1842
1843
1844static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1845{
1846 set_ti_thread_flag(task_thread_info(tsk), flag);
1847}
1848
1849static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1850{
1851 clear_ti_thread_flag(task_thread_info(tsk), flag);
1852}
1853
1854static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1855 bool value)
1856{
1857 update_ti_thread_flag(task_thread_info(tsk), flag, value);
1858}
1859
1860static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1861{
1862 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1863}
1864
1865static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1866{
1867 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1868}
1869
1870static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1871{
1872 return test_ti_thread_flag(task_thread_info(tsk), flag);
1873}
1874
1875static inline void set_tsk_need_resched(struct task_struct *tsk)
1876{
1877 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1878}
1879
1880static inline void clear_tsk_need_resched(struct task_struct *tsk)
1881{
1882 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1883}
1884
1885static inline int test_tsk_need_resched(struct task_struct *tsk)
1886{
1887 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1888}
1889
1890
1891
1892
1893
1894
1895
1896#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
1897extern int __cond_resched(void);
1898
1899#ifdef CONFIG_PREEMPT_DYNAMIC
1900
1901DECLARE_STATIC_CALL(cond_resched, __cond_resched);
1902
1903static __always_inline int _cond_resched(void)
1904{
1905 return static_call_mod(cond_resched)();
1906}
1907
1908#else
1909
1910static inline int _cond_resched(void)
1911{
1912 return __cond_resched();
1913}
1914
1915#endif
1916
1917#else
1918
1919static inline int _cond_resched(void) { return 0; }
1920
1921#endif
1922
1923#define cond_resched() ({ \
1924 ___might_sleep(__FILE__, __LINE__, 0); \
1925 _cond_resched(); \
1926})
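
/*
 * Sketch of the intended use: sprinkle cond_resched() into long loops
 * running in process context so that, on non-preemptible kernels, a
 * higher-priority task still gets to run (process_item() is a
 * hypothetical per-item work function):
 *
 *   for (i = 0; i < nr_items; i++) {
 *           process_item(i);
 *           cond_resched();
 *   }
 *
 * Under full preemption cond_resched() compiles down to (almost)
 * nothing; ___might_sleep() additionally catches calls from atomic
 * context when CONFIG_DEBUG_ATOMIC_SLEEP is enabled.
 */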
1927
1928extern int __cond_resched_lock(spinlock_t *lock);
1929extern int __cond_resched_rwlock_read(rwlock_t *lock);
1930extern int __cond_resched_rwlock_write(rwlock_t *lock);
1931
1932#define cond_resched_lock(lock) ({ \
1933 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1934 __cond_resched_lock(lock); \
1935})
1936
1937#define cond_resched_rwlock_read(lock) ({ \
1938 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
1939 __cond_resched_rwlock_read(lock); \
1940})
1941
1942#define cond_resched_rwlock_write(lock) ({ \
1943 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
1944 __cond_resched_rwlock_write(lock); \
1945})
1946
1947static inline void cond_resched_rcu(void)
1948{
1949#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1950 rcu_read_unlock();
1951 cond_resched();
1952 rcu_read_lock();
1953#endif
1954}
1955
1956
1957
1958
1959
1960
1961static inline int spin_needbreak(spinlock_t *lock)
1962{
1963#ifdef CONFIG_PREEMPTION
1964 return spin_is_contended(lock);
1965#else
1966 return 0;
1967#endif
1968}
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978static inline int rwlock_needbreak(rwlock_t *lock)
1979{
1980#ifdef CONFIG_PREEMPTION
1981 return rwlock_is_contended(lock);
1982#else
1983 return 0;
1984#endif
1985}
1986
1987static __always_inline bool need_resched(void)
1988{
1989 return unlikely(tif_need_resched());
1990}
1991
1992
1993
1994
1995#ifdef CONFIG_SMP
1996
1997static inline unsigned int task_cpu(const struct task_struct *p)
1998{
1999#ifdef CONFIG_THREAD_INFO_IN_TASK
2000 return READ_ONCE(p->cpu);
2001#else
2002 return READ_ONCE(task_thread_info(p)->cpu);
2003#endif
2004}
2005
2006extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2007
2008#else
2009
2010static inline unsigned int task_cpu(const struct task_struct *p)
2011{
2012 return 0;
2013}
2014
2015static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2016{
2017}
2018
2019#endif
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029#ifndef vcpu_is_preempted
2030static inline bool vcpu_is_preempted(int cpu)
2031{
2032 return false;
2033}
2034#endif
2035
2036extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2037extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2038
2039#ifndef TASK_SIZE_OF
2040#define TASK_SIZE_OF(tsk) TASK_SIZE
2041#endif
2042
2043#ifdef CONFIG_SMP
2044
2045unsigned long sched_cpu_util(int cpu, unsigned long max);
2046#endif
2047
2048#ifdef CONFIG_RSEQ
2049
2050
2051
2052
2053
2054enum rseq_event_mask_bits {
2055 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2056 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2057 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2058};
2059
2060enum rseq_event_mask {
2061 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
2062 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
2063 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
2064};
2065
2066static inline void rseq_set_notify_resume(struct task_struct *t)
2067{
2068 if (t->rseq)
2069 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2070}
2071
2072void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2073
2074static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2075 struct pt_regs *regs)
2076{
2077 if (current->rseq)
2078 __rseq_handle_notify_resume(ksig, regs);
2079}
2080
2081static inline void rseq_signal_deliver(struct ksignal *ksig,
2082 struct pt_regs *regs)
2083{
2084 preempt_disable();
2085 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2086 preempt_enable();
2087 rseq_handle_notify_resume(ksig, regs);
2088}
2089
2090
2091static inline void rseq_preempt(struct task_struct *t)
2092{
2093 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2094 rseq_set_notify_resume(t);
2095}
2096
2097
2098static inline void rseq_migrate(struct task_struct *t)
2099{
2100 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2101 rseq_set_notify_resume(t);
2102}
2103
2104
2105
2106
2107
2108static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2109{
2110 if (clone_flags & CLONE_VM) {
2111 t->rseq = NULL;
2112 t->rseq_sig = 0;
2113 t->rseq_event_mask = 0;
2114 } else {
2115 t->rseq = current->rseq;
2116 t->rseq_sig = current->rseq_sig;
2117 t->rseq_event_mask = current->rseq_event_mask;
2118 }
2119}
2120
2121static inline void rseq_execve(struct task_struct *t)
2122{
2123 t->rseq = NULL;
2124 t->rseq_sig = 0;
2125 t->rseq_event_mask = 0;
2126}
2127
2128#else
2129
2130static inline void rseq_set_notify_resume(struct task_struct *t)
2131{
2132}
2133static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2134 struct pt_regs *regs)
2135{
2136}
2137static inline void rseq_signal_deliver(struct ksignal *ksig,
2138 struct pt_regs *regs)
2139{
2140}
2141static inline void rseq_preempt(struct task_struct *t)
2142{
2143}
2144static inline void rseq_migrate(struct task_struct *t)
2145{
2146}
2147static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2148{
2149}
2150static inline void rseq_execve(struct task_struct *t)
2151{
2152}
2153
2154#endif
2155
2156#ifdef CONFIG_DEBUG_RSEQ
2157
2158void rseq_syscall(struct pt_regs *regs);
2159
2160#else
2161
2162static inline void rseq_syscall(struct pt_regs *regs)
2163{
2164}
2165
2166#endif
2167
2168const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2169char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2170int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2171
2172const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2173const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2174const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2175
2176int sched_trace_rq_cpu(struct rq *rq);
2177int sched_trace_rq_cpu_capacity(struct rq *rq);
2178int sched_trace_rq_nr_running(struct rq *rq);
2179
2180const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
2181
2182#endif
2183