1
2#ifndef _LINUX_SCHED_H
3#define _LINUX_SCHED_H
4
5
6
7
8
9
10#include <uapi/linux/sched.h>
11
12#include <asm/current.h>
13
14#include <linux/pid.h>
15#include <linux/sem.h>
16#include <linux/shm.h>
17#include <linux/mutex.h>
18#include <linux/plist.h>
19#include <linux/hrtimer.h>
20#include <linux/irqflags.h>
21#include <linux/seccomp.h>
22#include <linux/nodemask.h>
23#include <linux/rcupdate.h>
24#include <linux/refcount.h>
25#include <linux/resource.h>
26#include <linux/latencytop.h>
27#include <linux/sched/prio.h>
28#include <linux/sched/types.h>
29#include <linux/signal_types.h>
30#include <linux/syscall_user_dispatch.h>
31#include <linux/mm_types_task.h>
32#include <linux/task_io_accounting.h>
33#include <linux/posix-timers.h>
34#include <linux/rseq.h>
35#include <linux/seqlock.h>
36#include <linux/kcsan.h>
37#include <linux/rv.h>
38#include <asm/kmap_size.h>
39
40
41struct audit_context;
42struct backing_dev_info;
43struct bio_list;
44struct blk_plug;
45struct bpf_local_storage;
46struct bpf_run_ctx;
47struct capture_control;
48struct cfs_rq;
49struct fs_struct;
50struct futex_pi_state;
51struct io_context;
52struct io_uring_task;
53struct mempolicy;
54struct nameidata;
55struct nsproxy;
56struct perf_event_context;
57struct pid_namespace;
58struct pipe_inode_info;
59struct rcu_node;
60struct reclaim_state;
61struct robust_list_head;
62struct root_domain;
63struct rq;
64struct sched_attr;
65struct sched_param;
66struct seq_file;
67struct sighand_struct;
68struct signal_struct;
69struct task_delay_info;
70struct task_group;
71
72
73
74
75
76
77
78
79
80
81
82
83
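/*
 * Task state bitmasks. Most of these are used in task->__state;
 * EXIT_DEAD, EXIT_ZOMBIE and EXIT_TRACE are used in task->exit_state.
 */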
84#define TASK_RUNNING 0x0000
85#define TASK_INTERRUPTIBLE 0x0001
86#define TASK_UNINTERRUPTIBLE 0x0002
87#define __TASK_STOPPED 0x0004
88#define __TASK_TRACED 0x0008
89
90#define EXIT_DEAD 0x0010
91#define EXIT_ZOMBIE 0x0020
92#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
93
94#define TASK_PARKED 0x0040
95#define TASK_DEAD 0x0080
96#define TASK_WAKEKILL 0x0100
97#define TASK_WAKING 0x0200
98#define TASK_NOLOAD 0x0400
99#define TASK_NEW 0x0800
100
101#define TASK_RTLOCK_WAIT 0x1000
102#define TASK_STATE_MAX 0x2000
103
104
105#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
106#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
107#define TASK_TRACED __TASK_TRACED
108
109#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
110
111
112#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
113
114
115#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
116 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
117 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
118 TASK_PARKED)
119
120#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
121
122#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
123#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
124#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
125
126
127
128
129
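/*
 * Special states are those that do not use the normal wait-loop pattern and
 * must be set with set_special_state(), i.e. under ->pi_lock.
 */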
130#define is_special_task_state(state) \
131 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
132
133#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
134# define debug_normal_state_change(state_value) \
135 do { \
136 WARN_ON_ONCE(is_special_task_state(state_value)); \
137 current->task_state_change = _THIS_IP_; \
138 } while (0)
139
140# define debug_special_state_change(state_value) \
141 do { \
142 WARN_ON_ONCE(!is_special_task_state(state_value)); \
143 current->task_state_change = _THIS_IP_; \
144 } while (0)
145
146# define debug_rtlock_wait_set_state() \
147 do { \
148 current->saved_state_change = current->task_state_change;\
149 current->task_state_change = _THIS_IP_; \
150 } while (0)
151
152# define debug_rtlock_wait_restore_state() \
153 do { \
154 current->task_state_change = current->saved_state_change;\
155 } while (0)
156
157#else
158# define debug_normal_state_change(cond) do { } while (0)
159# define debug_special_state_change(cond) do { } while (0)
160# define debug_rtlock_wait_set_state() do { } while (0)
161# define debug_rtlock_wait_restore_state() do { } while (0)
162#endif
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
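/*
 * set_current_state() includes a full memory barrier (smp_store_mb()) so the
 * store of the new state is ordered against a subsequent test of the wakeup
 * condition. __set_current_state() is the non-serialised variant and may only
 * be used when ordering against wakeups is otherwise guaranteed.
 */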
201#define __set_current_state(state_value) \
202 do { \
203 debug_normal_state_change((state_value)); \
204 WRITE_ONCE(current->__state, (state_value)); \
205 } while (0)
206
207#define set_current_state(state_value) \
208 do { \
209 debug_normal_state_change((state_value)); \
210 smp_store_mb(current->__state, (state_value)); \
211 } while (0)
212
213
214
215
216
217
218
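/*
 * set_special_state() takes ->pi_lock so that the state change cannot race
 * with a concurrent wakeup storing TASK_RUNNING.
 */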
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags;					\
									\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		debug_special_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
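/*
 * PREEMPT_RT specific: save the task's current ->__state and block in
 * TASK_RTLOCK_WAIT while sleeping on an rtmutex based spinlock; the saved
 * state is restored once the lock wait is over. Both helpers require
 * interrupts disabled and serialise via ->pi_lock.
 */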
#define current_save_and_set_rtlock_wait_state()			\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		current->saved_state = current->__state;		\
		debug_rtlock_wait_set_state();				\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);

#define current_restore_rtlock_saved_state()				\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		debug_rtlock_wait_restore_state();			\
		WRITE_ONCE(current->__state, current->saved_state);	\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0);
273
274#define get_current_state() READ_ONCE(current->__state)
275
276
277
278
279
280enum {
281 TASK_COMM_LEN = 16,
282};
283
284extern void scheduler_tick(void);
285
286#define MAX_SCHEDULE_TIMEOUT LONG_MAX
287
288extern long schedule_timeout(long timeout);
289extern long schedule_timeout_interruptible(long timeout);
290extern long schedule_timeout_killable(long timeout);
291extern long schedule_timeout_uninterruptible(long timeout);
292extern long schedule_timeout_idle(long timeout);
293asmlinkage void schedule(void);
294extern void schedule_preempt_disabled(void);
295asmlinkage void preempt_schedule_irq(void);
296#ifdef CONFIG_PREEMPT_RT
297 extern void schedule_rtlock(void);
298#endif
299
300extern int __must_check io_schedule_prepare(void);
301extern void io_schedule_finish(int token);
302extern long io_schedule_timeout(long timeout);
303extern void io_schedule(void);
304
305
306
307
308
309
310
311
312
313
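/*
 * struct prev_cputime - snapshot of previously accounted user/system time,
 * used to guarantee that reported cputime values increase monotonically.
 */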
314struct prev_cputime {
315#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
316 u64 utime;
317 u64 stime;
318 raw_spinlock_t lock;
319#endif
320};
321
322enum vtime_state {
323
324 VTIME_INACTIVE = 0,
325
326 VTIME_IDLE,
327
328 VTIME_SYS,
329
330 VTIME_USER,
331
332 VTIME_GUEST,
333};
334
335struct vtime {
336 seqcount_t seqcount;
337 unsigned long long starttime;
338 enum vtime_state state;
339 unsigned int cpu;
340 u64 utime;
341 u64 stime;
342 u64 gtime;
343};
344
345
346
347
348
349
350
351enum uclamp_id {
352 UCLAMP_MIN = 0,
353 UCLAMP_MAX,
354 UCLAMP_CNT
355};
356
357#ifdef CONFIG_SMP
358extern struct root_domain def_root_domain;
359extern struct mutex sched_domains_mutex;
360#endif
361
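/*
 * struct sched_info - scheduler run/wait accounting (CONFIG_SCHED_INFO):
 * number of times run on a CPU, cumulative run-queue wait time, and the
 * timestamps of the last arrival on a CPU and the last enqueue.
 */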
362struct sched_info {
363#ifdef CONFIG_SCHED_INFO
364
365
366
367 unsigned long pcount;
368
369
370 unsigned long long run_delay;
371
372
373
374
375 unsigned long long last_arrival;
376
377
378 unsigned long long last_queued;
379
380#endif
381};
382
383
384
385
386
387
388
389
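/*
 * Fixed point arithmetic used for scheduler load, utilization and CPU
 * capacity values: a resolution of 2^10, i.e. 1024 units.
 */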
390# define SCHED_FIXEDPOINT_SHIFT 10
391# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
392
393
394# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
395# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
396
397struct load_weight {
398 unsigned long weight;
399 u32 inv_weight;
400};
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
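/*
 * struct util_est - estimated utilization of a task/cfs_rq:
 * @enqueued: snapshot of the utilization at the last enqueue
 * @ewma:     exponentially weighted moving average of past enqueued values
 */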
431struct util_est {
432 unsigned int enqueued;
433 unsigned int ewma;
434#define UTIL_EST_WEIGHT_SHIFT 2
435#define UTIL_AVG_UNCHANGED 0x80000000
436} __attribute__((__aligned__(sizeof(u64))));
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
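/*
 * struct sched_avg - PELT (Per Entity Load Tracking) state: decaying
 * geometric series sums and the load/runnable/util averages derived from
 * them, updated in 1024us periods.
 */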
483struct sched_avg {
484 u64 last_update_time;
485 u64 load_sum;
486 u64 runnable_sum;
487 u32 util_sum;
488 u32 period_contrib;
489 unsigned long load_avg;
490 unsigned long runnable_avg;
491 unsigned long util_avg;
492 struct util_est util_est;
493} ____cacheline_aligned;
494
495struct sched_statistics {
496#ifdef CONFIG_SCHEDSTATS
497 u64 wait_start;
498 u64 wait_max;
499 u64 wait_count;
500 u64 wait_sum;
501 u64 iowait_count;
502 u64 iowait_sum;
503
504 u64 sleep_start;
505 u64 sleep_max;
506 s64 sum_sleep_runtime;
507
508 u64 block_start;
509 u64 block_max;
510 s64 sum_block_runtime;
511
512 u64 exec_max;
513 u64 slice_max;
514
515 u64 nr_migrations_cold;
516 u64 nr_failed_migrations_affine;
517 u64 nr_failed_migrations_running;
518 u64 nr_failed_migrations_hot;
519 u64 nr_forced_migrations;
520
521 u64 nr_wakeups;
522 u64 nr_wakeups_sync;
523 u64 nr_wakeups_migrate;
524 u64 nr_wakeups_local;
525 u64 nr_wakeups_remote;
526 u64 nr_wakeups_affine;
527 u64 nr_wakeups_affine_attempts;
528 u64 nr_wakeups_passive;
529 u64 nr_wakeups_idle;
530
531#ifdef CONFIG_SCHED_CORE
532 u64 core_forceidle_sum;
533#endif
534#endif
535} ____cacheline_aligned;
536
537struct sched_entity {
538
539 struct load_weight load;
540 struct rb_node run_node;
541 struct list_head group_node;
542 unsigned int on_rq;
543
544 u64 exec_start;
545 u64 sum_exec_runtime;
546 u64 vruntime;
547 u64 prev_sum_exec_runtime;
548
549 u64 nr_migrations;
550
551#ifdef CONFIG_FAIR_GROUP_SCHED
552 int depth;
553 struct sched_entity *parent;
554
555 struct cfs_rq *cfs_rq;
556
557 struct cfs_rq *my_q;
558
559 unsigned long runnable_weight;
560#endif
561
562#ifdef CONFIG_SMP
563
564
565
566
567
568
569 struct sched_avg avg;
570#endif
571};
572
573struct sched_rt_entity {
574 struct list_head run_list;
575 unsigned long timeout;
576 unsigned long watchdog_stamp;
577 unsigned int time_slice;
578 unsigned short on_rq;
579 unsigned short on_list;
580
581 struct sched_rt_entity *back;
582#ifdef CONFIG_RT_GROUP_SCHED
583 struct sched_rt_entity *parent;
584
585 struct rt_rq *rt_rq;
586
587 struct rt_rq *my_q;
588#endif
589} __randomize_layout;
590
591struct sched_dl_entity {
592 struct rb_node rb_node;
593
594
595
596
597
598
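	/*
	 * Original SCHED_DEADLINE parameters, copied from the user supplied
	 * sched_attr at sched_setattr() time: runtime, relative deadline,
	 * period and the derived bandwidth/density.
	 */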
599 u64 dl_runtime;
600 u64 dl_deadline;
601 u64 dl_period;
602 u64 dl_bw;
603 u64 dl_density;
604
605
606
607
608
609
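	/*
	 * Actual scheduling parameters, updated by the scheduler while the
	 * task runs: remaining runtime and the absolute deadline of the
	 * current instance.
	 */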
610 s64 runtime;
611 u64 deadline;
612 unsigned int flags;
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634 unsigned int dl_throttled : 1;
635 unsigned int dl_yielded : 1;
636 unsigned int dl_non_contending : 1;
637 unsigned int dl_overrun : 1;
638
639
640
641
642
643 struct hrtimer dl_timer;
644
645
646
647
648
649
650
651
652 struct hrtimer inactive_timer;
653
654#ifdef CONFIG_RT_MUTEXES
655
656
657
658
659
660 struct sched_dl_entity *pi_se;
661#endif
662};
663
664#ifdef CONFIG_UCLAMP_TASK
665
666#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
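/*
 * struct uclamp_se - utilization clamp value for a scheduling entity:
 * the requested clamp @value, the @bucket_id it maps to, and whether the
 * clamp is currently @active and/or @user_defined.
 */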
691struct uclamp_se {
692 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
693 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
694 unsigned int active : 1;
695 unsigned int user_defined : 1;
696};
697#endif
698
699union rcu_special {
700 struct {
701 u8 blocked;
702 u8 need_qs;
703 u8 exp_hint;
704 u8 need_mb;
705 } b;
706 u32 s;
707};
708
709enum perf_event_task_context {
710 perf_invalid_context = -1,
711 perf_hw_context = 0,
712 perf_sw_context,
713 perf_nr_task_contexts,
714};
715
716struct wake_q_node {
717 struct wake_q_node *next;
718};
719
720struct kmap_ctrl {
721#ifdef CONFIG_KMAP_LOCAL
722 int idx;
723 pte_t pteval[KM_MAX_IDX];
724#endif
725};
726
727struct task_struct {
728#ifdef CONFIG_THREAD_INFO_IN_TASK
729
730
731
732
733 struct thread_info thread_info;
734#endif
735 unsigned int __state;
736
737#ifdef CONFIG_PREEMPT_RT
738
739 unsigned int saved_state;
740#endif
741
742
743
744
745
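	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */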
746 randomized_struct_fields_start
747
748 void *stack;
749 refcount_t usage;
750
751 unsigned int flags;
752 unsigned int ptrace;
753
754#ifdef CONFIG_SMP
755 int on_cpu;
756 struct __call_single_node wake_entry;
757 unsigned int wakee_flips;
758 unsigned long wakee_flip_decay_ts;
759 struct task_struct *last_wakee;
760
761
762
763
764
765
766
767
768 int recent_used_cpu;
769 int wake_cpu;
770#endif
771 int on_rq;
772
773 int prio;
774 int static_prio;
775 int normal_prio;
776 unsigned int rt_priority;
777
778 struct sched_entity se;
779 struct sched_rt_entity rt;
780 struct sched_dl_entity dl;
781 const struct sched_class *sched_class;
782
783#ifdef CONFIG_SCHED_CORE
784 struct rb_node core_node;
785 unsigned long core_cookie;
786 unsigned int core_occupation;
787#endif
788
789#ifdef CONFIG_CGROUP_SCHED
790 struct task_group *sched_task_group;
791#endif
792
793#ifdef CONFIG_UCLAMP_TASK
794
795
796
797
798 struct uclamp_se uclamp_req[UCLAMP_CNT];
799
800
801
802
803 struct uclamp_se uclamp[UCLAMP_CNT];
804#endif
805
806 struct sched_statistics stats;
807
808#ifdef CONFIG_PREEMPT_NOTIFIERS
809
810 struct hlist_head preempt_notifiers;
811#endif
812
813#ifdef CONFIG_BLK_DEV_IO_TRACE
814 unsigned int btrace_seq;
815#endif
816
817 unsigned int policy;
818 int nr_cpus_allowed;
819 const cpumask_t *cpus_ptr;
820 cpumask_t *user_cpus_ptr;
821 cpumask_t cpus_mask;
822 void *migration_pending;
823#ifdef CONFIG_SMP
824 unsigned short migration_disabled;
825#endif
826 unsigned short migration_flags;
827
828#ifdef CONFIG_PREEMPT_RCU
829 int rcu_read_lock_nesting;
830 union rcu_special rcu_read_unlock_special;
831 struct list_head rcu_node_entry;
832 struct rcu_node *rcu_blocked_node;
833#endif
834
835#ifdef CONFIG_TASKS_RCU
836 unsigned long rcu_tasks_nvcsw;
837 u8 rcu_tasks_holdout;
838 u8 rcu_tasks_idx;
839 int rcu_tasks_idle_cpu;
840 struct list_head rcu_tasks_holdout_list;
841#endif
842
843#ifdef CONFIG_TASKS_TRACE_RCU
844 int trc_reader_nesting;
845 int trc_ipi_to_cpu;
846 union rcu_special trc_reader_special;
847 struct list_head trc_holdout_list;
848 struct list_head trc_blkd_node;
849 int trc_blkd_cpu;
850#endif
851
852 struct sched_info sched_info;
853
854 struct list_head tasks;
855#ifdef CONFIG_SMP
856 struct plist_node pushable_tasks;
857 struct rb_node pushable_dl_tasks;
858#endif
859
860 struct mm_struct *mm;
861 struct mm_struct *active_mm;
862
863
864 struct vmacache vmacache;
865
866#ifdef SPLIT_RSS_COUNTING
867 struct task_rss_stat rss_stat;
868#endif
869 int exit_state;
870 int exit_code;
871 int exit_signal;
872
873 int pdeath_signal;
874
875 unsigned long jobctl;
876
877
878 unsigned int personality;
879
880
881 unsigned sched_reset_on_fork:1;
882 unsigned sched_contributes_to_load:1;
883 unsigned sched_migrated:1;
884#ifdef CONFIG_PSI
885 unsigned sched_psi_wake_requeue:1;
886#endif
887
888
889 unsigned :0;
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906 unsigned sched_remote_wakeup:1;
907
908
909 unsigned in_execve:1;
910 unsigned in_iowait:1;
911#ifndef TIF_RESTORE_SIGMASK
912 unsigned restore_sigmask:1;
913#endif
914#ifdef CONFIG_MEMCG
915 unsigned in_user_fault:1;
916#endif
917#ifdef CONFIG_COMPAT_BRK
918 unsigned brk_randomized:1;
919#endif
920#ifdef CONFIG_CGROUPS
921
922 unsigned no_cgroup_migration:1;
923
924 unsigned frozen:1;
925#endif
926#ifdef CONFIG_BLK_CGROUP
927 unsigned use_memdelay:1;
928#endif
929#ifdef CONFIG_PSI
930
931 unsigned in_memstall:1;
932#endif
933#ifdef CONFIG_PAGE_OWNER
934
935 unsigned in_page_owner:1;
936#endif
937#ifdef CONFIG_EVENTFD
938
939 unsigned in_eventfd:1;
940#endif
941#ifdef CONFIG_IOMMU_SVA
942 unsigned pasid_activated:1;
943#endif
944#ifdef CONFIG_CPU_SUP_INTEL
945 unsigned reported_split_lock:1;
946#endif
947
948 unsigned long atomic_flags;
949
950 struct restart_block restart_block;
951
952 pid_t pid;
953 pid_t tgid;
954
955#ifdef CONFIG_STACKPROTECTOR
956
957 unsigned long stack_canary;
958#endif
959
960
961
962
963
964
965
966 struct task_struct __rcu *real_parent;
967
968
969 struct task_struct __rcu *parent;
970
971
972
973
974 struct list_head children;
975 struct list_head sibling;
976 struct task_struct *group_leader;
977
978
979
980
981
982
983
984 struct list_head ptraced;
985 struct list_head ptrace_entry;
986
987
988 struct pid *thread_pid;
989 struct hlist_node pid_links[PIDTYPE_MAX];
990 struct list_head thread_group;
991 struct list_head thread_node;
992
993 struct completion *vfork_done;
994
995
996 int __user *set_child_tid;
997
998
999 int __user *clear_child_tid;
1000
1001
1002 void *worker_private;
1003
1004 u64 utime;
1005 u64 stime;
1006#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1007 u64 utimescaled;
1008 u64 stimescaled;
1009#endif
1010 u64 gtime;
1011 struct prev_cputime prev_cputime;
1012#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1013 struct vtime vtime;
1014#endif
1015
1016#ifdef CONFIG_NO_HZ_FULL
1017 atomic_t tick_dep_mask;
1018#endif
1019
1020 unsigned long nvcsw;
1021 unsigned long nivcsw;
1022
1023
1024 u64 start_time;
1025
1026
1027 u64 start_boottime;
1028
1029
1030 unsigned long min_flt;
1031 unsigned long maj_flt;
1032
1033
1034 struct posix_cputimers posix_cputimers;
1035
1036#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1037 struct posix_cputimers_work posix_cputimers_work;
1038#endif
1039
1040
1041
1042
1043 const struct cred __rcu *ptracer_cred;
1044
1045
1046 const struct cred __rcu *real_cred;
1047
1048
1049 const struct cred __rcu *cred;
1050
1051#ifdef CONFIG_KEYS
1052
1053 struct key *cached_requested_key;
1054#endif
1055
1056
1057
1058
1059
1060
1061
1062
1063 char comm[TASK_COMM_LEN];
1064
1065 struct nameidata *nameidata;
1066
1067#ifdef CONFIG_SYSVIPC
1068 struct sysv_sem sysvsem;
1069 struct sysv_shm sysvshm;
1070#endif
1071#ifdef CONFIG_DETECT_HUNG_TASK
1072 unsigned long last_switch_count;
1073 unsigned long last_switch_time;
1074#endif
1075
1076 struct fs_struct *fs;
1077
1078
1079 struct files_struct *files;
1080
1081#ifdef CONFIG_IO_URING
1082 struct io_uring_task *io_uring;
1083#endif
1084
1085
1086 struct nsproxy *nsproxy;
1087
1088
1089 struct signal_struct *signal;
1090 struct sighand_struct __rcu *sighand;
1091 sigset_t blocked;
1092 sigset_t real_blocked;
1093
1094 sigset_t saved_sigmask;
1095 struct sigpending pending;
1096 unsigned long sas_ss_sp;
1097 size_t sas_ss_size;
1098 unsigned int sas_ss_flags;
1099
1100 struct callback_head *task_works;
1101
1102#ifdef CONFIG_AUDIT
1103#ifdef CONFIG_AUDITSYSCALL
1104 struct audit_context *audit_context;
1105#endif
1106 kuid_t loginuid;
1107 unsigned int sessionid;
1108#endif
1109 struct seccomp seccomp;
1110 struct syscall_user_dispatch syscall_dispatch;
1111
1112
1113 u64 parent_exec_id;
1114 u64 self_exec_id;
1115
1116
1117 spinlock_t alloc_lock;
1118
1119
1120 raw_spinlock_t pi_lock;
1121
1122 struct wake_q_node wake_q;
1123
1124#ifdef CONFIG_RT_MUTEXES
1125
1126 struct rb_root_cached pi_waiters;
1127
1128 struct task_struct *pi_top_task;
1129
1130 struct rt_mutex_waiter *pi_blocked_on;
1131#endif
1132
1133#ifdef CONFIG_DEBUG_MUTEXES
1134
1135 struct mutex_waiter *blocked_on;
1136#endif
1137
1138#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1139 int non_block_count;
1140#endif
1141
1142#ifdef CONFIG_TRACE_IRQFLAGS
1143 struct irqtrace_events irqtrace;
1144 unsigned int hardirq_threaded;
1145 u64 hardirq_chain_key;
1146 int softirqs_enabled;
1147 int softirq_context;
1148 int irq_config;
1149#endif
1150#ifdef CONFIG_PREEMPT_RT
1151 int softirq_disable_cnt;
1152#endif
1153
1154#ifdef CONFIG_LOCKDEP
1155# define MAX_LOCK_DEPTH 48UL
1156 u64 curr_chain_key;
1157 int lockdep_depth;
1158 unsigned int lockdep_recursion;
1159 struct held_lock held_locks[MAX_LOCK_DEPTH];
1160#endif
1161
1162#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1163 unsigned int in_ubsan;
1164#endif
1165
1166
1167 void *journal_info;
1168
1169
1170 struct bio_list *bio_list;
1171
1172
1173 struct blk_plug *plug;
1174
1175
1176 struct reclaim_state *reclaim_state;
1177
1178 struct backing_dev_info *backing_dev_info;
1179
1180 struct io_context *io_context;
1181
1182#ifdef CONFIG_COMPACTION
1183 struct capture_control *capture_control;
1184#endif
1185
1186 unsigned long ptrace_message;
1187 kernel_siginfo_t *last_siginfo;
1188
1189 struct task_io_accounting ioac;
1190#ifdef CONFIG_PSI
1191
1192 unsigned int psi_flags;
1193#endif
1194#ifdef CONFIG_TASK_XACCT
1195
1196 u64 acct_rss_mem1;
1197
1198 u64 acct_vm_mem1;
1199
1200 u64 acct_timexpd;
1201#endif
1202#ifdef CONFIG_CPUSETS
1203
1204 nodemask_t mems_allowed;
1205
1206 seqcount_spinlock_t mems_allowed_seq;
1207 int cpuset_mem_spread_rotor;
1208 int cpuset_slab_spread_rotor;
1209#endif
1210#ifdef CONFIG_CGROUPS
1211
1212 struct css_set __rcu *cgroups;
1213
1214 struct list_head cg_list;
1215#endif
1216#ifdef CONFIG_X86_CPU_RESCTRL
1217 u32 closid;
1218 u32 rmid;
1219#endif
1220#ifdef CONFIG_FUTEX
1221 struct robust_list_head __user *robust_list;
1222#ifdef CONFIG_COMPAT
1223 struct compat_robust_list_head __user *compat_robust_list;
1224#endif
1225 struct list_head pi_state_list;
1226 struct futex_pi_state *pi_state_cache;
1227 struct mutex futex_exit_mutex;
1228 unsigned int futex_state;
1229#endif
1230#ifdef CONFIG_PERF_EVENTS
1231 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1232 struct mutex perf_event_mutex;
1233 struct list_head perf_event_list;
1234#endif
1235#ifdef CONFIG_DEBUG_PREEMPT
1236 unsigned long preempt_disable_ip;
1237#endif
1238#ifdef CONFIG_NUMA
1239
1240 struct mempolicy *mempolicy;
1241 short il_prev;
1242 short pref_node_fork;
1243#endif
1244#ifdef CONFIG_NUMA_BALANCING
1245 int numa_scan_seq;
1246 unsigned int numa_scan_period;
1247 unsigned int numa_scan_period_max;
1248 int numa_preferred_nid;
1249 unsigned long numa_migrate_retry;
1250
1251 u64 node_stamp;
1252 u64 last_task_numa_placement;
1253 u64 last_sum_exec_runtime;
1254 struct callback_head numa_work;
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264 struct numa_group __rcu *numa_group;
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280 unsigned long *numa_faults;
1281 unsigned long total_numa_faults;
1282
1283
1284
1285
1286
1287
1288
1289 unsigned long numa_faults_locality[3];
1290
1291 unsigned long numa_pages_migrated;
1292#endif
1293
1294#ifdef CONFIG_RSEQ
1295 struct rseq __user *rseq;
1296 u32 rseq_sig;
1297
1298
1299
1300
1301 unsigned long rseq_event_mask;
1302#endif
1303
1304 struct tlbflush_unmap_batch tlb_ubc;
1305
1306 union {
1307 refcount_t rcu_users;
1308 struct rcu_head rcu;
1309 };
1310
1311
1312 struct pipe_inode_info *splice_pipe;
1313
1314 struct page_frag task_frag;
1315
1316#ifdef CONFIG_TASK_DELAY_ACCT
1317 struct task_delay_info *delays;
1318#endif
1319
1320#ifdef CONFIG_FAULT_INJECTION
1321 int make_it_fail;
1322 unsigned int fail_nth;
1323#endif
1324
1325
1326
1327
1328 int nr_dirtied;
1329 int nr_dirtied_pause;
1330
1331 unsigned long dirty_paused_when;
1332
1333#ifdef CONFIG_LATENCYTOP
1334 int latency_record_count;
1335 struct latency_record latency_record[LT_SAVECOUNT];
1336#endif
1337
1338
1339
1340
1341 u64 timer_slack_ns;
1342 u64 default_timer_slack_ns;
1343
1344#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1345 unsigned int kasan_depth;
1346#endif
1347
1348#ifdef CONFIG_KCSAN
1349 struct kcsan_ctx kcsan_ctx;
1350#ifdef CONFIG_TRACE_IRQFLAGS
1351 struct irqtrace_events kcsan_save_irqtrace;
1352#endif
1353#ifdef CONFIG_KCSAN_WEAK_MEMORY
1354 int kcsan_stack_depth;
1355#endif
1356#endif
1357
1358#if IS_ENABLED(CONFIG_KUNIT)
1359 struct kunit *kunit_test;
1360#endif
1361
1362#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1363
1364 int curr_ret_stack;
1365 int curr_ret_depth;
1366
1367
1368 struct ftrace_ret_stack *ret_stack;
1369
1370
1371 unsigned long long ftrace_timestamp;
1372
1373
1374
1375
1376
1377 atomic_t trace_overrun;
1378
1379
1380 atomic_t tracing_graph_pause;
1381#endif
1382
1383#ifdef CONFIG_TRACING
1384
1385 unsigned long trace;
1386
1387
1388 unsigned long trace_recursion;
1389#endif
1390
1391#ifdef CONFIG_KCOV
1392
1393
1394
1395 unsigned int kcov_mode;
1396
1397
1398 unsigned int kcov_size;
1399
1400
1401 void *kcov_area;
1402
1403
1404 struct kcov *kcov;
1405
1406
1407 u64 kcov_handle;
1408
1409
1410 int kcov_sequence;
1411
1412
1413 unsigned int kcov_softirq;
1414#endif
1415
1416#ifdef CONFIG_MEMCG
1417 struct mem_cgroup *memcg_in_oom;
1418 gfp_t memcg_oom_gfp_mask;
1419 int memcg_oom_order;
1420
1421
1422 unsigned int memcg_nr_pages_over_high;
1423
1424
1425 struct mem_cgroup *active_memcg;
1426#endif
1427
1428#ifdef CONFIG_BLK_CGROUP
1429 struct request_queue *throttle_queue;
1430#endif
1431
1432#ifdef CONFIG_UPROBES
1433 struct uprobe_task *utask;
1434#endif
1435#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1436 unsigned int sequential_io;
1437 unsigned int sequential_io_avg;
1438#endif
1439 struct kmap_ctrl kmap_ctrl;
1440#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1441 unsigned long task_state_change;
1442# ifdef CONFIG_PREEMPT_RT
1443 unsigned long saved_state_change;
1444# endif
1445#endif
1446 int pagefault_disabled;
1447#ifdef CONFIG_MMU
1448 struct task_struct *oom_reaper_list;
1449 struct timer_list oom_reaper_timer;
1450#endif
1451#ifdef CONFIG_VMAP_STACK
1452 struct vm_struct *stack_vm_area;
1453#endif
1454#ifdef CONFIG_THREAD_INFO_IN_TASK
1455
1456 refcount_t stack_refcount;
1457#endif
1458#ifdef CONFIG_LIVEPATCH
1459 int patch_state;
1460#endif
1461#ifdef CONFIG_SECURITY
1462
1463 void *security;
1464#endif
1465#ifdef CONFIG_BPF_SYSCALL
1466
1467 struct bpf_local_storage __rcu *bpf_storage;
1468
1469 struct bpf_run_ctx *bpf_ctx;
1470#endif
1471
1472#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1473 unsigned long lowest_stack;
1474 unsigned long prev_lowest_stack;
1475#endif
1476
1477#ifdef CONFIG_X86_MCE
1478 void __user *mce_vaddr;
1479 __u64 mce_kflags;
1480 u64 mce_addr;
1481 __u64 mce_ripv : 1,
1482 mce_whole_page : 1,
1483 __mce_reserved : 62;
1484 struct callback_head mce_kill_me;
1485 int mce_count;
1486#endif
1487
1488#ifdef CONFIG_KRETPROBES
1489 struct llist_head kretprobe_instances;
1490#endif
1491#ifdef CONFIG_RETHOOK
1492 struct llist_head rethooks;
1493#endif
1494
1495#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1496
1497
1498
1499
1500
1501
1502 struct callback_head l1d_flush_kill;
1503#endif
1504
1505#ifdef CONFIG_RV
1506
1507
1508
1509
1510
1511
1512 union rv_task_monitor rv[RV_PER_TASK_MONITORS];
1513#endif
1514
1515
1516
1517
1518
1519 randomized_struct_fields_end
1520
1521
1522 struct thread_struct thread;
1523
1524
1525
1526
1527
1528
1529
1530};
1531
1532static inline struct pid *task_pid(struct task_struct *task)
1533{
1534 return task->thread_pid;
1535}
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
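/*
 * The helpers below return the task's pids as seen from different namespaces:
 *
 * task_xid_nr()    : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr()   : virtual id, i.e. the id seen from the pid namespace of
 *                    current;
 * task_xid_nr_ns() : id seen from the namespace specified.
 */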
1548pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1549
1550static inline pid_t task_pid_nr(struct task_struct *tsk)
1551{
1552 return tsk->pid;
1553}
1554
1555static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1556{
1557 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1558}
1559
1560static inline pid_t task_pid_vnr(struct task_struct *tsk)
1561{
1562 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1563}
1564
1565
1566static inline pid_t task_tgid_nr(struct task_struct *tsk)
1567{
1568 return tsk->tgid;
1569}
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
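/*
 * pid_alive - check that a task structure is not stale.
 *
 * Test if a process is not yet dead (at most zombie state). If pid_alive()
 * fails, pointers within the task structure can be stale and must not be
 * dereferenced.
 */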
1581static inline int pid_alive(const struct task_struct *p)
1582{
1583 return p->thread_pid != NULL;
1584}
1585
1586static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1587{
1588 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1589}
1590
1591static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1592{
1593 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1594}
1595
1596
1597static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1598{
1599 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1600}
1601
1602static inline pid_t task_session_vnr(struct task_struct *tsk)
1603{
1604 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1605}
1606
1607static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1608{
1609 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1610}
1611
1612static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1613{
1614 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1615}
1616
1617static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1618{
1619 pid_t pid = 0;
1620
1621 rcu_read_lock();
1622 if (pid_alive(tsk))
1623 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1624 rcu_read_unlock();
1625
1626 return pid;
1627}
1628
1629static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1630{
1631 return task_ppid_nr_ns(tsk, &init_pid_ns);
1632}
1633
1634
1635static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1636{
1637 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1638}
1639
1640#define TASK_REPORT_IDLE (TASK_REPORT + 1)
1641#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)
1642
1643static inline unsigned int __task_state_index(unsigned int tsk_state,
1644 unsigned int tsk_exit_state)
1645{
1646 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
1647
1648 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1649
1650 if (tsk_state == TASK_IDLE)
1651 state = TASK_REPORT_IDLE;
1652
1653
1654
1655
1656
1657
1658 if (tsk_state == TASK_RTLOCK_WAIT)
1659 state = TASK_UNINTERRUPTIBLE;
1660
1661 return fls(state);
1662}
1663
1664static inline unsigned int task_state_index(struct task_struct *tsk)
1665{
1666 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1667}
1668
1669static inline char task_index_to_char(unsigned int state)
1670{
1671 static const char state_char[] = "RSDTtXZPI";
1672
1673 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1674
1675 return state_char[state];
1676}
1677
1678static inline char task_state_to_char(struct task_struct *tsk)
1679{
1680 return task_index_to_char(task_state_index(tsk));
1681}
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
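/*
 * is_global_init - check if a task structure is the init task, i.e. the
 * first user space task the kernel created. Since init may have sub-threads,
 * this checks the tgid rather than the pid.
 */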
1692static inline int is_global_init(struct task_struct *tsk)
1693{
1694 return task_tgid_nr(tsk) == 1;
1695}
1696
1697extern struct pid *cad_pid;
1698
1699
1700
1701
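/*
 * Per process flags (task->flags):
 */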
1702#define PF_VCPU 0x00000001
1703#define PF_IDLE 0x00000002
1704#define PF_EXITING 0x00000004
1705#define PF_POSTCOREDUMP 0x00000008
1706#define PF_IO_WORKER 0x00000010
1707#define PF_WQ_WORKER 0x00000020
1708#define PF_FORKNOEXEC 0x00000040
1709#define PF_MCE_PROCESS 0x00000080
1710#define PF_SUPERPRIV 0x00000100
1711#define PF_DUMPCORE 0x00000200
1712#define PF_SIGNALED 0x00000400
1713#define PF_MEMALLOC 0x00000800
1714#define PF_NPROC_EXCEEDED 0x00001000
1715#define PF_USED_MATH 0x00002000
1716#define PF_NOFREEZE 0x00008000
1717#define PF_FROZEN 0x00010000
1718#define PF_KSWAPD 0x00020000
1719#define PF_MEMALLOC_NOFS 0x00040000
1720#define PF_MEMALLOC_NOIO 0x00080000
1721#define PF_LOCAL_THROTTLE 0x00100000
1722
1723#define PF_KTHREAD 0x00200000
1724#define PF_RANDOMIZE 0x00400000
1725#define PF_NO_SETAFFINITY 0x04000000
1726#define PF_MCE_EARLY 0x08000000
1727#define PF_MEMALLOC_PIN 0x10000000
1728#define PF_FREEZER_SKIP 0x40000000
1729#define PF_SUSPEND_TASK 0x80000000
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1743#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1744#define clear_used_math() clear_stopped_child_used_math(current)
1745#define set_used_math() set_stopped_child_used_math(current)
1746
1747#define conditional_stopped_child_used_math(condition, child) \
1748 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1749
1750#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)
1751
1752#define copy_to_stopped_child_used_math(child) \
1753 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1754
1755
1756#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1757#define used_math() tsk_used_math(current)
1758
1759static __always_inline bool is_percpu_thread(void)
1760{
1761#ifdef CONFIG_SMP
1762 return (current->flags & PF_NO_SETAFFINITY) &&
1763 (current->nr_cpus_allowed == 1);
1764#else
1765 return true;
1766#endif
1767}
1768
1769
1770#define PFA_NO_NEW_PRIVS 0
1771#define PFA_SPREAD_PAGE 1
1772#define PFA_SPREAD_SLAB 2
1773#define PFA_SPEC_SSB_DISABLE 3
1774#define PFA_SPEC_SSB_FORCE_DISABLE 4
1775#define PFA_SPEC_IB_DISABLE 5
1776#define PFA_SPEC_IB_FORCE_DISABLE 6
1777#define PFA_SPEC_SSB_NOEXEC 7
1778
1779#define TASK_PFA_TEST(name, func) \
1780 static inline bool task_##func(struct task_struct *p) \
1781 { return test_bit(PFA_##name, &p->atomic_flags); }
1782
1783#define TASK_PFA_SET(name, func) \
1784 static inline void task_set_##func(struct task_struct *p) \
1785 { set_bit(PFA_##name, &p->atomic_flags); }
1786
1787#define TASK_PFA_CLEAR(name, func) \
1788 static inline void task_clear_##func(struct task_struct *p) \
1789 { clear_bit(PFA_##name, &p->atomic_flags); }
1790
1791TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1792TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1793
1794TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1795TASK_PFA_SET(SPREAD_PAGE, spread_page)
1796TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1797
1798TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1799TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1800TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1801
1802TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1803TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1804TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1805
1806TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1807TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1808TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1809
1810TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1811TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1812
1813TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1814TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1815TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1816
1817TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1818TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1819
1820static inline void
1821current_restore_flags(unsigned long orig_flags, unsigned long flags)
1822{
1823 current->flags &= ~flags;
1824 current->flags |= orig_flags & flags;
1825}
1826
1827extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1828extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
1829#ifdef CONFIG_SMP
1830extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1831extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1832extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1833extern void release_user_cpus_ptr(struct task_struct *p);
1834extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1835extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1836extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1837#else
1838static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1839{
1840}
1841static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1842{
1843 if (!cpumask_test_cpu(0, new_mask))
1844 return -EINVAL;
1845 return 0;
1846}
1847static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1848{
1849 if (src->user_cpus_ptr)
1850 return -EINVAL;
1851 return 0;
1852}
1853static inline void release_user_cpus_ptr(struct task_struct *p)
1854{
1855 WARN_ON(p->user_cpus_ptr);
1856}
1857
1858static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1859{
1860 return 0;
1861}
1862#endif
1863
1864extern int yield_to(struct task_struct *p, bool preempt);
1865extern void set_user_nice(struct task_struct *p, long nice);
1866extern int task_prio(const struct task_struct *p);
1867
1868
1869
1870
1871
1872
1873
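/*
 * task_nice - return the nice value of a given task.
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */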
1874static inline int task_nice(const struct task_struct *p)
1875{
1876 return PRIO_TO_NICE((p)->static_prio);
1877}
1878
1879extern int can_nice(const struct task_struct *p, const int nice);
1880extern int task_curr(const struct task_struct *p);
1881extern int idle_cpu(int cpu);
1882extern int available_idle_cpu(int cpu);
1883extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1884extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1885extern void sched_set_fifo(struct task_struct *p);
1886extern void sched_set_fifo_low(struct task_struct *p);
1887extern void sched_set_normal(struct task_struct *p, int nice);
1888extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1889extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1890extern struct task_struct *idle_task(int cpu);
1891
1892
1893
1894
1895
1896
1897
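/*
 * is_idle_task - is the specified task an idle task?
 * Return: 1 if @p is an idle task. 0 otherwise.
 */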
1898static __always_inline bool is_idle_task(const struct task_struct *p)
1899{
1900 return !!(p->flags & PF_IDLE);
1901}
1902
1903extern struct task_struct *curr_task(int cpu);
1904extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1905
1906void yield(void);
1907
1908union thread_union {
1909#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1910 struct task_struct task;
1911#endif
1912#ifndef CONFIG_THREAD_INFO_IN_TASK
1913 struct thread_info thread_info;
1914#endif
1915 unsigned long stack[THREAD_SIZE/sizeof(long)];
1916};
1917
1918#ifndef CONFIG_THREAD_INFO_IN_TASK
1919extern struct thread_info init_thread_info;
1920#endif
1921
1922extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1923
1924#ifdef CONFIG_THREAD_INFO_IN_TASK
1925# define task_thread_info(task) (&(task)->thread_info)
1926#elif !defined(__HAVE_THREAD_FUNCTIONS)
1927# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1928#endif
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940
1941extern struct task_struct *find_task_by_vpid(pid_t nr);
1942extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1943
1944
1945
1946
1947extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1948
1949extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1950extern int wake_up_process(struct task_struct *tsk);
1951extern void wake_up_new_task(struct task_struct *tsk);
1952
1953#ifdef CONFIG_SMP
1954extern void kick_process(struct task_struct *tsk);
1955#else
1956static inline void kick_process(struct task_struct *tsk) { }
1957#endif
1958
1959extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1960
1961static inline void set_task_comm(struct task_struct *tsk, const char *from)
1962{
1963 __set_task_comm(tsk, from, false);
1964}
1965
1966extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1967#define get_task_comm(buf, tsk) ({ \
1968 BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
1969 __get_task_comm(buf, sizeof(buf), tsk); \
1970})
1971
1972#ifdef CONFIG_SMP
1973static __always_inline void scheduler_ipi(void)
1974{
1975
1976
1977
1978
1979
1980 preempt_fold_need_resched();
1981}
1982extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
1983#else
1984static inline void scheduler_ipi(void) { }
1985static inline unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
1986{
1987 return 1;
1988}
1989#endif
1990
1991
1992
1993
1994
1995static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1996{
1997 set_ti_thread_flag(task_thread_info(tsk), flag);
1998}
1999
2000static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2001{
2002 clear_ti_thread_flag(task_thread_info(tsk), flag);
2003}
2004
2005static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
2006 bool value)
2007{
2008 update_ti_thread_flag(task_thread_info(tsk), flag, value);
2009}
2010
2011static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2012{
2013 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2014}
2015
2016static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2017{
2018 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2019}
2020
2021static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2022{
2023 return test_ti_thread_flag(task_thread_info(tsk), flag);
2024}
2025
2026static inline void set_tsk_need_resched(struct task_struct *tsk)
2027{
2028 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2029}
2030
2031static inline void clear_tsk_need_resched(struct task_struct *tsk)
2032{
2033 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2034}
2035
2036static inline int test_tsk_need_resched(struct task_struct *tsk)
2037{
2038 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2039}
2040
2041
2042
2043
2044
2045
2046
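/*
 * cond_resched() and cond_resched_lock(): latency reduction via explicit
 * rescheduling in places that are safe. The return value indicates whether
 * a reschedule was done in fact. cond_resched_lock() will drop the spinlock
 * before scheduling.
 */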
2047#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2048extern int __cond_resched(void);
2049
2050#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
2051
2052DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2053
2054static __always_inline int _cond_resched(void)
2055{
2056 return static_call_mod(cond_resched)();
2057}
2058
2059#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2060extern int dynamic_cond_resched(void);
2061
2062static __always_inline int _cond_resched(void)
2063{
2064 return dynamic_cond_resched();
2065}
2066
2067#else
2068
2069static inline int _cond_resched(void)
2070{
2071 return __cond_resched();
2072}
2073
2074#endif
2075
2076#else
2077
2078static inline int _cond_resched(void) { return 0; }
2079
2080#endif
2081
2082#define cond_resched() ({ \
2083 __might_resched(__FILE__, __LINE__, 0); \
2084 _cond_resched(); \
2085})
2086
2087extern int __cond_resched_lock(spinlock_t *lock);
2088extern int __cond_resched_rwlock_read(rwlock_t *lock);
2089extern int __cond_resched_rwlock_write(rwlock_t *lock);
2090
2091#define MIGHT_RESCHED_RCU_SHIFT 8
2092#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2093
2094#ifndef CONFIG_PREEMPT_RT
2095
2096
2097
2098
2099# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
2100#else
2101
2102
2103
2104
2105
2106# define PREEMPT_LOCK_RESCHED_OFFSETS \
2107 (PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2108#endif
2109
2110#define cond_resched_lock(lock) ({ \
2111 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2112 __cond_resched_lock(lock); \
2113})
2114
2115#define cond_resched_rwlock_read(lock) ({ \
2116 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2117 __cond_resched_rwlock_read(lock); \
2118})
2119
2120#define cond_resched_rwlock_write(lock) ({ \
2121 __might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
2122 __cond_resched_rwlock_write(lock); \
2123})
2124
2125static inline void cond_resched_rcu(void)
2126{
2127#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2128 rcu_read_unlock();
2129 cond_resched();
2130 rcu_read_lock();
2131#endif
2132}
2133
2134#ifdef CONFIG_PREEMPT_DYNAMIC
2135
2136extern bool preempt_model_none(void);
2137extern bool preempt_model_voluntary(void);
2138extern bool preempt_model_full(void);
2139
2140#else
2141
2142static inline bool preempt_model_none(void)
2143{
2144 return IS_ENABLED(CONFIG_PREEMPT_NONE);
2145}
2146static inline bool preempt_model_voluntary(void)
2147{
2148 return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2149}
2150static inline bool preempt_model_full(void)
2151{
2152 return IS_ENABLED(CONFIG_PREEMPT);
2153}
2154
2155#endif
2156
2157static inline bool preempt_model_rt(void)
2158{
2159 return IS_ENABLED(CONFIG_PREEMPT_RT);
2160}
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170static inline bool preempt_model_preemptible(void)
2171{
2172 return preempt_model_full() || preempt_model_rt();
2173}
2174
2175
2176
2177
2178
2179
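/*
 * Does a critical section need to be broken due to another task waiting?
 * Only meaningful on preemptible kernels; otherwise this always reports no
 * contention.
 */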
2180static inline int spin_needbreak(spinlock_t *lock)
2181{
2182#ifdef CONFIG_PREEMPTION
2183 return spin_is_contended(lock);
2184#else
2185 return 0;
2186#endif
2187}
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197static inline int rwlock_needbreak(rwlock_t *lock)
2198{
2199#ifdef CONFIG_PREEMPTION
2200 return rwlock_is_contended(lock);
2201#else
2202 return 0;
2203#endif
2204}
2205
2206static __always_inline bool need_resched(void)
2207{
2208 return unlikely(tif_need_resched());
2209}
2210
2211
2212
2213
2214#ifdef CONFIG_SMP
2215
2216static inline unsigned int task_cpu(const struct task_struct *p)
2217{
2218 return READ_ONCE(task_thread_info(p)->cpu);
2219}
2220
2221extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2222
2223#else
2224
2225static inline unsigned int task_cpu(const struct task_struct *p)
2226{
2227 return 0;
2228}
2229
2230static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2231{
2232}
2233
2234#endif
2235
2236extern bool sched_task_on_rq(struct task_struct *p);
2237extern unsigned long get_wchan(struct task_struct *p);
2238extern struct task_struct *cpu_curr_snapshot(int cpu);
2239
2240
2241
2242
2243
2244
2245
2246
2247
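/*
 * In order to reduce various lock holder preemption latencies, provide an
 * interface to see if a vCPU is currently running or not; architectures may
 * override vcpu_is_preempted(), the default assumes the vCPU is running.
 */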
2248#ifndef vcpu_is_preempted
2249static inline bool vcpu_is_preempted(int cpu)
2250{
2251 return false;
2252}
2253#endif
2254
2255extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2256extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2257
2258#ifndef TASK_SIZE_OF
2259#define TASK_SIZE_OF(tsk) TASK_SIZE
2260#endif
2261
2262#ifdef CONFIG_SMP
2263static inline bool owner_on_cpu(struct task_struct *owner)
2264{
2265
2266
2267
2268
2269 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2270}
2271
2272
2273unsigned long sched_cpu_util(int cpu);
2274#endif
2275
2276#ifdef CONFIG_RSEQ
2277
2278
2279
2280
2281
2282enum rseq_event_mask_bits {
2283 RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2284 RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2285 RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2286};
2287
2288enum rseq_event_mask {
2289 RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
2290 RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
2291 RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
2292};
2293
2294static inline void rseq_set_notify_resume(struct task_struct *t)
2295{
2296 if (t->rseq)
2297 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2298}
2299
2300void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2301
2302static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2303 struct pt_regs *regs)
2304{
2305 if (current->rseq)
2306 __rseq_handle_notify_resume(ksig, regs);
2307}
2308
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
	preempt_disable();
	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
	preempt_enable();
	rseq_handle_notify_resume(ksig, regs);
}
2317
2318
2319static inline void rseq_preempt(struct task_struct *t)
2320{
2321 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2322 rseq_set_notify_resume(t);
2323}
2324
2325
2326static inline void rseq_migrate(struct task_struct *t)
2327{
2328 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2329 rseq_set_notify_resume(t);
2330}
2331
2332
2333
2334
2335
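/*
 * A new thread created with CLONE_VM starts with a cleared rseq
 * registration; a forked process inherits the parent's registration.
 */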
2336static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2337{
2338 if (clone_flags & CLONE_VM) {
2339 t->rseq = NULL;
2340 t->rseq_sig = 0;
2341 t->rseq_event_mask = 0;
2342 } else {
2343 t->rseq = current->rseq;
2344 t->rseq_sig = current->rseq_sig;
2345 t->rseq_event_mask = current->rseq_event_mask;
2346 }
2347}
2348
2349static inline void rseq_execve(struct task_struct *t)
2350{
2351 t->rseq = NULL;
2352 t->rseq_sig = 0;
2353 t->rseq_event_mask = 0;
2354}
2355
2356#else
2357
2358static inline void rseq_set_notify_resume(struct task_struct *t)
2359{
2360}
2361static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2362 struct pt_regs *regs)
2363{
2364}
2365static inline void rseq_signal_deliver(struct ksignal *ksig,
2366 struct pt_regs *regs)
2367{
2368}
2369static inline void rseq_preempt(struct task_struct *t)
2370{
2371}
2372static inline void rseq_migrate(struct task_struct *t)
2373{
2374}
2375static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2376{
2377}
2378static inline void rseq_execve(struct task_struct *t)
2379{
2380}
2381
2382#endif
2383
2384#ifdef CONFIG_DEBUG_RSEQ
2385
2386void rseq_syscall(struct pt_regs *regs);
2387
2388#else
2389
2390static inline void rseq_syscall(struct pt_regs *regs)
2391{
2392}
2393
2394#endif
2395
2396#ifdef CONFIG_SCHED_CORE
2397extern void sched_core_free(struct task_struct *tsk);
2398extern void sched_core_fork(struct task_struct *p);
2399extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2400 unsigned long uaddr);
2401#else
2402static inline void sched_core_free(struct task_struct *tsk) { }
2403static inline void sched_core_fork(struct task_struct *p) { }
2404#endif
2405
2406extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2407
2408#endif
2409