1
2#ifndef _LINUX_SCHED_H
3#define _LINUX_SCHED_H
4
5
6
7
8
9
10#include <uapi/linux/sched.h>
11
12#include <asm/current.h>
13
14#include <linux/pid.h>
15#include <linux/sem.h>
16#include <linux/shm.h>
17#include <linux/kmsan_types.h>
18#include <linux/mutex.h>
19#include <linux/plist.h>
20#include <linux/hrtimer.h>
21#include <linux/irqflags.h>
22#include <linux/seccomp.h>
23#include <linux/nodemask.h>
24#include <linux/rcupdate.h>
25#include <linux/refcount.h>
26#include <linux/resource.h>
27#include <linux/latencytop.h>
28#include <linux/sched/prio.h>
29#include <linux/sched/types.h>
30#include <linux/signal_types.h>
31#include <linux/syscall_user_dispatch.h>
32#include <linux/mm_types_task.h>
33#include <linux/task_io_accounting.h>
34#include <linux/posix-timers.h>
35#include <linux/rseq.h>
36#include <linux/seqlock.h>
37#include <linux/kcsan.h>
38#include <linux/rv.h>
39#include <linux/livepatch_sched.h>
40#include <asm/kmap_size.h>
41
42
43struct audit_context;
44struct bio_list;
45struct blk_plug;
46struct bpf_local_storage;
47struct bpf_run_ctx;
48struct capture_control;
49struct cfs_rq;
50struct fs_struct;
51struct futex_pi_state;
52struct io_context;
53struct io_uring_task;
54struct mempolicy;
55struct nameidata;
56struct nsproxy;
57struct perf_event_context;
58struct pid_namespace;
59struct pipe_inode_info;
60struct rcu_node;
61struct reclaim_state;
62struct robust_list_head;
63struct root_domain;
64struct rq;
65struct sched_attr;
66struct seq_file;
67struct sighand_struct;
68struct signal_struct;
69struct task_delay_info;
70struct task_group;
71struct user_event_mm;
72
73
74
75
76
77
78
79
80
81
82
83
84
/*
 * Task state bitmasks. Prefer the wrapper macros below (task_is_*(),
 * set_current_state(), ...) over testing/poking the raw bits directly.
 */

/* Used in tsk->__state: */
#define TASK_RUNNING 0x00000000
#define TASK_INTERRUPTIBLE 0x00000001
#define TASK_UNINTERRUPTIBLE 0x00000002
#define __TASK_STOPPED 0x00000004
#define __TASK_TRACED 0x00000008

/* Used in tsk->exit_state: */
#define EXIT_DEAD 0x00000010
#define EXIT_ZOMBIE 0x00000020
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)

/* Used in tsk->__state again: */
#define TASK_PARKED 0x00000040
#define TASK_DEAD 0x00000080
#define TASK_WAKEKILL 0x00000100
#define TASK_WAKING 0x00000200
#define TASK_NOLOAD 0x00000400
#define TASK_NEW 0x00000800
#define TASK_RTLOCK_WAIT 0x00001000
#define TASK_FREEZABLE 0x00002000
/* The UNSAFE bit only exists where lockdep can catch its misuse: */
#define __TASK_FREEZABLE_UNSAFE (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
#define TASK_FROZEN 0x00008000
#define TASK_STATE_MAX 0x00010000

/* Mask covering every state bit, for "match any state" waiters: */
#define TASK_ANY (TASK_STATE_MAX-1)

/*
 * Freezable sleep that is unsafe versus the freezer.
 * NOTE(review): upstream marks this "DO NOT ADD ANY NEW USERS".
 */
#define TASK_FREEZABLE_UNSAFE (TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)

/* Convenience macros for the sake of set_current_state(): */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED __TASK_TRACED

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* The task states reported to userspace (see __task_state_index()): */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 TASK_PARKED)

#define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)

/* stopped/traced are tracked in ->jobctl, not in ->__state: */
#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
135
136
137
138
139
/*
 * "Special" states do not follow the normal set_current_state()/schedule()
 * wait-loop pattern and must be set under ->pi_lock via set_special_state()
 * so that a regular wakeup cannot disturb them.
 */
#define is_special_task_state(state) \
 ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/*
 * Debug instrumentation: record the caller's IP in ->task_state_change
 * (so sleep-in-atomic reports can show where the state was last set), and
 * warn when the wrong state-setting primitive is used for a
 * (non-)special state.
 */
# define debug_normal_state_change(state_value) \
 do { \
 WARN_ON_ONCE(is_special_task_state(state_value)); \
 current->task_state_change = _THIS_IP_; \
 } while (0)

# define debug_special_state_change(state_value) \
 do { \
 WARN_ON_ONCE(!is_special_task_state(state_value)); \
 current->task_state_change = _THIS_IP_; \
 } while (0)

/* Stash/restore the recorded IP across an rtlock wait (PREEMPT_RT): */
# define debug_rtlock_wait_set_state() \
 do { \
 current->saved_state_change = current->task_state_change;\
 current->task_state_change = _THIS_IP_; \
 } while (0)

# define debug_rtlock_wait_restore_state() \
 do { \
 current->task_state_change = current->saved_state_change;\
 } while (0)

#else
/* !CONFIG_DEBUG_ATOMIC_SLEEP: all state-change debugging compiles away. */
# define debug_normal_state_change(cond) do { } while (0)
# define debug_special_state_change(cond) do { } while (0)
# define debug_rtlock_wait_set_state() do { } while (0)
# define debug_rtlock_wait_restore_state() do { } while (0)
#endif
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
/*
 * __set_current_state(): plain store of current's run state; only safe when
 * no ordering against a subsequent condition test is required.
 *
 * set_current_state(): store plus full memory barrier (smp_store_mb()), so
 * the state write is ordered before a following condition load; pairs with
 * the barrier in the wakeup path. Use for the canonical
 * "set_current_state(S); if (!cond) schedule();" wait-loop.
 *
 * Neither may be used for special states; see set_special_state().
 */
#define __set_current_state(state_value) \
 do { \
 debug_normal_state_change((state_value)); \
 WRITE_ONCE(current->__state, (state_value)); \
 } while (0)

#define set_current_state(state_value) \
 do { \
 debug_normal_state_change((state_value)); \
 smp_store_mb(current->__state, (state_value)); \
 } while (0)
222
223
224
225
226
227
228
/*
 * set_special_state() - transition current into a "special" state
 * (__TASK_STOPPED, __TASK_TRACED, TASK_PARKED, TASK_DEAD).
 *
 * These states are not protected by the normal wait-loop pattern, so the
 * store must be serialized against wakeups via current->pi_lock: the
 * wakeup path takes the same lock, hence it either observes the new state
 * or completes the wakeup before the transition.
 *
 * Fix: "&current" had been mangled into the mojibake "¤t" (broken
 * HTML entity), which did not compile; restored both occurrences.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
									\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		debug_special_state_change((state_value));		\
		WRITE_ONCE(current->__state, (state_value));		\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
/*
 * PREEMPT_RT: save current's run state and block in TASK_RTLOCK_WAIT on an
 * rtmutex-based spinlock. ->pi_lock serializes against the wakeup path so
 * a regular wakeup aimed at the saved state is not lost; it is deflected
 * into ->saved_state and replayed by the restore side.
 *
 * Caller must have interrupts disabled.
 *
 * Fixes: restored "&current" from the "¤t" mojibake (two occurrences),
 * and dropped the stray semicolon after "while (0)" — with it,
 * "if (x) current_save_and_set_rtlock_wait_state(); else ..." would not
 * compile (empty-statement/dangling-else macro-hygiene bug).
 */
#define current_save_and_set_rtlock_wait_state()			\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		current->saved_state = current->__state;		\
		debug_rtlock_wait_set_state();				\
		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0)
273
/*
 * PREEMPT_RT: counterpart of current_save_and_set_rtlock_wait_state();
 * restores the pre-blocking run state and resets ->saved_state to
 * TASK_RUNNING, all under ->pi_lock. Caller must have interrupts disabled.
 *
 * Fixes: restored "&current" from the "¤t" mojibake (two occurrences),
 * and dropped the stray trailing semicolon after "while (0)" (macro
 * hygiene — see current_save_and_set_rtlock_wait_state()).
 */
#define current_restore_rtlock_saved_state()				\
	do {								\
		lockdep_assert_irqs_disabled();				\
		raw_spin_lock(&current->pi_lock);			\
		debug_rtlock_wait_restore_state();			\
		WRITE_ONCE(current->__state, current->saved_state);	\
		current->saved_state = TASK_RUNNING;			\
		raw_spin_unlock(&current->pi_lock);			\
	} while (0)
283
/* Lockless (intentionally racy) snapshot of current's run state. */
#define get_current_state() READ_ONCE(current->__state)
285
286
287
288
289
/*
 * Fixed width of task_struct::comm (including the terminating NUL).
 * Defined as an enum rather than a #define so the constant is also
 * visible to BPF programs / debuggers.
 */
enum {
 TASK_COMM_LEN = 16,
};
293
294extern void scheduler_tick(void);
295
296#define MAX_SCHEDULE_TIMEOUT LONG_MAX
297
298extern long schedule_timeout(long timeout);
299extern long schedule_timeout_interruptible(long timeout);
300extern long schedule_timeout_killable(long timeout);
301extern long schedule_timeout_uninterruptible(long timeout);
302extern long schedule_timeout_idle(long timeout);
303asmlinkage void schedule(void);
304extern void schedule_preempt_disabled(void);
305asmlinkage void preempt_schedule_irq(void);
306#ifdef CONFIG_PREEMPT_RT
307 extern void schedule_rtlock(void);
308#endif
309
310extern int __must_check io_schedule_prepare(void);
311extern void io_schedule_finish(int token);
312extern long io_schedule_timeout(long timeout);
313extern void io_schedule(void);
314
315
316
317
318
319
320
321
322
323
/*
 * Snapshot of the previously reported utime/stime values, used to keep the
 * cputime reported to userspace monotonic across scaling adjustments.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 u64 utime;
 u64 stime;
 raw_spinlock_t lock; /* protects utime/stime above */
#endif
};
331
/* Context tracked by generic virtual cputime accounting (struct vtime). */
enum vtime_state {
 /* Task is sleeping or running in a CPU with VTIME inactive: */
 VTIME_INACTIVE = 0,
 /* Task is idle: */
 VTIME_IDLE,
 /* Task runs in kernelspace in a CPU with VTIME active: */
 VTIME_SYS,
 /* Task runs in userspace in a CPU with VTIME active: */
 VTIME_USER,
 /* Task runs as a guest in a CPU with VTIME active: */
 VTIME_GUEST,
};
344
/*
 * Generic (CONFIG_VIRT_CPU_ACCOUNTING_GEN) vtime accounting state;
 * @seqcount allows lockless readers a consistent view of the totals.
 */
struct vtime {
 seqcount_t seqcount;
 unsigned long long starttime;
 enum vtime_state state;
 unsigned int cpu;
 u64 utime; /* user time */
 u64 stime; /* system time */
 u64 gtime; /* guest time */
};
354
355
356
357
358
359
360
/* Utilization clamp constraint indexes: */
enum uclamp_id {
 UCLAMP_MIN = 0, /* Minimum utilization */
 UCLAMP_MAX, /* Maximum utilization */
 UCLAMP_CNT /* Utilization clamp constraints count */
};
366
367#ifdef CONFIG_SMP
368extern struct root_domain def_root_domain;
369extern struct mutex sched_domains_mutex;
370#endif
371
/* Legacy scheduling-priority container (sched_setparam()-style ABI). */
struct sched_param {
 int sched_priority;
};
375
/* Per-task scheduling latency bookkeeping (CONFIG_SCHED_INFO). */
struct sched_info {
#ifdef CONFIG_SCHED_INFO
 /* Cumulative counters: */

 /* # of times we have run on this CPU: */
 unsigned long pcount;

 /* Time spent waiting on a runqueue: */
 unsigned long long run_delay;

 /* Timestamps: */

 /* When did we last run on a CPU? */
 unsigned long long last_arrival;

 /* When were we last queued to run? */
 unsigned long long last_queued;

#endif
};
396
397
398
399
400
401
402
403
/*
 * Basic fixed-point arithmetic range used by the scheduler's integer
 * metrics (load, util, capacity, ...).
 */
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

/* Resolution of cpu_capacity fixed-point calculations: */
# define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)

struct load_weight {
 unsigned long weight;
 u32 inv_weight; /* cached inverse of @weight — presumably to avoid divisions on hot paths; see kernel/sched */
};
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
/*
 * Utilization estimate of a task/cfs_rq:
 * @enqueued: instantaneous estimated utilization at enqueue time
 * @ewma: exponential weighted moving average of past utilization
 * u64-aligned so the pair can be accessed as one word.
 */
struct util_est {
 unsigned int enqueued;
 unsigned int ewma;
/* EWMA decay shift (new-sample weight = 1/4 — see kernel/sched/fair.c): */
#define UTIL_EST_WEIGHT_SHIFT 2
/* Flag bit in @enqueued: util_avg unchanged since the last update: */
#define UTIL_AVG_UNCHANGED 0x80000000
} __attribute__((__aligned__(sizeof(u64))));
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
/*
 * Per-entity load-tracking (PELT) accumulators: the *_sum fields are the
 * raw geometric-series sums, the *_avg fields the derived averages
 * (presumably maintained by kernel/sched/pelt.c — confirm there).
 * Cacheline-aligned so updates do not collide with neighbouring
 * read-mostly data.
 */
struct sched_avg {
 u64 last_update_time;
 u64 load_sum;
 u64 runnable_sum;
 u32 util_sum;
 u32 period_contrib; /* partial progress into the current 1024us period */
 unsigned long load_avg;
 unsigned long runnable_avg;
 unsigned long util_avg;
 struct util_est util_est;
} ____cacheline_aligned;
508
/*
 * Optional (CONFIG_SCHEDSTATS) per-entity latency and event statistics:
 * runqueue wait, sleep and block durations plus migration/wakeup counters.
 */
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
 u64 wait_start;
 u64 wait_max;
 u64 wait_count;
 u64 wait_sum;
 u64 iowait_count;
 u64 iowait_sum;

 u64 sleep_start;
 u64 sleep_max;
 s64 sum_sleep_runtime;

 u64 block_start;
 u64 block_max;
 s64 sum_block_runtime;

 u64 exec_max;
 u64 slice_max;

 /* Load-balancer migration accounting: */
 u64 nr_migrations_cold;
 u64 nr_failed_migrations_affine;
 u64 nr_failed_migrations_running;
 u64 nr_failed_migrations_hot;
 u64 nr_forced_migrations;

 /* Wakeup accounting: */
 u64 nr_wakeups;
 u64 nr_wakeups_sync;
 u64 nr_wakeups_migrate;
 u64 nr_wakeups_local;
 u64 nr_wakeups_remote;
 u64 nr_wakeups_affine;
 u64 nr_wakeups_affine_attempts;
 u64 nr_wakeups_passive;
 u64 nr_wakeups_idle;

#ifdef CONFIG_SCHED_CORE
 u64 core_forceidle_sum;
#endif
#endif
} ____cacheline_aligned;
550
/* CFS scheduling entity: a task or (with FAIR_GROUP_SCHED) a task group. */
struct sched_entity {
 /* For load-balancing: */
 struct load_weight load;
 struct rb_node run_node;
 u64 deadline;
 u64 min_deadline;

 struct list_head group_node;
 unsigned int on_rq;

 u64 exec_start;
 u64 sum_exec_runtime;
 u64 prev_sum_exec_runtime;
 u64 vruntime;
 s64 vlag;
 u64 slice;

 u64 nr_migrations;

#ifdef CONFIG_FAIR_GROUP_SCHED
 int depth;
 struct sched_entity *parent;
 /* rq on which this entity is (to be) queued: */
 struct cfs_rq *cfs_rq;
 /* rq "owned" by this entity/group: */
 struct cfs_rq *my_q;
 /* cached value of my_q->h_nr_running */
 unsigned long runnable_weight;
#endif

#ifdef CONFIG_SMP
 /*
 * Per-entity load average tracking.
 *
 * Put into a separate cache line so it does not
 * collide with the read-mostly values above.
 */
 struct sched_avg avg;
#endif
};
591
/* RT (SCHED_FIFO/SCHED_RR) scheduling entity state. */
struct sched_rt_entity {
 struct list_head run_list;
 unsigned long timeout;
 unsigned long watchdog_stamp;
 unsigned int time_slice; /* remaining RR slice, in ticks — confirm in rt.c */
 unsigned short on_rq;
 unsigned short on_list;

 struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
 struct sched_rt_entity *parent;
 /* rq on which this entity is (to be) queued: */
 struct rt_rq *rt_rq;
 /* rq "owned" by this entity/group: */
 struct rt_rq *my_q;
#endif
} __randomize_layout;
609
/* SCHED_DEADLINE scheduling entity state. */
struct sched_dl_entity {
 struct rb_node rb_node;

 /*
 * Original scheduling parameters. Copied here from sched_attr
 * during sched_setattr(); they remain the same until the next
 * sched_setattr().
 */
 u64 dl_runtime; /* Maximum runtime for each instance */
 u64 dl_deadline; /* Relative deadline of each instance */
 u64 dl_period; /* Separation of two instances (period) */
 u64 dl_bw; /* dl_runtime / dl_period */
 u64 dl_density; /* dl_runtime / dl_deadline */

 /*
 * Actual scheduling parameters. Initialized from the values above,
 * they are continuously updated during task execution. Note that
 * the remaining runtime can be < 0 in case of overrun.
 */
 s64 runtime; /* Remaining runtime for this instance */
 u64 deadline; /* Absolute deadline for this instance */
 unsigned int flags; /* Specifying the scheduler behaviour */

 /*
 * Status flags:
 *
 * @dl_throttled: runtime exhausted; wait for replenishment at the
 * next firing of @dl_timer.
 *
 * @dl_yielded: the task gave up its remaining runtime voluntarily.
 *
 * @dl_non_contending: the task left the runqueue but its bandwidth
 * is still counted as active until the "0-lag time"
 * (see @inactive_timer).
 *
 * @dl_overrun: runtime overrun detected; used to signal the task
 * when SCHED_FLAG_DL_OVERRUN is requested — confirm
 * against kernel/sched/deadline.c.
 */
 unsigned int dl_throttled : 1;
 unsigned int dl_yielded : 1;
 unsigned int dl_non_contending : 1;
 unsigned int dl_overrun : 1;

 /*
 * Bandwidth enforcement timer. Each -deadline task has its own
 * bandwidth to be enforced, thus one timer per task.
 */
 struct hrtimer dl_timer;

 /*
 * Inactive timer: decreases the active utilization at the
 * "0-lag time" after the task stops running.
 */
 struct hrtimer inactive_timer;

#ifdef CONFIG_RT_MUTEXES
 /*
 * Priority inheritance: points to the donor entity while boosted,
 * otherwise presumably to this entity itself — confirm in
 * kernel/sched/deadline.c.
 */
 struct sched_dl_entity *pi_se;
#endif
};
682
683#ifdef CONFIG_UCLAMP_TASK
684
685#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
/*
 * Utilization clamp for a scheduling entity: the requested clamp @value,
 * the bucket it maps to, whether the clamp is currently @active, and
 * whether it was explicitly set from userspace (@user_defined).
 */
struct uclamp_se {
 unsigned int value : bits_per(SCHED_CAPACITY_SCALE);
 unsigned int bucket_id : bits_per(UCLAMP_BUCKETS);
 unsigned int active : 1;
 unsigned int user_defined : 1;
};
716#endif
717
/* Special per-task RCU reader flags, also accessible as one word via ->s. */
union rcu_special {
 struct {
 u8 blocked;
 u8 need_qs;
 u8 exp_hint; /* hint for expedited grace periods */
 u8 need_mb; /* readers need smp_mb() */
 } b; /* Bits. */
 u32 s; /* Set of bits. */
};
727
/* Index of a task's perf event context (hardware vs software events). */
enum perf_event_task_context {
 perf_invalid_context = -1,
 perf_hw_context = 0,
 perf_sw_context,
 perf_nr_task_contexts,
};
734
/* Singly-linked node for lockless wake-queues (wake_q_add()/wake_up_q()). */
struct wake_q_node {
 struct wake_q_node *next;
};
738
/* Per-task kmap_local() state: stack of temporary highmem mappings. */
struct kmap_ctrl {
#ifdef CONFIG_KMAP_LOCAL
 int idx;
 pte_t pteval[KM_MAX_IDX];
#endif
};
745
746struct task_struct {
747#ifdef CONFIG_THREAD_INFO_IN_TASK
748
749
750
751
752 struct thread_info thread_info;
753#endif
754 unsigned int __state;
755
756
757 unsigned int saved_state;
758
759
760
761
762
763 randomized_struct_fields_start
764
765 void *stack;
766 refcount_t usage;
767
768 unsigned int flags;
769 unsigned int ptrace;
770
771#ifdef CONFIG_SMP
772 int on_cpu;
773 struct __call_single_node wake_entry;
774 unsigned int wakee_flips;
775 unsigned long wakee_flip_decay_ts;
776 struct task_struct *last_wakee;
777
778
779
780
781
782
783
784
785 int recent_used_cpu;
786 int wake_cpu;
787#endif
788 int on_rq;
789
790 int prio;
791 int static_prio;
792 int normal_prio;
793 unsigned int rt_priority;
794
795 struct sched_entity se;
796 struct sched_rt_entity rt;
797 struct sched_dl_entity dl;
798 const struct sched_class *sched_class;
799
800#ifdef CONFIG_SCHED_CORE
801 struct rb_node core_node;
802 unsigned long core_cookie;
803 unsigned int core_occupation;
804#endif
805
806#ifdef CONFIG_CGROUP_SCHED
807 struct task_group *sched_task_group;
808#endif
809
810#ifdef CONFIG_UCLAMP_TASK
811
812
813
814
815 struct uclamp_se uclamp_req[UCLAMP_CNT];
816
817
818
819
820 struct uclamp_se uclamp[UCLAMP_CNT];
821#endif
822
823 struct sched_statistics stats;
824
825#ifdef CONFIG_PREEMPT_NOTIFIERS
826
827 struct hlist_head preempt_notifiers;
828#endif
829
830#ifdef CONFIG_BLK_DEV_IO_TRACE
831 unsigned int btrace_seq;
832#endif
833
834 unsigned int policy;
835 int nr_cpus_allowed;
836 const cpumask_t *cpus_ptr;
837 cpumask_t *user_cpus_ptr;
838 cpumask_t cpus_mask;
839 void *migration_pending;
840#ifdef CONFIG_SMP
841 unsigned short migration_disabled;
842#endif
843 unsigned short migration_flags;
844
845#ifdef CONFIG_PREEMPT_RCU
846 int rcu_read_lock_nesting;
847 union rcu_special rcu_read_unlock_special;
848 struct list_head rcu_node_entry;
849 struct rcu_node *rcu_blocked_node;
850#endif
851
852#ifdef CONFIG_TASKS_RCU
853 unsigned long rcu_tasks_nvcsw;
854 u8 rcu_tasks_holdout;
855 u8 rcu_tasks_idx;
856 int rcu_tasks_idle_cpu;
857 struct list_head rcu_tasks_holdout_list;
858#endif
859
860#ifdef CONFIG_TASKS_TRACE_RCU
861 int trc_reader_nesting;
862 int trc_ipi_to_cpu;
863 union rcu_special trc_reader_special;
864 struct list_head trc_holdout_list;
865 struct list_head trc_blkd_node;
866 int trc_blkd_cpu;
867#endif
868
869 struct sched_info sched_info;
870
871 struct list_head tasks;
872#ifdef CONFIG_SMP
873 struct plist_node pushable_tasks;
874 struct rb_node pushable_dl_tasks;
875#endif
876
877 struct mm_struct *mm;
878 struct mm_struct *active_mm;
879 struct address_space *faults_disabled_mapping;
880
881 int exit_state;
882 int exit_code;
883 int exit_signal;
884
885 int pdeath_signal;
886
887 unsigned long jobctl;
888
889
890 unsigned int personality;
891
892
893 unsigned sched_reset_on_fork:1;
894 unsigned sched_contributes_to_load:1;
895 unsigned sched_migrated:1;
896
897
898 unsigned :0;
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915 unsigned sched_remote_wakeup:1;
916#ifdef CONFIG_RT_MUTEXES
917 unsigned sched_rt_mutex:1;
918#endif
919
920
921 unsigned in_execve:1;
922 unsigned in_iowait:1;
923#ifndef TIF_RESTORE_SIGMASK
924 unsigned restore_sigmask:1;
925#endif
926#ifdef CONFIG_MEMCG
927 unsigned in_user_fault:1;
928#endif
929#ifdef CONFIG_LRU_GEN
930
931 unsigned in_lru_fault:1;
932#endif
933#ifdef CONFIG_COMPAT_BRK
934 unsigned brk_randomized:1;
935#endif
936#ifdef CONFIG_CGROUPS
937
938 unsigned no_cgroup_migration:1;
939
940 unsigned frozen:1;
941#endif
942#ifdef CONFIG_BLK_CGROUP
943 unsigned use_memdelay:1;
944#endif
945#ifdef CONFIG_PSI
946
947 unsigned in_memstall:1;
948#endif
949#ifdef CONFIG_PAGE_OWNER
950
951 unsigned in_page_owner:1;
952#endif
953#ifdef CONFIG_EVENTFD
954
955 unsigned in_eventfd:1;
956#endif
957#ifdef CONFIG_IOMMU_SVA
958 unsigned pasid_activated:1;
959#endif
960#ifdef CONFIG_CPU_SUP_INTEL
961 unsigned reported_split_lock:1;
962#endif
963#ifdef CONFIG_TASK_DELAY_ACCT
964
965 unsigned in_thrashing:1;
966#endif
967
968 unsigned long atomic_flags;
969
970 struct restart_block restart_block;
971
972 pid_t pid;
973 pid_t tgid;
974
975#ifdef CONFIG_STACKPROTECTOR
976
977 unsigned long stack_canary;
978#endif
979
980
981
982
983
984
985
986 struct task_struct __rcu *real_parent;
987
988
989 struct task_struct __rcu *parent;
990
991
992
993
994 struct list_head children;
995 struct list_head sibling;
996 struct task_struct *group_leader;
997
998
999
1000
1001
1002
1003
1004 struct list_head ptraced;
1005 struct list_head ptrace_entry;
1006
1007
1008 struct pid *thread_pid;
1009 struct hlist_node pid_links[PIDTYPE_MAX];
1010 struct list_head thread_node;
1011
1012 struct completion *vfork_done;
1013
1014
1015 int __user *set_child_tid;
1016
1017
1018 int __user *clear_child_tid;
1019
1020
1021 void *worker_private;
1022
1023 u64 utime;
1024 u64 stime;
1025#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1026 u64 utimescaled;
1027 u64 stimescaled;
1028#endif
1029 u64 gtime;
1030 struct prev_cputime prev_cputime;
1031#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1032 struct vtime vtime;
1033#endif
1034
1035#ifdef CONFIG_NO_HZ_FULL
1036 atomic_t tick_dep_mask;
1037#endif
1038
1039 unsigned long nvcsw;
1040 unsigned long nivcsw;
1041
1042
1043 u64 start_time;
1044
1045
1046 u64 start_boottime;
1047
1048
1049 unsigned long min_flt;
1050 unsigned long maj_flt;
1051
1052
1053 struct posix_cputimers posix_cputimers;
1054
1055#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1056 struct posix_cputimers_work posix_cputimers_work;
1057#endif
1058
1059
1060
1061
1062 const struct cred __rcu *ptracer_cred;
1063
1064
1065 const struct cred __rcu *real_cred;
1066
1067
1068 const struct cred __rcu *cred;
1069
1070#ifdef CONFIG_KEYS
1071
1072 struct key *cached_requested_key;
1073#endif
1074
1075
1076
1077
1078
1079
1080
1081
1082 char comm[TASK_COMM_LEN];
1083
1084 struct nameidata *nameidata;
1085
1086#ifdef CONFIG_SYSVIPC
1087 struct sysv_sem sysvsem;
1088 struct sysv_shm sysvshm;
1089#endif
1090#ifdef CONFIG_DETECT_HUNG_TASK
1091 unsigned long last_switch_count;
1092 unsigned long last_switch_time;
1093#endif
1094
1095 struct fs_struct *fs;
1096
1097
1098 struct files_struct *files;
1099
1100#ifdef CONFIG_IO_URING
1101 struct io_uring_task *io_uring;
1102#endif
1103
1104
1105 struct nsproxy *nsproxy;
1106
1107
1108 struct signal_struct *signal;
1109 struct sighand_struct __rcu *sighand;
1110 sigset_t blocked;
1111 sigset_t real_blocked;
1112
1113 sigset_t saved_sigmask;
1114 struct sigpending pending;
1115 unsigned long sas_ss_sp;
1116 size_t sas_ss_size;
1117 unsigned int sas_ss_flags;
1118
1119 struct callback_head *task_works;
1120
1121#ifdef CONFIG_AUDIT
1122#ifdef CONFIG_AUDITSYSCALL
1123 struct audit_context *audit_context;
1124#endif
1125 kuid_t loginuid;
1126 unsigned int sessionid;
1127#endif
1128 struct seccomp seccomp;
1129 struct syscall_user_dispatch syscall_dispatch;
1130
1131
1132 u64 parent_exec_id;
1133 u64 self_exec_id;
1134
1135
1136 spinlock_t alloc_lock;
1137
1138
1139 raw_spinlock_t pi_lock;
1140
1141 struct wake_q_node wake_q;
1142
1143#ifdef CONFIG_RT_MUTEXES
1144
1145 struct rb_root_cached pi_waiters;
1146
1147 struct task_struct *pi_top_task;
1148
1149 struct rt_mutex_waiter *pi_blocked_on;
1150#endif
1151
1152#ifdef CONFIG_DEBUG_MUTEXES
1153
1154 struct mutex_waiter *blocked_on;
1155#endif
1156
1157#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1158 int non_block_count;
1159#endif
1160
1161#ifdef CONFIG_TRACE_IRQFLAGS
1162 struct irqtrace_events irqtrace;
1163 unsigned int hardirq_threaded;
1164 u64 hardirq_chain_key;
1165 int softirqs_enabled;
1166 int softirq_context;
1167 int irq_config;
1168#endif
1169#ifdef CONFIG_PREEMPT_RT
1170 int softirq_disable_cnt;
1171#endif
1172
1173#ifdef CONFIG_LOCKDEP
1174# define MAX_LOCK_DEPTH 48UL
1175 u64 curr_chain_key;
1176 int lockdep_depth;
1177 unsigned int lockdep_recursion;
1178 struct held_lock held_locks[MAX_LOCK_DEPTH];
1179#endif
1180
1181#if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1182 unsigned int in_ubsan;
1183#endif
1184
1185
1186 void *journal_info;
1187
1188
1189 struct bio_list *bio_list;
1190
1191
1192 struct blk_plug *plug;
1193
1194
1195 struct reclaim_state *reclaim_state;
1196
1197 struct io_context *io_context;
1198
1199#ifdef CONFIG_COMPACTION
1200 struct capture_control *capture_control;
1201#endif
1202
1203 unsigned long ptrace_message;
1204 kernel_siginfo_t *last_siginfo;
1205
1206 struct task_io_accounting ioac;
1207#ifdef CONFIG_PSI
1208
1209 unsigned int psi_flags;
1210#endif
1211#ifdef CONFIG_TASK_XACCT
1212
1213 u64 acct_rss_mem1;
1214
1215 u64 acct_vm_mem1;
1216
1217 u64 acct_timexpd;
1218#endif
1219#ifdef CONFIG_CPUSETS
1220
1221 nodemask_t mems_allowed;
1222
1223 seqcount_spinlock_t mems_allowed_seq;
1224 int cpuset_mem_spread_rotor;
1225 int cpuset_slab_spread_rotor;
1226#endif
1227#ifdef CONFIG_CGROUPS
1228
1229 struct css_set __rcu *cgroups;
1230
1231 struct list_head cg_list;
1232#endif
1233#ifdef CONFIG_X86_CPU_RESCTRL
1234 u32 closid;
1235 u32 rmid;
1236#endif
1237#ifdef CONFIG_FUTEX
1238 struct robust_list_head __user *robust_list;
1239#ifdef CONFIG_COMPAT
1240 struct compat_robust_list_head __user *compat_robust_list;
1241#endif
1242 struct list_head pi_state_list;
1243 struct futex_pi_state *pi_state_cache;
1244 struct mutex futex_exit_mutex;
1245 unsigned int futex_state;
1246#endif
1247#ifdef CONFIG_PERF_EVENTS
1248 struct perf_event_context *perf_event_ctxp;
1249 struct mutex perf_event_mutex;
1250 struct list_head perf_event_list;
1251#endif
1252#ifdef CONFIG_DEBUG_PREEMPT
1253 unsigned long preempt_disable_ip;
1254#endif
1255#ifdef CONFIG_NUMA
1256
1257 struct mempolicy *mempolicy;
1258 short il_prev;
1259 short pref_node_fork;
1260#endif
1261#ifdef CONFIG_NUMA_BALANCING
1262 int numa_scan_seq;
1263 unsigned int numa_scan_period;
1264 unsigned int numa_scan_period_max;
1265 int numa_preferred_nid;
1266 unsigned long numa_migrate_retry;
1267
1268 u64 node_stamp;
1269 u64 last_task_numa_placement;
1270 u64 last_sum_exec_runtime;
1271 struct callback_head numa_work;
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281 struct numa_group __rcu *numa_group;
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297 unsigned long *numa_faults;
1298 unsigned long total_numa_faults;
1299
1300
1301
1302
1303
1304
1305
1306 unsigned long numa_faults_locality[3];
1307
1308 unsigned long numa_pages_migrated;
1309#endif
1310
1311#ifdef CONFIG_RSEQ
1312 struct rseq __user *rseq;
1313 u32 rseq_len;
1314 u32 rseq_sig;
1315
1316
1317
1318
1319 unsigned long rseq_event_mask;
1320#endif
1321
1322#ifdef CONFIG_SCHED_MM_CID
1323 int mm_cid;
1324 int last_mm_cid;
1325 int migrate_from_cpu;
1326 int mm_cid_active;
1327 struct callback_head cid_work;
1328#endif
1329
1330 struct tlbflush_unmap_batch tlb_ubc;
1331
1332
1333 struct pipe_inode_info *splice_pipe;
1334
1335 struct page_frag task_frag;
1336
1337#ifdef CONFIG_TASK_DELAY_ACCT
1338 struct task_delay_info *delays;
1339#endif
1340
1341#ifdef CONFIG_FAULT_INJECTION
1342 int make_it_fail;
1343 unsigned int fail_nth;
1344#endif
1345
1346
1347
1348
1349 int nr_dirtied;
1350 int nr_dirtied_pause;
1351
1352 unsigned long dirty_paused_when;
1353
1354#ifdef CONFIG_LATENCYTOP
1355 int latency_record_count;
1356 struct latency_record latency_record[LT_SAVECOUNT];
1357#endif
1358
1359
1360
1361
1362 u64 timer_slack_ns;
1363 u64 default_timer_slack_ns;
1364
1365#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1366 unsigned int kasan_depth;
1367#endif
1368
1369#ifdef CONFIG_KCSAN
1370 struct kcsan_ctx kcsan_ctx;
1371#ifdef CONFIG_TRACE_IRQFLAGS
1372 struct irqtrace_events kcsan_save_irqtrace;
1373#endif
1374#ifdef CONFIG_KCSAN_WEAK_MEMORY
1375 int kcsan_stack_depth;
1376#endif
1377#endif
1378
1379#ifdef CONFIG_KMSAN
1380 struct kmsan_ctx kmsan_ctx;
1381#endif
1382
1383#if IS_ENABLED(CONFIG_KUNIT)
1384 struct kunit *kunit_test;
1385#endif
1386
1387#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1388
1389 int curr_ret_stack;
1390 int curr_ret_depth;
1391
1392
1393 struct ftrace_ret_stack *ret_stack;
1394
1395
1396 unsigned long long ftrace_timestamp;
1397
1398
1399
1400
1401
1402 atomic_t trace_overrun;
1403
1404
1405 atomic_t tracing_graph_pause;
1406#endif
1407
1408#ifdef CONFIG_TRACING
1409
1410 unsigned long trace_recursion;
1411#endif
1412
1413#ifdef CONFIG_KCOV
1414
1415
1416
1417 unsigned int kcov_mode;
1418
1419
1420 unsigned int kcov_size;
1421
1422
1423 void *kcov_area;
1424
1425
1426 struct kcov *kcov;
1427
1428
1429 u64 kcov_handle;
1430
1431
1432 int kcov_sequence;
1433
1434
1435 unsigned int kcov_softirq;
1436#endif
1437
1438#ifdef CONFIG_MEMCG
1439 struct mem_cgroup *memcg_in_oom;
1440 gfp_t memcg_oom_gfp_mask;
1441 int memcg_oom_order;
1442
1443
1444 unsigned int memcg_nr_pages_over_high;
1445
1446
1447 struct mem_cgroup *active_memcg;
1448#endif
1449
1450#ifdef CONFIG_MEMCG_KMEM
1451 struct obj_cgroup *objcg;
1452#endif
1453
1454#ifdef CONFIG_BLK_CGROUP
1455 struct gendisk *throttle_disk;
1456#endif
1457
1458#ifdef CONFIG_UPROBES
1459 struct uprobe_task *utask;
1460#endif
1461#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1462 unsigned int sequential_io;
1463 unsigned int sequential_io_avg;
1464#endif
1465 struct kmap_ctrl kmap_ctrl;
1466#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1467 unsigned long task_state_change;
1468# ifdef CONFIG_PREEMPT_RT
1469 unsigned long saved_state_change;
1470# endif
1471#endif
1472 struct rcu_head rcu;
1473 refcount_t rcu_users;
1474 int pagefault_disabled;
1475#ifdef CONFIG_MMU
1476 struct task_struct *oom_reaper_list;
1477 struct timer_list oom_reaper_timer;
1478#endif
1479#ifdef CONFIG_VMAP_STACK
1480 struct vm_struct *stack_vm_area;
1481#endif
1482#ifdef CONFIG_THREAD_INFO_IN_TASK
1483
1484 refcount_t stack_refcount;
1485#endif
1486#ifdef CONFIG_LIVEPATCH
1487 int patch_state;
1488#endif
1489#ifdef CONFIG_SECURITY
1490
1491 void *security;
1492#endif
1493#ifdef CONFIG_BPF_SYSCALL
1494
1495 struct bpf_local_storage __rcu *bpf_storage;
1496
1497 struct bpf_run_ctx *bpf_ctx;
1498#endif
1499
1500#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1501 unsigned long lowest_stack;
1502 unsigned long prev_lowest_stack;
1503#endif
1504
1505#ifdef CONFIG_X86_MCE
1506 void __user *mce_vaddr;
1507 __u64 mce_kflags;
1508 u64 mce_addr;
1509 __u64 mce_ripv : 1,
1510 mce_whole_page : 1,
1511 __mce_reserved : 62;
1512 struct callback_head mce_kill_me;
1513 int mce_count;
1514#endif
1515
1516#ifdef CONFIG_KRETPROBES
1517 struct llist_head kretprobe_instances;
1518#endif
1519#ifdef CONFIG_RETHOOK
1520 struct llist_head rethooks;
1521#endif
1522
1523#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1524
1525
1526
1527
1528
1529
1530 struct callback_head l1d_flush_kill;
1531#endif
1532
1533#ifdef CONFIG_RV
1534
1535
1536
1537
1538
1539
1540 union rv_task_monitor rv[RV_PER_TASK_MONITORS];
1541#endif
1542
1543#ifdef CONFIG_USER_EVENTS
1544 struct user_event_mm *user_event_mm;
1545#endif
1546
1547
1548
1549
1550
1551 randomized_struct_fields_end
1552
1553
1554 struct thread_struct thread;
1555
1556
1557
1558
1559
1560
1561
1562};
1563
/*
 * Return @task's struct pid. NOTE(review): caller presumably needs RCU or
 * a task reference for this to stay valid — confirm against callers.
 */
static inline struct pid *task_pid(struct task_struct *task)
{
 return task->thread_pid;
}
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1581
/* Global (init-namespace) pid of @tsk. */
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
 return tsk->pid;
}

/* Pid of @tsk as seen from pid namespace @ns. */
static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}

/* Pid of @tsk as seen from the caller's own pid namespace. */
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}


/* Global thread-group id (what userspace calls the "process" id). */
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
 return tsk->tgid;
}
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
/*
 * pid_alive - check that a task structure is not stale.
 * @p: task structure to be checked.
 *
 * ->thread_pid is cleared when the task's pids are detached at exit, so a
 * non-NULL value means @p is at most a zombie, not yet fully dead.
 */
static inline int pid_alive(const struct task_struct *p)
{
 return p->thread_pid != NULL;
}
1617
/*
 * Process-group / session / thread-group id accessors. The *_nr_ns
 * variants translate into pid namespace @ns; the *_vnr variants pass
 * ns == NULL, which __task_pid_nr_ns() presumably treats as the caller's
 * current namespace — confirm against its definition in kernel/pid.c.
 */
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}


static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}

static inline pid_t task_session_vnr(struct task_struct *tsk)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}

static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}

static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}
1648
/*
 * Thread-group id of @tsk's parent as seen from namespace @ns.
 * rcu_read_lock() guards the ->real_parent dereference; returns 0 when
 * @tsk is already dead (pid detached, see pid_alive()).
 */
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
 pid_t pid = 0;

 rcu_read_lock();
 if (pid_alive(tsk))
 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
 rcu_read_unlock();

 return pid;
}

/* Parent tgid of @tsk in the initial pid namespace. */
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
 return task_ppid_nr_ns(tsk, &init_pid_ns);
}

/* Process-group id of @tsk in the initial pid namespace. */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
 return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
1671
/* Extra reporting states appended past the TASK_REPORT mask: */
#define TASK_REPORT_IDLE (TASK_REPORT + 1)
#define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1)

/*
 * Fold a task's raw __state/exit_state into a compact index (a bit
 * number, via fls()) used for userspace reporting; see
 * task_index_to_char() for the matching letter table.
 */
static inline unsigned int __task_state_index(unsigned int tsk_state,
 unsigned int tsk_exit_state)
{
 unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;

 BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);

 /* TASK_IDLE (= UNINTERRUPTIBLE|NOLOAD) is reported as idle, not 'D': */
 if ((tsk_state & TASK_IDLE) == TASK_IDLE)
 state = TASK_REPORT_IDLE;

 /*
 * We're lying here, but rather than expose a completely new task state
 * to userspace, we can make this appear as if the task has gone through
 * a regular rt_mutex_lock() call.
 */
 if (tsk_state & TASK_RTLOCK_WAIT)
 state = TASK_UNINTERRUPTIBLE;

 return fls(state);
}
1695
/* Compact reporting index for @tsk's state; see __task_state_index(). */
static inline unsigned int task_state_index(struct task_struct *tsk)
{
 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
}

/* Map a state index to its reporting letter: R,S,D,T,t,X,Z,P,I. */
static inline char task_index_to_char(unsigned int state)
{
 static const char state_char[] = "RSDTtXZPI";

 /* Letter table must exactly cover every reportable state: */
 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);

 return state_char[state];
}

/* One-letter state of @tsk, as shown in ps/proc. */
static inline char task_state_to_char(struct task_struct *tsk)
{
 return task_index_to_char(task_state_index(tsk));
}
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
/*
 * is_global_init - check if a task structure is the init task.
 * @tsk: task structure to be checked.
 *
 * Since init is free to have sub-threads, the thread-group id is compared,
 * not the pid. Returns 1 for the first user-space task the kernel created,
 * 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
 return task_tgid_nr(tsk) == 1;
}
1728
1729extern struct pid *cad_pid;
1730
1731
1732
1733
/*
 * Per-task flags (task_struct::flags). Only modify for current;
 * the PF__HOLE__* names keep unused bits explicit in the layout.
 */
#define PF_VCPU 0x00000001 /* I'm a virtual CPU */
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
#define PF_POSTCOREDUMP 0x00000008 /* Coredumps should ignore this task */
#define PF_IO_WORKER 0x00000010 /* Task is an IO worker */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* Dumped core */
#define PF_SIGNALED 0x00000400 /* Killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory in order to free memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset, the FPU must be initialized before use */
#define PF_USER_WORKER 0x00004000 /* Kernel thread cloned from a userspace thread */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF__HOLE__00010000 0x00010000
#define PF_KSWAPD 0x00020000 /* I am kswapd */
#define PF_MEMALLOC_NOFS 0x00040000 /* All allocations inherit GFP_NOFS */
#define PF_MEMALLOC_NOIO 0x00080000 /* All allocations inherit GFP_NOIO */
#define PF_LOCAL_THROTTLE 0x00100000 /* Throttle writes only against the bdi I write to */

#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF__HOLE__00800000 0x00800000
#define PF__HOLE__01000000 0x01000000
#define PF__HOLE__02000000 0x02000000
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_PIN 0x10000000 /* Allocations constrained to zones which allow long-term pinning */
#define PF__HOLE__20000000 0x20000000
#define PF__HOLE__40000000 0x40000000
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)

/* Set or clear PF_USED_MATH on @child depending on @condition. */
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)

#define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current)

/* Propagate current's PF_USED_MATH setting to a (stopped) @child. */
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
1795
/*
 * True when the current task is pinned to exactly one CPU and userspace
 * cannot change its affinity.  On UP every thread trivially qualifies.
 */
static __always_inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
	return (current->flags & PF_NO_SETAFFINITY) &&
		(current->nr_cpus_allowed == 1);
#else
	return true;
#endif
}
1805
1806
/* Per-process atomic flags: bit numbers in task_struct::atomic_flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges (seccomp/prctl) */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled */
#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass cleared on execve() */
1815
/*
 * Generators for atomic PFA_* bit accessors: task_<func>(),
 * task_set_<func>() and task_clear_<func>().
 */
#define TASK_PFA_TEST(name, func) \
	static inline bool task_##func(struct task_struct *p) \
	{ return test_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_SET(name, func) \
	static inline void task_set_##func(struct task_struct *p) \
	{ set_bit(PFA_##name, &p->atomic_flags); }

#define TASK_PFA_CLEAR(name, func) \
	static inline void task_clear_##func(struct task_struct *p) \
	{ clear_bit(PFA_##name, &p->atomic_flags); }
1827
/* NOTE: no CLEAR accessor is instantiated — the flag is one-way by design. */
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)

TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)

TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)

TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)

TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)

/* "force disable" flags also have no CLEAR accessor: set once, permanent. */
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)

TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)

TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1856
1857static inline void
1858current_restore_flags(unsigned long orig_flags, unsigned long flags)
1859{
1860 current->flags &= ~flags;
1861 current->flags |= orig_flags & flags;
1862}
1863
1864extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1865extern int task_can_attach(struct task_struct *p);
1866extern int dl_bw_alloc(int cpu, u64 dl_bw);
1867extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP

/* Raw affinity-mask update; no validation (callers hold scheduler locks — TODO confirm). */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);

/*
 * Validated affinity-mask update for @p; returns 0 on success or a
 * negative error code.
 */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
#else
/* UP stubs: there is only CPU 0, so affinity handling degenerates. */
static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
}
static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
{
	/* The mask must at least allow the single present CPU. */
	if (!cpumask_test_cpu(0, new_mask))
		return -EINVAL;
	return 0;
}
static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
{
	if (src->user_cpus_ptr)
		return -EINVAL;
	return 0;
}
static inline void release_user_cpus_ptr(struct task_struct *p)
{
	/* Nothing to free; a non-NULL pointer here indicates a bug. */
	WARN_ON(p->user_cpus_ptr);
}

static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	return 0;
}
#endif
1912
1913extern int yield_to(struct task_struct *p, bool preempt);
1914extern void set_user_nice(struct task_struct *p, long nice);
1915extern int task_prio(const struct task_struct *p);
1916
1917
1918
1919
1920
1921
1922
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
	return PRIO_TO_NICE((p)->static_prio);
}
1927
1928extern int can_nice(const struct task_struct *p, const int nice);
1929extern int task_curr(const struct task_struct *p);
1930extern int idle_cpu(int cpu);
1931extern int available_idle_cpu(int cpu);
1932extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1933extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1934extern void sched_set_fifo(struct task_struct *p);
1935extern void sched_set_fifo_low(struct task_struct *p);
1936extern void sched_set_normal(struct task_struct *p, int nice);
1937extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1938extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1939extern struct task_struct *idle_task(int cpu);
1940
1941
1942
1943
1944
1945
1946
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: true if @p is an idle task (PF_IDLE set), false otherwise.
 */
static __always_inline bool is_idle_task(const struct task_struct *p)
{
	return !!(p->flags & PF_IDLE);
}
1951
1952extern struct task_struct *curr_task(int cpu);
1953extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1954
1955void yield(void);
1956
/*
 * A thread's kernel stack, possibly overlaid with the task_struct and/or
 * thread_info depending on configuration.
 */
union thread_union {
#ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
	struct task_struct task;
#endif
#ifndef CONFIG_THREAD_INFO_IN_TASK
	struct thread_info thread_info;
#endif
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};
1966
1967#ifndef CONFIG_THREAD_INFO_IN_TASK
1968extern struct thread_info init_thread_info;
1969#endif
1970
1971extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1972
/* thread_info lives either inside task_struct or at the stack base. */
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define task_thread_info(task) (&(task)->thread_info)
#elif !defined(__HAVE_THREAD_FUNCTIONS)
# define task_thread_info(task) ((struct thread_info *)(task)->stack)
#endif
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990extern struct task_struct *find_task_by_vpid(pid_t nr);
1991extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1992
1993
1994
1995
1996extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1997
1998extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1999extern int wake_up_process(struct task_struct *tsk);
2000extern void wake_up_new_task(struct task_struct *tsk);
2001
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
/* UP: nothing to kick — there is no other CPU running @tsk. */
static inline void kick_process(struct task_struct *tsk) { }
#endif
2007
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);

/* Set @tsk's comm name; the 'exec' variant is handled by __set_task_comm(). */
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
	__set_task_comm(tsk, from, false);
}

extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
/* @buf must be an array of exactly TASK_COMM_LEN bytes (enforced at build time). */
#define get_task_comm(buf, tsk) ({ \
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
	__get_task_comm(buf, sizeof(buf), tsk); \
})
2020
#ifdef CONFIG_SMP
static __always_inline void scheduler_ipi(void)
{
	/*
	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
	 * TIF_NEED_RESCHED remotely (for the first time) will also send
	 * this IPI.
	 */
	preempt_fold_need_resched();
}
#else
/* UP: no remote CPUs, hence no scheduler IPIs. */
static inline void scheduler_ipi(void) { }
#endif
2034
2035extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
2036
2037
2038
2039
2040
/* Set thread-info flag @flag for @tsk. */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	struct thread_info *ti = task_thread_info(tsk);

	set_ti_thread_flag(ti, flag);
}
2045
/* Clear thread-info flag @flag for @tsk. */
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	struct thread_info *ti = task_thread_info(tsk);

	clear_ti_thread_flag(ti, flag);
}
2050
2051static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
2052 bool value)
2053{
2054 update_ti_thread_flag(task_thread_info(tsk), flag, value);
2055}
2056
/* Atomically set @flag for @tsk and return its previous value. */
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	struct thread_info *ti = task_thread_info(tsk);

	return test_and_set_ti_thread_flag(ti, flag);
}
2061
/* Atomically clear @flag for @tsk and return its previous value. */
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	struct thread_info *ti = task_thread_info(tsk);

	return test_and_clear_ti_thread_flag(ti, flag);
}
2066
/* Non-destructively test thread-info flag @flag for @tsk. */
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	struct thread_info *ti = task_thread_info(tsk);

	return test_ti_thread_flag(ti, flag);
}
2071
/* Mark @tsk as needing a reschedule. */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
2076
/* Withdraw a pending reschedule request for @tsk. */
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
2081
/* Does @tsk have a reschedule pending?  Hinted unlikely for hot paths. */
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
2086
2087
2088
2089
2090
2091
2092
/*
 * cond_resched() and cond_resched_lock(): latency reduction via explicit
 * rescheduling in places that are safe.  The return value indicates
 * whether a reschedule was done in fact.
 *
 * Fully preemptible (non-dynamic) kernels don't need it, hence the
 * preprocessor maze below selecting a suitable _cond_resched().
 */
#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
extern int __cond_resched(void);

#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)

/* Live-patch transition hooks for the static-call variant. */
void sched_dynamic_klp_enable(void);
void sched_dynamic_klp_disable(void);

DECLARE_STATIC_CALL(cond_resched, __cond_resched);

static __always_inline int _cond_resched(void)
{
	/* Patched at boot to the model-appropriate implementation. */
	return static_call_mod(cond_resched)();
}

#elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)

/* Static-key based dynamic-preemption variant. */
extern int dynamic_cond_resched(void);

static __always_inline int _cond_resched(void)
{
	return dynamic_cond_resched();
}

#else

/* Plain non-preemptible kernel: always call __cond_resched(). */
static inline int _cond_resched(void)
{
	klp_sched_try_switch();
	return __cond_resched();
}

#endif

#else

/* Fully preemptible kernel: cond_resched() is a no-op (bar live patching). */
static inline int _cond_resched(void)
{
	klp_sched_try_switch();
	return 0;
}

#endif

#define cond_resched() ({ \
	__might_resched(__FILE__, __LINE__, 0); \
	_cond_resched(); \
})
2141
2142extern int __cond_resched_lock(spinlock_t *lock);
2143extern int __cond_resched_rwlock_read(rwlock_t *lock);
2144extern int __cond_resched_rwlock_write(rwlock_t *lock);
2145
/* rcu_preempt_depth() is folded into the upper bits of the offset word. */
#define MIGHT_RESCHED_RCU_SHIFT 8
#define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)

#ifndef CONFIG_PREEMPT_RT
/*
 * Non RT kernels have an elevated preempt count due to the held lock,
 * but are not allowed to be inside a RCU read side critical section
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS PREEMPT_LOCK_OFFSET
#else
/*
 * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
 * cond_resched*lock() has to take that into account because it checks for
 * preempt_count() and rcu_preempt_depth().
 */
# define PREEMPT_LOCK_RESCHED_OFFSETS \
	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
#endif
2164
/*
 * cond_resched while holding a (spin/rw) lock: may drop and re-acquire
 * @lock around the reschedule (implemented in kernel/sched/core.c).
 */
#define cond_resched_lock(lock) ({ \
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
	__cond_resched_lock(lock); \
})

#define cond_resched_rwlock_read(lock) ({ \
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
	__cond_resched_rwlock_read(lock); \
})

#define cond_resched_rwlock_write(lock) ({ \
	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS); \
	__cond_resched_rwlock_write(lock); \
})
2179
/*
 * cond_resched() from inside an RCU read-side section: leave the section,
 * reschedule if needed, then re-enter it.  Compiles away entirely when
 * preemptible RCU makes it unnecessary (and no atomic-sleep debugging).
 */
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
#endif
}
2188
/*
 * Preemption-model query helpers.  With PREEMPT_DYNAMIC the model is
 * selected at boot, so these are real functions; otherwise they reduce
 * to compile-time constants.
 */
#ifdef CONFIG_PREEMPT_DYNAMIC

extern bool preempt_model_none(void);
extern bool preempt_model_voluntary(void);
extern bool preempt_model_full(void);

#else

static inline bool preempt_model_none(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_NONE);
}
static inline bool preempt_model_voluntary(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
}
static inline bool preempt_model_full(void)
{
	return IS_ENABLED(CONFIG_PREEMPT);
}

#endif

/* PREEMPT_RT is a build-time property, never dynamic. */
static inline bool preempt_model_rt(void)
{
	return IS_ENABLED(CONFIG_PREEMPT_RT);
}
2216
2217
2218
2219
2220
2221
2222
2223
2224
/*
 * Does the preemption model allow non-cooperative preemption?
 *
 * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
 * CONFIG_PREEMPTION; for PREEMPT_DYNAMIC this doesn't work as the
 * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
 * PREEMPT_NONE model.
 */
static inline bool preempt_model_preemptible(void)
{
	return preempt_model_full() || preempt_model_rt();
}
2229
2230
2231
2232
2233
2234
/*
 * Does a critical section need to be broken due to another
 * task waiting?  (Technically a latency concern even without
 * CONFIG_PREEMPTION, but contention is only queried when preemptible.)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
	return spin_is_contended(lock);
#else
	return 0;
#endif
}
2243
2244
2245
2246
2247
2248
2249
2250
2251
/*
 * rwlock analogue of spin_needbreak(): should the write-side critical
 * section yield because the lock is contended?
 */
static inline int rwlock_needbreak(rwlock_t *lock)
{
#ifdef CONFIG_PREEMPTION
	return rwlock_is_contended(lock);
#else
	return 0;
#endif
}
2260
/* Is a reschedule pending for the current task?  Hinted unlikely. */
static __always_inline bool need_resched(void)
{
	return unlikely(tif_need_resched());
}
2265
2266
2267
2268
/* Wrappers for accessing the CPU a task last ran on; no-ops on UP. */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	/* READ_ONCE: the task may be concurrently migrated by another CPU. */
	return READ_ONCE(task_thread_info(p)->cpu);
}

extern void set_task_cpu(struct task_struct *p, unsigned int cpu);

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif
2290
2291extern bool sched_task_on_rq(struct task_struct *p);
2292extern unsigned long get_wchan(struct task_struct *p);
2293extern struct task_struct *cpu_curr_snapshot(int cpu);
2294
2295
2296
2297
2298
2299
2300
2301
2302
/*
 * Default for architectures/hypervisors that don't override it:
 * assume a (v)CPU is never preempted by the host.
 */
#ifndef vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return false;
}
#endif
2309
2310extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2311extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2312
2313#ifndef TASK_SIZE_OF
2314#define TASK_SIZE_OF(tsk) TASK_SIZE
2315#endif
2316
#ifdef CONFIG_SMP
static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * As lock holder preemption issue, we both skip spinning if
	 * task is not on cpu or its cpu is preempted
	 */
	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
}

/* Returns effective CPU utilization of @cpu (units per scheduler — see kernel/sched). */
unsigned long sched_cpu_util(int cpu);
#endif
2330
#ifdef CONFIG_RSEQ

/*
 * Map the event mask on the user-space ABI enum rseq_cs_flags
 * for direct mask checks.
 */
enum rseq_event_mask_bits {
	RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
	RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
	RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
};

enum rseq_event_mask {
	RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT),
	RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT),
	RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT),
};
2348
2349static inline void rseq_set_notify_resume(struct task_struct *t)
2350{
2351 if (t->rseq)
2352 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2353}
2354
2355void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2356
2357static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2358 struct pt_regs *regs)
2359{
2360 if (current->rseq)
2361 __rseq_handle_notify_resume(ksig, regs);
2362}
2363
2364static inline void rseq_signal_deliver(struct ksignal *ksig,
2365 struct pt_regs *regs)
2366{
2367 preempt_disable();
2368 __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask);
2369 preempt_enable();
2370 rseq_handle_notify_resume(ksig, regs);
2371}
2372
2373
/* rseq_preempt() requires preemption to be disabled. */
static inline void rseq_preempt(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}
2379
2380
/* rseq_migrate() requires preemption to be disabled. */
static inline void rseq_migrate(struct task_struct *t)
{
	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
	rseq_set_notify_resume(t);
}
2386
2387
2388
2389
2390
/*
 * If parent process has a registered restartable sequences area, the
 * child inherits. Unregister rseq for a clone with CLONE_VM set.
 */
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
	if (clone_flags & CLONE_VM) {
		t->rseq = NULL;
		t->rseq_len = 0;
		t->rseq_sig = 0;
		t->rseq_event_mask = 0;
	} else {
		t->rseq = current->rseq;
		t->rseq_len = current->rseq_len;
		t->rseq_sig = current->rseq_sig;
		t->rseq_event_mask = current->rseq_event_mask;
	}
}
2405
/* Drop the rseq registration across execve(); the new image re-registers. */
static inline void rseq_execve(struct task_struct *t)
{
	t->rseq = NULL;
	t->rseq_len = 0;
	t->rseq_sig = 0;
	t->rseq_event_mask = 0;
}
2413
#else

/* CONFIG_RSEQ=n: all rseq hooks compile to empty no-ops. */
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
static inline void rseq_handle_notify_resume(struct ksignal *ksig,
					     struct pt_regs *regs)
{
}
static inline void rseq_signal_deliver(struct ksignal *ksig,
				       struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
{
}
static inline void rseq_migrate(struct task_struct *t)
{
}
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
}
static inline void rseq_execve(struct task_struct *t)
{
}

#endif
2441
#ifdef CONFIG_DEBUG_RSEQ

/* Debug hook run on syscall entry (flags syscalls inside an rseq critical section — TODO confirm). */
void rseq_syscall(struct pt_regs *regs);

#else

static inline void rseq_syscall(struct pt_regs *regs)
{
}

#endif
2453
/* Core-scheduling (SMT sibling isolation) interface; stubs when disabled. */
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
extern void sched_core_fork(struct task_struct *p);
extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
				unsigned long uaddr);
extern int sched_core_idle_cpu(int cpu);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
static inline void sched_core_fork(struct task_struct *p) { }
/* Without core scheduling, "core idle" degenerates to plain idle_cpu(). */
static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
#endif
2465
2466extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2467
2468#endif
2469