linux/include/linux/sched/task.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_TASK_H
#define _LINUX_SCHED_TASK_H

/*
 * Interface between the scheduler and various task lifetime (fork()/exit())
 * functionality:
 */

#include <linux/sched.h>
#include <linux/uaccess.h>

struct task_struct;
struct rusage;
union thread_union;
struct css_set;

/* All the bits taken by the old clone syscall. */
#define CLONE_LEGACY_FLAGS 0xffffffffULL

struct kernel_clone_args {
	u64 flags;
	int __user *pidfd;
	int __user *child_tid;
	int __user *parent_tid;
	const char *name;
	int exit_signal;
	u32 kthread:1;
	u32 io_thread:1;
	u32 user_worker:1;
	u32 no_files:1;
	unsigned long stack;
	unsigned long stack_size;
	unsigned long tls;
	pid_t *set_tid;
	/* Number of elements in *set_tid */
	size_t set_tid_size;
	int cgroup;
	int idle;
	int (*fn)(void *);
	void *fn_arg;
	struct cgroup *cgrp;
	struct css_set *cset;
};
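
/*
 * Usage sketch (illustrative, not part of the upstream header):
 * kernel_thread() in kernel/fork.c fills this struct roughly along
 * these lines before handing it to kernel_clone():
 *
 *	struct kernel_clone_args args = {
 *		.flags		= ((lower_32_bits(flags) | CLONE_VM |
 *				    CLONE_UNTRACED) & ~CSIGNAL),
 *		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
 *		.fn		= fn,
 *		.fn_arg		= arg,
 *		.name		= name,
 *		.kthread	= 1,
 *	};
 *
 *	pid = kernel_clone(&args);
 *
 * Details simplified; see kernel/fork.c for the real thing.
 */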

/*
 * Protects the doubly-linked list of all tasks and the
 * parent/child/sibling links: taken for writing when tasks are
 * added (fork()) or removed (exit()/release_task()), and for
 * reading by anything that walks the task list.
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
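
/*
 * Usage sketch (illustrative): a classic tasklist_lock consumer walks
 * the global task list under the read lock (for_each_process() is
 * declared in <linux/sched/signal.h>; do_something() is a placeholder):
 *
 *	struct task_struct *p;
 *
 *	read_lock(&tasklist_lock);
 *	for_each_process(p)
 *		do_something(p);
 *	read_unlock(&tasklist_lock);
 */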

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern int lockdep_tasklist_lock_is_held(void);

extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);

extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);

void __noreturn do_task_dead(void);
void __noreturn make_task_dead(int signr);

extern void mm_cache_init(void);
extern void proc_caches_init(void);

extern void fork_init(void);

extern void release_task(struct task_struct * p);

extern int copy_thread(struct task_struct *, const struct kernel_clone_args *);

extern void flush_thread(void);

#ifdef CONFIG_HAVE_EXIT_THREAD
extern void exit_thread(struct task_struct *tsk);
#else
static inline void exit_thread(struct task_struct *tsk)
{
}
#endif
extern __noreturn void do_group_exit(int);

extern void exit_files(struct task_struct *);
extern void exit_itimers(struct task_struct *);

extern pid_t kernel_clone(struct kernel_clone_args *kargs);
struct task_struct *copy_process(struct pid *pid, int trace, int node,
				 struct kernel_clone_args *args);
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
			   unsigned long flags);
extern pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
int kernel_wait(pid_t pid, int *stat);
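
/*
 * Usage sketch (illustrative): core code can spawn a kernel thread
 * with the low-level interface above and reap it with kernel_wait().
 * Most code should use the higher-level kthread_run() API instead;
 * my_fn and status are placeholders:
 *
 *	pid_t pid = kernel_thread(my_fn, NULL, "my-worker", CLONE_FS);
 *
 *	if (pid >= 0)
 *		kernel_wait(pid, &status);
 */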

extern void free_task(struct task_struct *tsk);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

static inline struct task_struct *get_task_struct(struct task_struct *t)
{
	refcount_inc(&t->usage);
	return t;
}

extern void __put_task_struct(struct task_struct *t);
extern void __put_task_struct_rcu_cb(struct rcu_head *rhp);

static inline void put_task_struct(struct task_struct *t)
{
	if (!refcount_dec_and_test(&t->usage))
		return;

	/*
	 * In !RT, it is always safe to call __put_task_struct().
	 * Under RT, we can only call it in preemptible context.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		static DEFINE_WAIT_OVERRIDE_MAP(put_task_map, LD_WAIT_SLEEP);

		lock_map_acquire_try(&put_task_map);
		__put_task_struct(t);
		lock_map_release(&put_task_map);
		return;
	}

	/*
	 * Under PREEMPT_RT, we can't call __put_task_struct()
	 * in atomic context because it will indirectly
	 * acquire sleeping locks.
	 *
	 * call_rcu() will schedule __put_task_struct_rcu_cb()
	 * to be called in process context.
	 *
	 * __put_task_struct() is called when
	 * refcount_dec_and_test(&t->usage) succeeds.
	 *
	 * This means that it can't "conflict" with
	 * put_task_struct_rcu_user(), which abuses ->rcu the same
	 * way; rcu_users holds a reference, so task->usage can't be
	 * zero after the rcu_users 1 -> 0 transition.
	 *
	 * delayed_free_task() also uses ->rcu, but it is only called
	 * when fork() fails, so it cannot conflict with
	 * put_task_struct() either.
	 */
	call_rcu(&t->rcu, __put_task_struct_rcu_cb);
}
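
/*
 * Usage sketch (illustrative): pin a task across a window where the
 * last reference might otherwise be dropped, then release it
 * (do_something_slow() is a placeholder):
 *
 *	struct task_struct *t = get_task_struct(current);
 *
 *	do_something_slow(t);
 *	put_task_struct(t);
 */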

DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T))
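
/*
 * Usage sketch (illustrative): with the DEFINE_FREE() hook above, a
 * pointer can be declared with __free(put_task) from <linux/cleanup.h>
 * so that put_task_struct() runs automatically on every exit from the
 * scope (precondition() is a placeholder):
 *
 *	struct task_struct *t __free(put_task) = get_task_struct(current);
 *
 *	if (!precondition(t))
 *		return -EINVAL;
 *
 * The early return above drops the reference without an explicit
 * put_task_struct() call.
 */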

static inline void put_task_struct_many(struct task_struct *t, int nr)
{
	if (refcount_sub_and_test(nr, &t->usage))
		__put_task_struct(t);
}

void put_task_struct_rcu_user(struct task_struct *task);

/* Free all architecture-specific resources held by a thread. */
void release_thread(struct task_struct *dead_task);

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif

#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST
/*
 * If an architecture has not declared a thread_struct whitelist,
 * we must assume that something in there may need to be copied to
 * userspace.
 */
static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	*offset = 0;
	/* Handle dynamically sized thread_struct. */
	*size = arch_task_struct_size - offsetof(struct task_struct, thread);
}
#endif
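
/*
 * Sketch (hypothetical architecture): an arch that keeps its FPU
 * register state in thread_struct would whitelist just that member
 * instead (fpstate is an invented field name for illustration):
 *
 *	static inline void arch_thread_struct_whitelist(unsigned long *offset,
 *							unsigned long *size)
 *	{
 *		*offset = offsetof(struct thread_struct, fpstate);
 *		*size = sizeof_field(struct thread_struct, fpstate);
 *	}
 */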

#ifdef CONFIG_VMAP_STACK
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return t->stack_vm_area;
}
#else
static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
{
	return NULL;
}
#endif
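
/*
 * Usage sketch (illustrative): stack accounting code can reach the
 * backing vmalloc area of a vmapped stack through this helper; with
 * !CONFIG_VMAP_STACK it returns NULL, so both configurations share
 * the same caller code:
 *
 *	struct vm_struct *vm = task_stack_vm_area(tsk);
 *
 *	if (vm)
 *		nr_pages = vm->nr_pages;
 */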

/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.
 * Also pins the final release of task.io_context.  Also protects
 * ->cpuset, ->cgroup.subsys[], ->vfork_done and ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}
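
/*
 * Usage sketch (illustrative): take the per-task alloc_lock around a
 * read of a field it protects, e.g. ->comm:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	task_lock(p);
 *	strscpy(comm, p->comm, sizeof(comm));
 *	task_unlock(p);
 *
 * The in-tree get_task_comm() helper wraps essentially this pattern.
 */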

#endif /* _LINUX_SCHED_TASK_H */