linux/include/linux/sched/signal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
        spinlock_t              siglock;
        refcount_t              count;
        wait_queue_head_t       signalfd_wqh;
        struct k_sigaction      action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
        int                     ac_flag;
        long                    ac_exitcode;
        unsigned long           ac_mem;
        u64                     ac_utime, ac_stime;
        unsigned long           ac_minflt, ac_majflt;
};

struct cpu_itimer {
        u64 expires;
        u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
        atomic64_t utime;
        atomic64_t stime;
        atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
        (struct task_cputime_atomic) {                          \
                .utime = ATOMIC64_INIT(0),                      \
                .stime = ATOMIC64_INIT(0),                      \
                .sum_exec_runtime = ATOMIC64_INIT(0),           \
        }

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:     atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
        struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
        sigset_t signal;
        struct hlist_node node;
};

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
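/*
 * Illustrative sketch (not part of this header): as a consequence of the
 * rule above, code that modifies signal_struct fields takes the shared
 * sighand lock, for example:
 *
 *      spin_lock_irq(&task->sighand->siglock);
 *      task->signal->group_stop_count = 0;     (example field update)
 *      spin_unlock_irq(&task->sighand->siglock);
 */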
struct signal_struct {
        refcount_t              sigcnt;
        atomic_t                live;
        int                     nr_threads;
        int                     quick_threads;
        struct list_head        thread_head;

        wait_queue_head_t       wait_chldexit;  /* for wait4() */

        /* current thread group signal load-balancing target: */
        struct task_struct      *curr_target;

        /* shared signal handling: */
        struct sigpending       shared_pending;

        /* For collecting multiprocess signals during fork */
        struct hlist_head       multiprocess;

        /* thread group exit support */
        int                     group_exit_code;
        /* notify group_exec_task when notify_count is less than or equal to 0 */
        int                     notify_count;
        struct task_struct      *group_exec_task;

        /* thread group stop support; overloads group_exit_code too */
        int                     group_stop_count;
        unsigned int            flags; /* see SIGNAL_* flags below */

        struct core_state *core_state; /* coredumping support */

        /*
         * PR_SET_CHILD_SUBREAPER marks a process, such as a service
         * manager, so that orphaned (double-forking) child processes
         * are re-parented to it instead of to 'init'. The service
         * manager can then receive SIGCHLD and investigate each such
         * process until it calls wait(). All children of this process
         * inherit a flag telling them whether to look for a
         * child_subreaper process at exit.
         */
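        /*
         * Assumed userspace usage sketch (illustrative): a service
         * manager typically opts in with
         *
         *      prctl(PR_SET_CHILD_SUBREAPER, 1);
         *
         * after which its orphaned descendants are re-parented to it
         * rather than to init.
         */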
        unsigned int            is_child_subreaper:1;
        unsigned int            has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

        /* POSIX.1b Interval Timers */
        unsigned int            next_posix_timer_id;
        struct list_head        posix_timers;

        /* ITIMER_REAL timer for the process */
        struct hrtimer real_timer;
        ktime_t it_real_incr;

        /*
         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
         * CPUCLOCK_PROF and CPUCLOCK_VIRT to index the array, as these
         * values are defined as 0 and 1 respectively
         */
        struct cpu_itimer it[2];

        /*
         * Thread group totals for process CPU timers.
         * See thread_group_cputimer(), et al, for details.
         */
        struct thread_group_cputimer cputimer;

#endif
        /* Empty if CONFIG_POSIX_TIMERS=n */
        struct posix_cputimers posix_cputimers;

        /* PID/PID hash table linkage. */
        struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif

        struct pid *tty_old_pgrp;

        /* boolean value for session group leader */
        int leader;

        struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
        struct autogroup *autogroup;
#endif
        /*
         * Cumulative resource counters for dead threads in the group,
         * and for reaped dead child processes forked by this group.
         * Live threads maintain their own counters and add to these
         * in __exit_signal, except for the group leader.
         */
        seqlock_t stats_lock;
        u64 utime, stime, cutime, cstime;
        u64 gtime;
        u64 cgtime;
        struct prev_cputime prev_cputime;
        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
        unsigned long inblock, oublock, cinblock, coublock;
        unsigned long maxrss, cmaxrss;
        struct task_io_accounting ioac;

        /*
         * Cumulative ns of scheduled CPU time for dead threads in the
         * group, not including a zombie group leader. (This only differs
         * from jiffies_to_ns(utime + stime) if sched_clock uses something
         * other than jiffies.)
         */
        unsigned long long sum_sched_runtime;

        /*
         * We don't bother to synchronize most readers of this at all,
         * because there is no reader checking a limit that actually needs
         * to get both rlim_cur and rlim_max atomically, and either one
         * alone is a single word that can safely be read normally.
         * getrlimit/setrlimit use task_lock(current->group_leader) to
         * protect this instead of the siglock, because they really
         * have no need to disable irqs.
         */
        struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
        struct pacct_struct pacct;      /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
        struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
        unsigned audit_tty;
        struct tty_audit_buf *tty_audit_buf;
#endif

        /*
         * Thread is the potential origin of an oom condition; kill first on
         * oom
         */
        bool oom_flag_origin;
        short oom_score_adj;            /* OOM kill score adjustment */
        short oom_score_adj_min;        /* OOM kill score adjustment min value.
                                         * Only settable by CAP_SYS_RESOURCE. */
        struct mm_struct *oom_mm;       /* recorded mm when the thread group got
                                         * killed by the oom killer */

        struct mutex cred_guard_mutex;  /* guard against foreign influences on
                                         * credential calculations
                                         * (notably ptrace).
                                         * Deprecated; do not use in new code.
                                         * Use exec_update_lock instead.
                                         */
        struct rw_semaphore exec_update_lock;   /* Held while task_struct is
                                                 * being updated during exec,
                                                 * and may have inconsistent
                                                 * permissions.
                                                 */
} __randomize_layout;

/*
 * Bits in the flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED     0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED   0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT       0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED      0x00000010
#define SIGNAL_CLD_CONTINUED    0x00000020
#define SIGNAL_CLD_MASK         (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE       0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
                          SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
                                         unsigned int flags)
{
        WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
        sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
                          kernel_siginfo_t *info, enum pid_type *type);

static inline int kernel_dequeue_signal(void)
{
        struct task_struct *task = current;
        kernel_siginfo_t __info;
        enum pid_type __type;
        int ret;

        spin_lock_irq(&task->sighand->siglock);
        ret = dequeue_signal(task, &task->blocked, &__info, &__type);
        spin_unlock_irq(&task->sighand->siglock);

        return ret;
}

static inline void kernel_signal_stop(void)
{
        spin_lock_irq(&current->sighand->siglock);
        if (current->jobctl & JOBCTL_STOP_DEQUEUED) {
                current->jobctl |= JOBCTL_STOPPED;
                set_special_state(TASK_STOPPED);
        }
        spin_unlock_irq(&current->sighand->siglock);

        schedule();
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
                            struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr);
int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int send_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
                        struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
                                const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline void clear_notify_signal(void)
{
        clear_thread_flag(TIF_NOTIFY_SIGNAL);
        smp_mb__after_atomic();
}

/*
 * Returns 'true' if kick_process() is needed to force a transition from
 * user -> kernel to guarantee expedient run of TWA_SIGNAL based task_work.
 */
static inline bool __set_notify_signal(struct task_struct *task)
{
        return !test_and_set_tsk_thread_flag(task, TIF_NOTIFY_SIGNAL) &&
               !wake_up_state(task, TASK_INTERRUPTIBLE);
}

/*
 * Called to break out of interruptible wait loops, and enter the
 * exit_to_user_mode_loop().
 */
static inline void set_notify_signal(struct task_struct *task)
{
        if (__set_notify_signal(task))
                kick_process(task);
}

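/*
 * Assumed usage sketch (illustrative, not part of this header):
 * TWA_SIGNAL task_work depends on this wakeup. Queueing work for
 * another task typically looks like:
 *
 *      init_task_work(&work, my_func);         (my_func is hypothetical)
 *      if (task_work_add(task, &work, TWA_SIGNAL))
 *              ...                             (task is already exiting)
 */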
static inline int restart_syscall(void)
{
        set_tsk_thread_flag(current, TIF_SIGPENDING);
        return -ERESTARTNOINTR;
}

static inline int task_sigpending(struct task_struct *p)
{
        return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
        /*
         * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
         * behavior in terms of ensuring that we break out of wait loops
         * so that notify signal callbacks can be processed.
         */
        if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
                return 1;
        return task_sigpending(p);
}

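/*
 * Illustrative sketch (assumed usage, not part of this header):
 * signal_pending() is what breaks the canonical interruptible wait
 * loop:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      while (!condition) {                    (condition is hypothetical)
 *              if (signal_pending(current)) {
 *                      __set_current_state(TASK_RUNNING);
 *                      return -ERESTARTSYS;
 *              }
 *              schedule();
 *              set_current_state(TASK_INTERRUPTIBLE);
 *      }
 *      __set_current_state(TASK_RUNNING);
 */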
static inline int __fatal_signal_pending(struct task_struct *p)
{
        return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
        return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
                return 0;
        if (!signal_pending(p))
                return 0;

        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}

/*
 * This should only be used in fault handlers, to decide whether we
 * should stop the current fault routine and handle the signals
 * instead, especially when the fault has been interrupted with
 * VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
                                        struct pt_regs *regs)
{
        return unlikely((fault_flags & VM_FAULT_RETRY) &&
                        (fatal_signal_pending(current) ||
                         (user_mode(regs) && signal_pending(current))));
}

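/*
 * Assumed usage sketch in an arch fault handler (local names are
 * illustrative):
 *
 *      fault = handle_mm_fault(vma, address, flags, regs);
 *      if (fault_signal_pending(fault, regs))
 *              return;         (the signal is handled on the way back
 *                               to user mode)
 */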
/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
        unsigned int state = 0;

        if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) {
                t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED);
                state = TASK_WAKEKILL | __TASK_TRACED;
        }
        signal_wake_up_state(t, state);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
        unsigned int state = 0;

        if (resume) {
                t->jobctl &= ~JOBCTL_TRACED;
                state = __TASK_TRACED;
        }
        signal_wake_up_state(t, state);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  The arch signal code relies on TIF_SIGPENDING being
 * set, so that it always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
        set_thread_flag(TIF_RESTORE_SIGMASK);
}

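/*
 * Assumed usage sketch (modeled on sigsuspend-style syscalls): the
 * caller saves the old mask, installs the temporary one, and lets the
 * flag arrange the restore on the way back to user mode:
 *
 *      current->saved_sigmask = current->blocked;
 *      set_current_blocked(&newmask);          (newmask is hypothetical)
 *      ...                                     (wait for a signal)
 *      set_restore_sigmask();
 *      return -ERESTARTNOHAND;
 */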
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
        clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
        clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
        return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
        return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
        return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else   /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
        current->restore_sigmask = true;
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
        task->restore_sigmask = false;
}

static inline void clear_restore_sigmask(void)
{
        current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
        return current->restore_sigmask;
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
        return task->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
        if (!current->restore_sigmask)
                return false;
        current->restore_sigmask = false;
        return true;
}
#endif  /* TIF_RESTORE_SIGMASK */

static inline void restore_saved_sigmask(void)
{
        if (test_and_clear_restore_sigmask())
                __set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
        if (interrupted)
                WARN_ON(!signal_pending(current));
        else
                restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
        sigset_t *res = &current->blocked;

        if (unlikely(test_restore_sigmask()))
                res = &current->saved_sigmask;
        return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
        return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV   ((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
        return sp >= current->sas_ss_sp &&
                sp - current->sas_ss_sp < current->sas_ss_size;
#else
        return sp > current->sas_ss_sp &&
                sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
        /*
         * If the signal stack is SS_AUTODISARM then, by construction, we
         * can't be on the signal stack unless user code deliberately set
         * SS_AUTODISARM when we were already on it.
         *
         * This improves reliability: if user state gets corrupted such that
         * the stack pointer points very close to the end of the signal stack,
         * then this check will enable the signal to be handled anyway.
         */
        if (current->sas_ss_flags & SS_AUTODISARM)
                return 0;

        return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
        if (!current->sas_ss_size)
                return SS_DISABLE;

        return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
        p->sas_ss_sp = 0;
        p->sas_ss_size = 0;
        p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
        if (unlikely(ksig->ka.sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
                return current->sas_ss_sp;
#else
                return current->sas_ss_sp + current->sas_ss_size;
#endif
        return sp;
}

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
        list_empty(&init_task.tasks)

#define next_task(p) \
        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
        for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
 * otherwise next_thread(t) will never reach g after list_del_rcu(g).
 */
#define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)    \
        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
                lockdep_is_held(&tasklist_lock))

#define for_each_thread(p, t)           \
        __for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)   \
        for_each_process(p) for_each_thread(p, t)

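/*
 * Illustrative sketch (not part of this header): the double loop must
 * run under rcu_read_lock() or tasklist_lock, e.g.:
 *
 *      struct task_struct *p, *t;
 *
 *      rcu_read_lock();
 *      for_each_process_thread(p, t)
 *              pr_info("%s: pid=%d\n", t->comm, task_pid_nr(t));
 *      rcu_read_unlock();
 */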
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
        struct pid *pid;

        if (type == PIDTYPE_PID)
                pid = task_pid(task);
        else
                pid = task->signal->pids[type];
        return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without the tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
        return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
        return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
        return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
        return p1->signal == p2->signal;
}

/*
 * Returns NULL if p is the last thread in the thread group.
 */
static inline struct task_struct *__next_thread(struct task_struct *p)
{
        return list_next_or_null_rcu(&p->signal->thread_head,
                                        &p->thread_node,
                                        struct task_struct,
                                        thread_node);
}

static inline struct task_struct *next_thread(struct task_struct *p)
{
        return __next_thread(p) ?: p->group_leader;
}

static inline int thread_group_empty(struct task_struct *p)
{
        return thread_group_leader(p) &&
               list_is_last(&p->thread_node, &p->signal->thread_head);
}

#define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
                                                        unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
                                                       unsigned long *flags)
{
        struct sighand_struct *ret;

        ret = __lock_task_sighand(task, flags);
        (void)__cond_lock(&task->sighand->siglock, ret);
        return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
                                                unsigned long *flags)
{
        spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}

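/*
 * Assumed usage sketch: __lock_task_sighand() returns NULL if the task
 * is already exiting and its sighand is gone, so callers must check
 * the result:
 *
 *      unsigned long flags;
 *
 *      if (lock_task_sighand(task, &flags)) {
 *              ...                     (siglock held, sighand stable)
 *              unlock_task_sighand(task, &flags);
 *      }
 */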
#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
                unsigned int limit)
{
        return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
                unsigned int limit)
{
        return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
        return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
        return task_rlimit_max(current, limit);
}

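/*
 * Illustrative sketch (not part of this header): a typical check of
 * the caller's soft limit (npages is hypothetical):
 *
 *      unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *      if (npages > lock_limit && !capable(CAP_IPC_LOCK))
 *              return -ENOMEM;
 */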
#endif /* _LINUX_SCHED_SIGNAL_H */