linux/include/linux/sched.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_SCHED_H
   3#define _LINUX_SCHED_H
   4
   5/*
   6 * Define 'struct task_struct' and provide the main scheduler
   7 * APIs (schedule(), wakeup variants, etc.)
   8 */
   9
  10#include <uapi/linux/sched.h>
  11
  12#include <asm/current.h>
  13
  14#include <linux/pid.h>
  15#include <linux/sem.h>
  16#include <linux/shm.h>
  17#include <linux/kcov.h>
  18#include <linux/mutex.h>
  19#include <linux/plist.h>
  20#include <linux/hrtimer.h>
  21#include <linux/seccomp.h>
  22#include <linux/nodemask.h>
  23#include <linux/rcupdate.h>
  24#include <linux/resource.h>
  25#include <linux/latencytop.h>
  26#include <linux/sched/prio.h>
  27#include <linux/signal_types.h>
  28#include <linux/mm_types_task.h>
  29#include <linux/task_io_accounting.h>
  30
  31/* task_struct member predeclarations (sorted alphabetically): */
  32struct audit_context;
  33struct backing_dev_info;
  34struct bio_list;
  35struct blk_plug;
  36struct cfs_rq;
  37struct fs_struct;
  38struct futex_pi_state;
  39struct io_context;
  40struct mempolicy;
  41struct nameidata;
  42struct nsproxy;
  43struct perf_event_context;
  44struct pid_namespace;
  45struct pipe_inode_info;
  46struct rcu_node;
  47struct reclaim_state;
  48struct robust_list_head;
  49struct sched_attr;
  50struct sched_param;
  51struct seq_file;
  52struct sighand_struct;
  53struct signal_struct;
  54struct task_delay_info;
  55struct task_group;
  56
  57/*
  58 * Task state bitmask. NOTE! These bits are also
  59 * encoded in fs/proc/array.c: get_task_state().
  60 *
  61 * We have two separate sets of flags: task->state
  62 * is about runnability, while task->exit_state are
  63 * about the task exiting. Confusing, but this way
  64 * modifying one set can't modify the other one by
  65 * mistake.
  66 */
  67
  68/* Used in tsk->state: */
  69#define TASK_RUNNING                    0x0000
  70#define TASK_INTERRUPTIBLE              0x0001
  71#define TASK_UNINTERRUPTIBLE            0x0002
  72#define __TASK_STOPPED                  0x0004
  73#define __TASK_TRACED                   0x0008
  74/* Used in tsk->exit_state: */
  75#define EXIT_DEAD                       0x0010
  76#define EXIT_ZOMBIE                     0x0020
  77#define EXIT_TRACE                      (EXIT_ZOMBIE | EXIT_DEAD)
  78/* Used in tsk->state again: */
  79#define TASK_PARKED                     0x0040
  80#define TASK_DEAD                       0x0080
  81#define TASK_WAKEKILL                   0x0100
  82#define TASK_WAKING                     0x0200
  83#define TASK_NOLOAD                     0x0400
  84#define TASK_NEW                        0x0800
  85#define TASK_STATE_MAX                  0x1000
  86
  87/* Convenience macros for the sake of set_current_state: */
  88#define TASK_KILLABLE                   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
  89#define TASK_STOPPED                    (TASK_WAKEKILL | __TASK_STOPPED)
  90#define TASK_TRACED                     (TASK_WAKEKILL | __TASK_TRACED)
  91
  92#define TASK_IDLE                       (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
  93
  94/* Convenience macros for the sake of wake_up(): */
  95#define TASK_NORMAL                     (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
  96#define TASK_ALL                        (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
  97
  98/* get_task_state(): */
  99#define TASK_REPORT                     (TASK_RUNNING | TASK_INTERRUPTIBLE | \
 100                                         TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 101                                         __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 102                                         TASK_PARKED)
 103
 104#define task_is_traced(task)            ((task->state & __TASK_TRACED) != 0)
 105
 106#define task_is_stopped(task)           ((task->state & __TASK_STOPPED) != 0)
 107
 108#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 109
 110#define task_contributes_to_load(task)  ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 111                                         (task->flags & PF_FROZEN) == 0 && \
 112                                         (task->state & TASK_NOLOAD) == 0)
 113
 114#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 115
 116#define __set_current_state(state_value)                        \
 117        do {                                                    \
 118                current->task_state_change = _THIS_IP_;         \
 119                current->state = (state_value);                 \
 120        } while (0)
 121#define set_current_state(state_value)                          \
 122        do {                                                    \
 123                current->task_state_change = _THIS_IP_;         \
 124                smp_store_mb(current->state, (state_value));    \
 125        } while (0)
 126
 127#else
 128/*
 129 * set_current_state() includes a barrier so that the write of current->state
 130 * is correctly serialised wrt the caller's subsequent test of whether to
 131 * actually sleep:
 132 *
 133 *   for (;;) {
 134 *      set_current_state(TASK_UNINTERRUPTIBLE);
 135 *      if (!need_sleep)
 136 *              break;
 137 *
 138 *      schedule();
 139 *   }
 140 *   __set_current_state(TASK_RUNNING);
 141 *
 142 * If the caller does not need such serialisation (because, for instance, the
 143 * condition test and condition change and wakeup are under the same lock) then
 144 * use __set_current_state().
 145 *
 146 * The above is typically ordered against the wakeup, which does:
 147 *
 148 *      need_sleep = false;
 149 *      wake_up_state(p, TASK_UNINTERRUPTIBLE);
 150 *
 151 * Where wake_up_state() (and all other wakeup primitives) imply enough
 152 * barriers to order the store of the variable against wakeup.
 153 *
 154 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 155 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 156 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 157 *
 158 * This is obviously fine, since they both store the exact same value.
 159 *
 160 * Also see the comments of try_to_wake_up().
 161 */
 162#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
 163#define set_current_state(state_value)   smp_store_mb(current->state, (state_value))
 164#endif
 165
 166/* Task command name length: */
 167#define TASK_COMM_LEN                   16
 168
 169extern void scheduler_tick(void);
 170
 171#define MAX_SCHEDULE_TIMEOUT            LONG_MAX
 172
 173extern long schedule_timeout(long timeout);
 174extern long schedule_timeout_interruptible(long timeout);
 175extern long schedule_timeout_killable(long timeout);
 176extern long schedule_timeout_uninterruptible(long timeout);
 177extern long schedule_timeout_idle(long timeout);
 178asmlinkage void schedule(void);
 179extern void schedule_preempt_disabled(void);
 180
 181extern int __must_check io_schedule_prepare(void);
 182extern void io_schedule_finish(int token);
 183extern long io_schedule_timeout(long timeout);
 184extern void io_schedule(void);
 185
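/*
 * Illustrative sketch (not part of this header) of a relative timed wait
 * built on schedule_timeout(); the timeout is in jiffies, and
 * msecs_to_jiffies() is assumed from <linux/jiffies.h>:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 *	if (remaining)
 *		handle_early_wakeup();
 *
 * schedule_timeout() returns the jiffies left when we are woken before the
 * timeout expires; schedule_timeout_interruptible() and friends fold the
 * set_current_state() call into the helper. handle_early_wakeup() is a
 * made-up placeholder for the caller's own logic.
 */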
 186/**
 187 * struct prev_cputime - snapshot of system and user cputime
 188 * @utime: time spent in user mode
 189 * @stime: time spent in system mode
 190 * @lock: protects the above two fields
 191 *
 192 * Stores previous user/system time values such that we can guarantee
 193 * monotonicity.
 194 */
 195struct prev_cputime {
 196#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 197        u64                             utime;
 198        u64                             stime;
 199        raw_spinlock_t                  lock;
 200#endif
 201};
 202
 203/**
 204 * struct task_cputime - collected CPU time counts
 205 * @utime:              time spent in user mode, in nanoseconds
 206 * @stime:              time spent in kernel mode, in nanoseconds
 207 * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
 208 *
 209 * This structure groups together three kinds of CPU time that are tracked for
 210 * threads and thread groups.  Most things considering CPU time want to group
 211 * these counts together and treat all three of them in parallel.
 212 */
 213struct task_cputime {
 214        u64                             utime;
 215        u64                             stime;
 216        unsigned long long              sum_exec_runtime;
 217};
 218
  219/* Alternate field names when used to cache expirations: */
 220#define virt_exp                        utime
 221#define prof_exp                        stime
 222#define sched_exp                       sum_exec_runtime
 223
 224enum vtime_state {
 225        /* Task is sleeping or running in a CPU with VTIME inactive: */
 226        VTIME_INACTIVE = 0,
 227        /* Task runs in userspace in a CPU with VTIME active: */
 228        VTIME_USER,
 229        /* Task runs in kernelspace in a CPU with VTIME active: */
 230        VTIME_SYS,
 231};
 232
 233struct vtime {
 234        seqcount_t              seqcount;
 235        unsigned long long      starttime;
 236        enum vtime_state        state;
 237        u64                     utime;
 238        u64                     stime;
 239        u64                     gtime;
 240};
 241
 242struct sched_info {
 243#ifdef CONFIG_SCHED_INFO
 244        /* Cumulative counters: */
 245
 246        /* # of times we have run on this CPU: */
 247        unsigned long                   pcount;
 248
 249        /* Time spent waiting on a runqueue: */
 250        unsigned long long              run_delay;
 251
 252        /* Timestamps: */
 253
 254        /* When did we last run on a CPU? */
 255        unsigned long long              last_arrival;
 256
 257        /* When were we last queued to run? */
 258        unsigned long long              last_queued;
 259
 260#endif /* CONFIG_SCHED_INFO */
 261};
 262
 263/*
 264 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 265 * has a few: load, load_avg, util_avg, freq, and capacity.
 266 *
 267 * We define a basic fixed point arithmetic range, and then formalize
 268 * all these metrics based on that basic range.
 269 */
 270# define SCHED_FIXEDPOINT_SHIFT         10
 271# define SCHED_FIXEDPOINT_SCALE         (1L << SCHED_FIXEDPOINT_SHIFT)
 272
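/*
 * Purely arithmetic illustration of the fixed point range above: with
 * SCHED_FIXEDPOINT_SHIFT == 10, the value 1.0 is stored as
 * SCHED_FIXEDPOINT_SCALE == 1024, so a ratio of 50% is stored as 512 and
 * 25% as 256. Multiplying two such values needs the product shifted back
 * down into range:
 *
 *	c = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 */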
 273struct load_weight {
 274        unsigned long                   weight;
 275        u32                             inv_weight;
 276};
 277
 278/*
 279 * The load_avg/util_avg accumulates an infinite geometric series
 280 * (see __update_load_avg() in kernel/sched/fair.c).
 281 *
 282 * [load_avg definition]
 283 *
 284 *   load_avg = runnable% * scale_load_down(load)
 285 *
 286 * where runnable% is the time ratio that a sched_entity is runnable.
 287 * For cfs_rq, it is the aggregated load_avg of all runnable and
 288 * blocked sched_entities.
 289 *
 290 * load_avg may also take frequency scaling into account:
 291 *
 292 *   load_avg = runnable% * scale_load_down(load) * freq%
 293 *
 294 * where freq% is the CPU frequency normalized to the highest frequency.
 295 *
 296 * [util_avg definition]
 297 *
 298 *   util_avg = running% * SCHED_CAPACITY_SCALE
 299 *
 300 * where running% is the time ratio that a sched_entity is running on
 301 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 302 * and blocked sched_entities.
 303 *
 304 * util_avg may also factor frequency scaling and CPU capacity scaling:
 305 *
 306 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 307 *
 308 * where freq% is the same as above, and capacity% is the CPU capacity
 309 * normalized to the greatest capacity (due to uarch differences, etc).
 310 *
 311 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
  312 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 313 * we therefore scale them to as large a range as necessary. This is for
 314 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 315 *
 316 * [Overflow issue]
 317 *
 318 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 319 * with the highest load (=88761), always runnable on a single cfs_rq,
 320 * and should not overflow as the number already hits PID_MAX_LIMIT.
 321 *
 322 * For all other cases (including 32-bit kernels), struct load_weight's
 323 * weight will overflow first before we do, because:
 324 *
 325 *    Max(load_avg) <= Max(load.weight)
 326 *
 327 * Then it is the load_weight's responsibility to consider overflow
 328 * issues.
 329 */
 330struct sched_avg {
 331        u64                             last_update_time;
 332        u64                             load_sum;
 333        u64                             runnable_load_sum;
 334        u32                             util_sum;
 335        u32                             period_contrib;
 336        unsigned long                   load_avg;
 337        unsigned long                   runnable_load_avg;
 338        unsigned long                   util_avg;
 339};
 340
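/*
 * Worked example for the load_avg/util_avg formulas documented above
 * (numbers are illustrative only): a sched_entity that is running 25% of
 * the time on a CPU at full frequency and full capacity ends up with
 *
 *	util_avg ~= 0.25 * SCHED_CAPACITY_SCALE = 0.25 * 1024 = 256
 *
 * while an entity that is runnable 50% of the time contributes
 * load_avg ~= 0.5 * scale_load_down(se->load.weight) to its cfs_rq.
 */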
 341struct sched_statistics {
 342#ifdef CONFIG_SCHEDSTATS
 343        u64                             wait_start;
 344        u64                             wait_max;
 345        u64                             wait_count;
 346        u64                             wait_sum;
 347        u64                             iowait_count;
 348        u64                             iowait_sum;
 349
 350        u64                             sleep_start;
 351        u64                             sleep_max;
 352        s64                             sum_sleep_runtime;
 353
 354        u64                             block_start;
 355        u64                             block_max;
 356        u64                             exec_max;
 357        u64                             slice_max;
 358
 359        u64                             nr_migrations_cold;
 360        u64                             nr_failed_migrations_affine;
 361        u64                             nr_failed_migrations_running;
 362        u64                             nr_failed_migrations_hot;
 363        u64                             nr_forced_migrations;
 364
 365        u64                             nr_wakeups;
 366        u64                             nr_wakeups_sync;
 367        u64                             nr_wakeups_migrate;
 368        u64                             nr_wakeups_local;
 369        u64                             nr_wakeups_remote;
 370        u64                             nr_wakeups_affine;
 371        u64                             nr_wakeups_affine_attempts;
 372        u64                             nr_wakeups_passive;
 373        u64                             nr_wakeups_idle;
 374#endif
 375};
 376
 377struct sched_entity {
 378        /* For load-balancing: */
 379        struct load_weight              load;
 380        unsigned long                   runnable_weight;
 381        struct rb_node                  run_node;
 382        struct list_head                group_node;
 383        unsigned int                    on_rq;
 384
 385        u64                             exec_start;
 386        u64                             sum_exec_runtime;
 387        u64                             vruntime;
 388        u64                             prev_sum_exec_runtime;
 389
 390        u64                             nr_migrations;
 391
 392        struct sched_statistics         statistics;
 393
 394#ifdef CONFIG_FAIR_GROUP_SCHED
 395        int                             depth;
 396        struct sched_entity             *parent;
 397        /* rq on which this entity is (to be) queued: */
 398        struct cfs_rq                   *cfs_rq;
 399        /* rq "owned" by this entity/group: */
 400        struct cfs_rq                   *my_q;
 401#endif
 402
 403#ifdef CONFIG_SMP
 404        /*
 405         * Per entity load average tracking.
 406         *
 407         * Put into separate cache line so it does not
 408         * collide with read-mostly values above.
 409         */
 410        struct sched_avg                avg ____cacheline_aligned_in_smp;
 411#endif
 412};
 413
 414struct sched_rt_entity {
 415        struct list_head                run_list;
 416        unsigned long                   timeout;
 417        unsigned long                   watchdog_stamp;
 418        unsigned int                    time_slice;
 419        unsigned short                  on_rq;
 420        unsigned short                  on_list;
 421
 422        struct sched_rt_entity          *back;
 423#ifdef CONFIG_RT_GROUP_SCHED
 424        struct sched_rt_entity          *parent;
 425        /* rq on which this entity is (to be) queued: */
 426        struct rt_rq                    *rt_rq;
 427        /* rq "owned" by this entity/group: */
 428        struct rt_rq                    *my_q;
 429#endif
 430} __randomize_layout;
 431
 432struct sched_dl_entity {
 433        struct rb_node                  rb_node;
 434
 435        /*
 436         * Original scheduling parameters. Copied here from sched_attr
 437         * during sched_setattr(), they will remain the same until
 438         * the next sched_setattr().
 439         */
 440        u64                             dl_runtime;     /* Maximum runtime for each instance    */
 441        u64                             dl_deadline;    /* Relative deadline of each instance   */
 442        u64                             dl_period;      /* Separation of two instances (period) */
 443        u64                             dl_bw;          /* dl_runtime / dl_period               */
 444        u64                             dl_density;     /* dl_runtime / dl_deadline             */
 445
 446        /*
 447         * Actual scheduling parameters. Initialized with the values above,
  448         * they are continuously updated during task execution. Note that
 449         * the remaining runtime could be < 0 in case we are in overrun.
 450         */
 451        s64                             runtime;        /* Remaining runtime for this instance  */
 452        u64                             deadline;       /* Absolute deadline for this instance  */
 453        unsigned int                    flags;          /* Specifying the scheduler behaviour   */
 454
 455        /*
 456         * Some bool flags:
 457         *
 458         * @dl_throttled tells if we exhausted the runtime. If so, the
 459         * task has to wait for a replenishment to be performed at the
 460         * next firing of dl_timer.
 461         *
  462         * @dl_boosted tells if we are boosted due to deadline inheritance
  463         * (DI). If so we are outside the bandwidth enforcement mechanism
  464         * (but only until we exit the critical section);
 465         *
 466         * @dl_yielded tells if task gave up the CPU before consuming
 467         * all its available runtime during the last job.
 468         *
 469         * @dl_non_contending tells if the task is inactive while still
 470         * contributing to the active utilization. In other words, it
 471         * indicates if the inactive timer has been armed and its handler
 472         * has not been executed yet. This flag is useful to avoid race
 473         * conditions between the inactive timer handler and the wakeup
 474         * code.
 475         */
 476        unsigned int                    dl_throttled      : 1;
 477        unsigned int                    dl_boosted        : 1;
 478        unsigned int                    dl_yielded        : 1;
 479        unsigned int                    dl_non_contending : 1;
 480
 481        /*
 482         * Bandwidth enforcement timer. Each -deadline task has its
 483         * own bandwidth to be enforced, thus we need one timer per task.
 484         */
 485        struct hrtimer                  dl_timer;
 486
 487        /*
 488         * Inactive timer, responsible for decreasing the active utilization
 489         * at the "0-lag time". When a -deadline task blocks, it contributes
 490         * to GRUB's active utilization until the "0-lag time", hence a
 491         * timer is needed to decrease the active utilization at the correct
 492         * time.
 493         */
 494        struct hrtimer inactive_timer;
 495};
 496
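/*
 * Illustrative sketch (numbers invented for the example): the dl_*
 * parameters above originate from a struct sched_attr handed to
 * sched_setattr(), all expressed in nanoseconds. A task asking for 10ms
 * of runtime every 100ms with a 30ms relative deadline would set
 *
 *	attr.sched_policy   = SCHED_DEADLINE;
 *	attr.sched_runtime  =  10000000;
 *	attr.sched_deadline =  30000000;
 *	attr.sched_period   = 100000000;
 *
 * which are copied into dl_runtime, dl_deadline and dl_period, with
 * dl_bw and dl_density derived from them.
 */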
 497union rcu_special {
 498        struct {
 499                u8                      blocked;
 500                u8                      need_qs;
 501                u8                      exp_need_qs;
 502
 503                /* Otherwise the compiler can store garbage here: */
 504                u8                      pad;
 505        } b; /* Bits. */
 506        u32 s; /* Set of bits. */
 507};
 508
 509enum perf_event_task_context {
 510        perf_invalid_context = -1,
 511        perf_hw_context = 0,
 512        perf_sw_context,
 513        perf_nr_task_contexts,
 514};
 515
 516struct wake_q_node {
 517        struct wake_q_node *next;
 518};
 519
 520struct task_struct {
 521#ifdef CONFIG_THREAD_INFO_IN_TASK
 522        /*
 523         * For reasons of header soup (see current_thread_info()), this
 524         * must be the first element of task_struct.
 525         */
 526        struct thread_info              thread_info;
 527#endif
 528        /* -1 unrunnable, 0 runnable, >0 stopped: */
 529        volatile long                   state;
 530
 531        /*
 532         * This begins the randomizable portion of task_struct. Only
 533         * scheduling-critical items should be added above here.
 534         */
 535        randomized_struct_fields_start
 536
 537        void                            *stack;
 538        atomic_t                        usage;
 539        /* Per task flags (PF_*), defined further below: */
 540        unsigned int                    flags;
 541        unsigned int                    ptrace;
 542
 543#ifdef CONFIG_SMP
 544        struct llist_node               wake_entry;
 545        int                             on_cpu;
 546#ifdef CONFIG_THREAD_INFO_IN_TASK
 547        /* Current CPU: */
 548        unsigned int                    cpu;
 549#endif
 550        unsigned int                    wakee_flips;
 551        unsigned long                   wakee_flip_decay_ts;
 552        struct task_struct              *last_wakee;
 553
 554        int                             wake_cpu;
 555#endif
 556        int                             on_rq;
 557
 558        int                             prio;
 559        int                             static_prio;
 560        int                             normal_prio;
 561        unsigned int                    rt_priority;
 562
 563        const struct sched_class        *sched_class;
 564        struct sched_entity             se;
 565        struct sched_rt_entity          rt;
 566#ifdef CONFIG_CGROUP_SCHED
 567        struct task_group               *sched_task_group;
 568#endif
 569        struct sched_dl_entity          dl;
 570
 571#ifdef CONFIG_PREEMPT_NOTIFIERS
 572        /* List of struct preempt_notifier: */
 573        struct hlist_head               preempt_notifiers;
 574#endif
 575
 576#ifdef CONFIG_BLK_DEV_IO_TRACE
 577        unsigned int                    btrace_seq;
 578#endif
 579
 580        unsigned int                    policy;
 581        int                             nr_cpus_allowed;
 582        cpumask_t                       cpus_allowed;
 583
 584#ifdef CONFIG_PREEMPT_RCU
 585        int                             rcu_read_lock_nesting;
 586        union rcu_special               rcu_read_unlock_special;
 587        struct list_head                rcu_node_entry;
 588        struct rcu_node                 *rcu_blocked_node;
 589#endif /* #ifdef CONFIG_PREEMPT_RCU */
 590
 591#ifdef CONFIG_TASKS_RCU
 592        unsigned long                   rcu_tasks_nvcsw;
 593        u8                              rcu_tasks_holdout;
 594        u8                              rcu_tasks_idx;
 595        int                             rcu_tasks_idle_cpu;
 596        struct list_head                rcu_tasks_holdout_list;
 597#endif /* #ifdef CONFIG_TASKS_RCU */
 598
 599        struct sched_info               sched_info;
 600
 601        struct list_head                tasks;
 602#ifdef CONFIG_SMP
 603        struct plist_node               pushable_tasks;
 604        struct rb_node                  pushable_dl_tasks;
 605#endif
 606
 607        struct mm_struct                *mm;
 608        struct mm_struct                *active_mm;
 609
 610        /* Per-thread vma caching: */
 611        struct vmacache                 vmacache;
 612
 613#ifdef SPLIT_RSS_COUNTING
 614        struct task_rss_stat            rss_stat;
 615#endif
 616        int                             exit_state;
 617        int                             exit_code;
 618        int                             exit_signal;
 619        /* The signal sent when the parent dies: */
 620        int                             pdeath_signal;
 621        /* JOBCTL_*, siglock protected: */
 622        unsigned long                   jobctl;
 623
 624        /* Used for emulating ABI behavior of previous Linux versions: */
 625        unsigned int                    personality;
 626
 627        /* Scheduler bits, serialized by scheduler locks: */
 628        unsigned                        sched_reset_on_fork:1;
 629        unsigned                        sched_contributes_to_load:1;
 630        unsigned                        sched_migrated:1;
 631        unsigned                        sched_remote_wakeup:1;
 632        /* Force alignment to the next boundary: */
 633        unsigned                        :0;
 634
 635        /* Unserialized, strictly 'current' */
 636
 637        /* Bit to tell LSMs we're in execve(): */
 638        unsigned                        in_execve:1;
 639        unsigned                        in_iowait:1;
 640#ifndef TIF_RESTORE_SIGMASK
 641        unsigned                        restore_sigmask:1;
 642#endif
 643#ifdef CONFIG_MEMCG
 644        unsigned                        memcg_may_oom:1;
 645#ifndef CONFIG_SLOB
 646        unsigned                        memcg_kmem_skip_account:1;
 647#endif
 648#endif
 649#ifdef CONFIG_COMPAT_BRK
 650        unsigned                        brk_randomized:1;
 651#endif
 652#ifdef CONFIG_CGROUPS
 653        /* disallow userland-initiated cgroup migration */
 654        unsigned                        no_cgroup_migration:1;
 655#endif
 656
 657        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 658
 659        struct restart_block            restart_block;
 660
 661        pid_t                           pid;
 662        pid_t                           tgid;
 663
 664#ifdef CONFIG_CC_STACKPROTECTOR
 665        /* Canary value for the -fstack-protector GCC feature: */
 666        unsigned long                   stack_canary;
 667#endif
 668        /*
 669         * Pointers to the (original) parent process, youngest child, younger sibling,
 670         * older sibling, respectively.  (p->father can be replaced with
 671         * p->real_parent->pid)
 672         */
 673
 674        /* Real parent process: */
 675        struct task_struct __rcu        *real_parent;
 676
 677        /* Recipient of SIGCHLD, wait4() reports: */
 678        struct task_struct __rcu        *parent;
 679
 680        /*
 681         * Children/sibling form the list of natural children:
 682         */
 683        struct list_head                children;
 684        struct list_head                sibling;
 685        struct task_struct              *group_leader;
 686
 687        /*
 688         * 'ptraced' is the list of tasks this task is using ptrace() on.
 689         *
 690         * This includes both natural children and PTRACE_ATTACH targets.
 691         * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
 692         */
 693        struct list_head                ptraced;
 694        struct list_head                ptrace_entry;
 695
 696        /* PID/PID hash table linkage. */
 697        struct pid_link                 pids[PIDTYPE_MAX];
 698        struct list_head                thread_group;
 699        struct list_head                thread_node;
 700
 701        struct completion               *vfork_done;
 702
 703        /* CLONE_CHILD_SETTID: */
 704        int __user                      *set_child_tid;
 705
 706        /* CLONE_CHILD_CLEARTID: */
 707        int __user                      *clear_child_tid;
 708
 709        u64                             utime;
 710        u64                             stime;
 711#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 712        u64                             utimescaled;
 713        u64                             stimescaled;
 714#endif
 715        u64                             gtime;
 716        struct prev_cputime             prev_cputime;
 717#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 718        struct vtime                    vtime;
 719#endif
 720
 721#ifdef CONFIG_NO_HZ_FULL
 722        atomic_t                        tick_dep_mask;
 723#endif
 724        /* Context switch counts: */
 725        unsigned long                   nvcsw;
 726        unsigned long                   nivcsw;
 727
 728        /* Monotonic time in nsecs: */
 729        u64                             start_time;
 730
 731        /* Boot based time in nsecs: */
 732        u64                             real_start_time;
 733
 734        /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
 735        unsigned long                   min_flt;
 736        unsigned long                   maj_flt;
 737
 738#ifdef CONFIG_POSIX_TIMERS
 739        struct task_cputime             cputime_expires;
 740        struct list_head                cpu_timers[3];
 741#endif
 742
 743        /* Process credentials: */
 744
 745        /* Tracer's credentials at attach: */
 746        const struct cred __rcu         *ptracer_cred;
 747
 748        /* Objective and real subjective task credentials (COW): */
 749        const struct cred __rcu         *real_cred;
 750
 751        /* Effective (overridable) subjective task credentials (COW): */
 752        const struct cred __rcu         *cred;
 753
 754        /*
 755         * executable name, excluding path.
 756         *
 757         * - normally initialized setup_new_exec()
 758         * - access it with [gs]et_task_comm()
 759         * - lock it with task_lock()
 760         */
 761        char                            comm[TASK_COMM_LEN];
 762
 763        struct nameidata                *nameidata;
 764
 765#ifdef CONFIG_SYSVIPC
 766        struct sysv_sem                 sysvsem;
 767        struct sysv_shm                 sysvshm;
 768#endif
 769#ifdef CONFIG_DETECT_HUNG_TASK
 770        unsigned long                   last_switch_count;
 771#endif
 772        /* Filesystem information: */
 773        struct fs_struct                *fs;
 774
 775        /* Open file information: */
 776        struct files_struct             *files;
 777
 778        /* Namespaces: */
 779        struct nsproxy                  *nsproxy;
 780
 781        /* Signal handlers: */
 782        struct signal_struct            *signal;
 783        struct sighand_struct           *sighand;
 784        sigset_t                        blocked;
 785        sigset_t                        real_blocked;
 786        /* Restored if set_restore_sigmask() was used: */
 787        sigset_t                        saved_sigmask;
 788        struct sigpending               pending;
 789        unsigned long                   sas_ss_sp;
 790        size_t                          sas_ss_size;
 791        unsigned int                    sas_ss_flags;
 792
 793        struct callback_head            *task_works;
 794
 795        struct audit_context            *audit_context;
 796#ifdef CONFIG_AUDITSYSCALL
 797        kuid_t                          loginuid;
 798        unsigned int                    sessionid;
 799#endif
 800        struct seccomp                  seccomp;
 801
 802        /* Thread group tracking: */
 803        u32                             parent_exec_id;
 804        u32                             self_exec_id;
 805
 806        /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
 807        spinlock_t                      alloc_lock;
 808
 809        /* Protection of the PI data structures: */
 810        raw_spinlock_t                  pi_lock;
 811
 812        struct wake_q_node              wake_q;
 813
 814#ifdef CONFIG_RT_MUTEXES
 815        /* PI waiters blocked on a rt_mutex held by this task: */
 816        struct rb_root_cached           pi_waiters;
 817        /* Updated under owner's pi_lock and rq lock */
 818        struct task_struct              *pi_top_task;
 819        /* Deadlock detection and priority inheritance handling: */
 820        struct rt_mutex_waiter          *pi_blocked_on;
 821#endif
 822
 823#ifdef CONFIG_DEBUG_MUTEXES
 824        /* Mutex deadlock detection: */
 825        struct mutex_waiter             *blocked_on;
 826#endif
 827
 828#ifdef CONFIG_TRACE_IRQFLAGS
 829        unsigned int                    irq_events;
 830        unsigned long                   hardirq_enable_ip;
 831        unsigned long                   hardirq_disable_ip;
 832        unsigned int                    hardirq_enable_event;
 833        unsigned int                    hardirq_disable_event;
 834        int                             hardirqs_enabled;
 835        int                             hardirq_context;
 836        unsigned long                   softirq_disable_ip;
 837        unsigned long                   softirq_enable_ip;
 838        unsigned int                    softirq_disable_event;
 839        unsigned int                    softirq_enable_event;
 840        int                             softirqs_enabled;
 841        int                             softirq_context;
 842#endif
 843
 844#ifdef CONFIG_LOCKDEP
 845# define MAX_LOCK_DEPTH                 48UL
 846        u64                             curr_chain_key;
 847        int                             lockdep_depth;
 848        unsigned int                    lockdep_recursion;
 849        struct held_lock                held_locks[MAX_LOCK_DEPTH];
 850#endif
 851
 852#ifdef CONFIG_UBSAN
 853        unsigned int                    in_ubsan;
 854#endif
 855
 856        /* Journalling filesystem info: */
 857        void                            *journal_info;
 858
 859        /* Stacked block device info: */
 860        struct bio_list                 *bio_list;
 861
 862#ifdef CONFIG_BLOCK
 863        /* Stack plugging: */
 864        struct blk_plug                 *plug;
 865#endif
 866
 867        /* VM state: */
 868        struct reclaim_state            *reclaim_state;
 869
 870        struct backing_dev_info         *backing_dev_info;
 871
 872        struct io_context               *io_context;
 873
 874        /* Ptrace state: */
 875        unsigned long                   ptrace_message;
 876        siginfo_t                       *last_siginfo;
 877
 878        struct task_io_accounting       ioac;
 879#ifdef CONFIG_TASK_XACCT
 880        /* Accumulated RSS usage: */
 881        u64                             acct_rss_mem1;
 882        /* Accumulated virtual memory usage: */
 883        u64                             acct_vm_mem1;
 884        /* stime + utime since last update: */
 885        u64                             acct_timexpd;
 886#endif
 887#ifdef CONFIG_CPUSETS
 888        /* Protected by ->alloc_lock: */
 889        nodemask_t                      mems_allowed;
  890        /* Sequence number to catch updates: */
 891        seqcount_t                      mems_allowed_seq;
 892        int                             cpuset_mem_spread_rotor;
 893        int                             cpuset_slab_spread_rotor;
 894#endif
 895#ifdef CONFIG_CGROUPS
 896        /* Control Group info protected by css_set_lock: */
 897        struct css_set __rcu            *cgroups;
 898        /* cg_list protected by css_set_lock and tsk->alloc_lock: */
 899        struct list_head                cg_list;
 900#endif
 901#ifdef CONFIG_INTEL_RDT
 902        u32                             closid;
 903        u32                             rmid;
 904#endif
 905#ifdef CONFIG_FUTEX
 906        struct robust_list_head __user  *robust_list;
 907#ifdef CONFIG_COMPAT
 908        struct compat_robust_list_head __user *compat_robust_list;
 909#endif
 910        struct list_head                pi_state_list;
 911        struct futex_pi_state           *pi_state_cache;
 912#endif
 913#ifdef CONFIG_PERF_EVENTS
 914        struct perf_event_context       *perf_event_ctxp[perf_nr_task_contexts];
 915        struct mutex                    perf_event_mutex;
 916        struct list_head                perf_event_list;
 917#endif
 918#ifdef CONFIG_DEBUG_PREEMPT
 919        unsigned long                   preempt_disable_ip;
 920#endif
 921#ifdef CONFIG_NUMA
 922        /* Protected by alloc_lock: */
 923        struct mempolicy                *mempolicy;
 924        short                           il_prev;
 925        short                           pref_node_fork;
 926#endif
 927#ifdef CONFIG_NUMA_BALANCING
 928        int                             numa_scan_seq;
 929        unsigned int                    numa_scan_period;
 930        unsigned int                    numa_scan_period_max;
 931        int                             numa_preferred_nid;
 932        unsigned long                   numa_migrate_retry;
 933        /* Migration stamp: */
 934        u64                             node_stamp;
 935        u64                             last_task_numa_placement;
 936        u64                             last_sum_exec_runtime;
 937        struct callback_head            numa_work;
 938
 939        struct list_head                numa_entry;
 940        struct numa_group               *numa_group;
 941
 942        /*
 943         * numa_faults is an array split into four regions:
 944         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
 945         * in this precise order.
 946         *
 947         * faults_memory: Exponential decaying average of faults on a per-node
 948         * basis. Scheduling placement decisions are made based on these
 949         * counts. The values remain static for the duration of a PTE scan.
 950         * faults_cpu: Track the nodes the process was running on when a NUMA
 951         * hinting fault was incurred.
 952         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
 953         * during the current scan window. When the scan completes, the counts
 954         * in faults_memory and faults_cpu decay and these values are copied.
 955         */
 956        unsigned long                   *numa_faults;
 957        unsigned long                   total_numa_faults;
 958
 959        /*
 960         * numa_faults_locality tracks if faults recorded during the last
 961         * scan window were remote/local or failed to migrate. The task scan
 962         * period is adapted based on the locality of the faults with different
 963         * weights depending on whether they were shared or private faults
 964         */
 965        unsigned long                   numa_faults_locality[3];
 966
 967        unsigned long                   numa_pages_migrated;
 968#endif /* CONFIG_NUMA_BALANCING */
 969
 970        struct tlbflush_unmap_batch     tlb_ubc;
 971
 972        struct rcu_head                 rcu;
 973
 974        /* Cache last used pipe for splice(): */
 975        struct pipe_inode_info          *splice_pipe;
 976
 977        struct page_frag                task_frag;
 978
 979#ifdef CONFIG_TASK_DELAY_ACCT
 980        struct task_delay_info          *delays;
 981#endif
 982
 983#ifdef CONFIG_FAULT_INJECTION
 984        int                             make_it_fail;
 985        unsigned int                    fail_nth;
 986#endif
 987        /*
 988         * When (nr_dirtied >= nr_dirtied_pause), it's time to call
 989         * balance_dirty_pages() for a dirty throttling pause:
 990         */
 991        int                             nr_dirtied;
 992        int                             nr_dirtied_pause;
 993        /* Start of a write-and-pause period: */
 994        unsigned long                   dirty_paused_when;
 995
 996#ifdef CONFIG_LATENCYTOP
 997        int                             latency_record_count;
 998        struct latency_record           latency_record[LT_SAVECOUNT];
 999#endif
1000        /*
1001         * Time slack values; these are used to round up poll() and
1002         * select() etc timeout values. These are in nanoseconds.
1003         */
1004        u64                             timer_slack_ns;
1005        u64                             default_timer_slack_ns;
1006
1007#ifdef CONFIG_KASAN
1008        unsigned int                    kasan_depth;
1009#endif
1010
1011#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1012        /* Index of current stored address in ret_stack: */
1013        int                             curr_ret_stack;
1014
1015        /* Stack of return addresses for return function tracing: */
1016        struct ftrace_ret_stack         *ret_stack;
1017
1018        /* Timestamp for last schedule: */
1019        unsigned long long              ftrace_timestamp;
1020
1021        /*
1022         * Number of functions that haven't been traced
1023         * because of depth overrun:
1024         */
1025        atomic_t                        trace_overrun;
1026
1027        /* Pause tracing: */
1028        atomic_t                        tracing_graph_pause;
1029#endif
1030
1031#ifdef CONFIG_TRACING
1032        /* State flags for use by tracers: */
1033        unsigned long                   trace;
1034
1035        /* Bitmask and counter of trace recursion: */
1036        unsigned long                   trace_recursion;
1037#endif /* CONFIG_TRACING */
1038
1039#ifdef CONFIG_KCOV
1040        /* Coverage collection mode enabled for this task (0 if disabled): */
1041        enum kcov_mode                  kcov_mode;
1042
1043        /* Size of the kcov_area: */
1044        unsigned int                    kcov_size;
1045
1046        /* Buffer for coverage collection: */
1047        void                            *kcov_area;
1048
1049        /* KCOV descriptor wired with this task or NULL: */
1050        struct kcov                     *kcov;
1051#endif
1052
1053#ifdef CONFIG_MEMCG
1054        struct mem_cgroup               *memcg_in_oom;
1055        gfp_t                           memcg_oom_gfp_mask;
1056        int                             memcg_oom_order;
1057
1058        /* Number of pages to reclaim on returning to userland: */
1059        unsigned int                    memcg_nr_pages_over_high;
1060#endif
1061
1062#ifdef CONFIG_UPROBES
1063        struct uprobe_task              *utask;
1064#endif
1065#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1066        unsigned int                    sequential_io;
1067        unsigned int                    sequential_io_avg;
1068#endif
1069#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1070        unsigned long                   task_state_change;
1071#endif
1072        int                             pagefault_disabled;
1073#ifdef CONFIG_MMU
1074        struct task_struct              *oom_reaper_list;
1075#endif
1076#ifdef CONFIG_VMAP_STACK
1077        struct vm_struct                *stack_vm_area;
1078#endif
1079#ifdef CONFIG_THREAD_INFO_IN_TASK
1080        /* A live task holds one reference: */
1081        atomic_t                        stack_refcount;
1082#endif
1083#ifdef CONFIG_LIVEPATCH
1084        int patch_state;
1085#endif
1086#ifdef CONFIG_SECURITY
1087        /* Used by LSM modules for access restriction: */
1088        void                            *security;
1089#endif
1090
1091        /*
1092         * New fields for task_struct should be added above here, so that
1093         * they are included in the randomized portion of task_struct.
1094         */
1095        randomized_struct_fields_end
1096
1097        /* CPU-specific state of this task: */
1098        struct thread_struct            thread;
1099
1100        /*
1101         * WARNING: on x86, 'thread_struct' contains a variable-sized
1102         * structure.  It *MUST* be at the end of 'task_struct'.
1103         *
1104         * Do not put anything below here!
1105         */
1106};
1107
1108static inline struct pid *task_pid(struct task_struct *task)
1109{
1110        return task->pids[PIDTYPE_PID].pid;
1111}
1112
1113static inline struct pid *task_tgid(struct task_struct *task)
1114{
1115        return task->group_leader->pids[PIDTYPE_PID].pid;
1116}
1117
1118/*
1119 * Without tasklist or RCU lock it is not safe to dereference
1120 * the result of task_pgrp/task_session even if task == current,
1121 * we can race with another thread doing sys_setsid/sys_setpgid.
1122 */
1123static inline struct pid *task_pgrp(struct task_struct *task)
1124{
1125        return task->group_leader->pids[PIDTYPE_PGID].pid;
1126}
1127
1128static inline struct pid *task_session(struct task_struct *task)
1129{
1130        return task->group_leader->pids[PIDTYPE_SID].pid;
1131}
1132
1133/*
1134 * the helpers to get the task's different pids as they are seen
1135 * from various namespaces
1136 *
1137 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1138 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1139 *                     current.
1140 * task_xid_nr_ns()  : id seen from the ns specified;
1141 *
1142 * see also pid_nr() etc in include/linux/pid.h
1143 */
1144pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1145
1146static inline pid_t task_pid_nr(struct task_struct *tsk)
1147{
1148        return tsk->pid;
1149}
1150
1151static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1152{
1153        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1154}
1155
1156static inline pid_t task_pid_vnr(struct task_struct *tsk)
1157{
1158        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1159}
1160
1161
1162static inline pid_t task_tgid_nr(struct task_struct *tsk)
1163{
1164        return tsk->tgid;
1165}
1166
1167/**
1168 * pid_alive - check that a task structure is not stale
1169 * @p: Task structure to be checked.
1170 *
 1171 * Test if a process is not yet dead (at most zombie state).
1172 * If pid_alive fails, then pointers within the task structure
1173 * can be stale and must not be dereferenced.
1174 *
1175 * Return: 1 if the process is alive. 0 otherwise.
1176 */
1177static inline int pid_alive(const struct task_struct *p)
1178{
1179        return p->pids[PIDTYPE_PID].pid != NULL;
1180}
1181
1182static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1183{
1184        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1185}
1186
1187static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1188{
1189        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1190}
1191
1192
1193static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1194{
1195        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1196}
1197
1198static inline pid_t task_session_vnr(struct task_struct *tsk)
1199{
1200        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1201}
1202
1203static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1204{
1205        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
1206}
1207
1208static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1209{
1210        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
1211}
1212
1213static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1214{
1215        pid_t pid = 0;
1216
1217        rcu_read_lock();
1218        if (pid_alive(tsk))
1219                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1220        rcu_read_unlock();
1221
1222        return pid;
1223}
1224
1225static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1226{
1227        return task_ppid_nr_ns(tsk, &init_pid_ns);
1228}
1229
1230/* Obsolete, do not use: */
1231static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1232{
1233        return task_pgrp_nr_ns(tsk, &init_pid_ns);
1234}
1235
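/*
 * Illustrative example of the _nr/_vnr distinction above (PID numbers
 * invented): for a task created inside a child PID namespace,
 * task_pid_nr() could return 4711, its id as seen from the init
 * namespace, while task_pid_vnr() returns the id as seen from the PID
 * namespace of current, e.g. 2. The *_nr_ns() variants take the
 * namespace to use explicitly.
 */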
1236#define TASK_REPORT_IDLE        (TASK_REPORT + 1)
1237#define TASK_REPORT_MAX         (TASK_REPORT_IDLE << 1)
1238
1239static inline unsigned int task_state_index(struct task_struct *tsk)
1240{
1241        unsigned int tsk_state = READ_ONCE(tsk->state);
1242        unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1243
1244        BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1245
1246        if (tsk_state == TASK_IDLE)
1247                state = TASK_REPORT_IDLE;
1248
1249        return fls(state);
1250}
1251
1252static inline char task_index_to_char(unsigned int state)
1253{
1254        static const char state_char[] = "RSDTtXZPI";
1255
1256        BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1257
1258        return state_char[state];
1259}
1260
1261static inline char task_state_to_char(struct task_struct *tsk)
1262{
1263        return task_index_to_char(task_state_index(tsk));
1264}
1265
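/*
 * For example, following state_char[] above: TASK_RUNNING maps to 'R',
 * TASK_INTERRUPTIBLE to 'S', TASK_UNINTERRUPTIBLE to 'D', TASK_IDLE to
 * 'I' and EXIT_ZOMBIE to 'Z', i.e. the single letters reported in
 * /proc/<pid>/stat.
 */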
1266/**
1267 * is_global_init - check if a task structure is init. Since init
1268 * is free to have sub-threads we need to check tgid.
1269 * @tsk: Task structure to be checked.
1270 *
1271 * Check if a task structure is the first user space task the kernel created.
1272 *
1273 * Return: 1 if the task structure is init. 0 otherwise.
1274 */
1275static inline int is_global_init(struct task_struct *tsk)
1276{
1277        return task_tgid_nr(tsk) == 1;
1278}
1279
1280extern struct pid *cad_pid;
1281
1282/*
1283 * Per process flags
1284 */
1285#define PF_IDLE                 0x00000002      /* I am an IDLE thread */
1286#define PF_EXITING              0x00000004      /* Getting shut down */
1287#define PF_EXITPIDONE           0x00000008      /* PI exit done on shut down */
1288#define PF_VCPU                 0x00000010      /* I'm a virtual CPU */
1289#define PF_WQ_WORKER            0x00000020      /* I'm a workqueue worker */
1290#define PF_FORKNOEXEC           0x00000040      /* Forked but didn't exec */
1291#define PF_MCE_PROCESS          0x00000080      /* Process policy on mce errors */
1292#define PF_SUPERPRIV            0x00000100      /* Used super-user privileges */
1293#define PF_DUMPCORE             0x00000200      /* Dumped core */
1294#define PF_SIGNALED             0x00000400      /* Killed by a signal */
1295#define PF_MEMALLOC             0x00000800      /* Allocating memory */
1296#define PF_NPROC_EXCEEDED       0x00001000      /* set_user() noticed that RLIMIT_NPROC was exceeded */
1297#define PF_USED_MATH            0x00002000      /* If unset the fpu must be initialized before use */
1298#define PF_USED_ASYNC           0x00004000      /* Used async_schedule*(), used by module init */
1299#define PF_NOFREEZE             0x00008000      /* This thread should not be frozen */
1300#define PF_FROZEN               0x00010000      /* Frozen for system suspend */
1301#define PF_KSWAPD               0x00020000      /* I am kswapd */
1302#define PF_MEMALLOC_NOFS        0x00040000      /* All allocation requests will inherit GFP_NOFS */
1303#define PF_MEMALLOC_NOIO        0x00080000      /* All allocation requests will inherit GFP_NOIO */
1304#define PF_LESS_THROTTLE        0x00100000      /* Throttle me less: I clean memory */
1305#define PF_KTHREAD              0x00200000      /* I am a kernel thread */
1306#define PF_RANDOMIZE            0x00400000      /* Randomize virtual address space */
1307#define PF_SWAPWRITE            0x00800000      /* Allowed to write to swap */
1308#define PF_NO_SETAFFINITY       0x04000000      /* Userland is not allowed to meddle with cpus_allowed */
1309#define PF_MCE_EARLY            0x08000000      /* Early kill for mce process policy */
1310#define PF_MUTEX_TESTER         0x20000000      /* Thread belongs to the rt mutex tester */
1311#define PF_FREEZER_SKIP         0x40000000      /* Freezer should not count it as freezable */
1312#define PF_SUSPEND_TASK         0x80000000      /* This thread called freeze_processes() and should not be frozen */
1313
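/*
 * The PF_* flags are tested with plain bit operations, for example
 * (illustrative only) a check that skips kernel threads:
 *
 *	if (p->flags & PF_KTHREAD)
 *		return;
 *
 * subject to the access rules described below.
 */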
1314/*
1315 * Only the _current_ task can read/write to tsk->flags, but other
1316 * tasks can access tsk->flags in readonly mode for example
1317 * with tsk_used_math (like during threaded core dumping).
1318 * There is however an exception to this rule during ptrace
1319 * or during fork: the ptracer task is allowed to write to the
1320 * child->flags of its traced child (same goes for fork, the parent
1321 * can write to the child->flags), because we're guaranteed the
1322 * child is not running and in turn not changing child->flags
1323 * at the same time the parent does it.
1324 */
1325#define clear_stopped_child_used_math(child)    do { (child)->flags &= ~PF_USED_MATH; } while (0)
1326#define set_stopped_child_used_math(child)      do { (child)->flags |= PF_USED_MATH; } while (0)
1327#define clear_used_math()                       clear_stopped_child_used_math(current)
1328#define set_used_math()                         set_stopped_child_used_math(current)
1329
1330#define conditional_stopped_child_used_math(condition, child) \
1331        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1332
1333#define conditional_used_math(condition)        conditional_stopped_child_used_math(condition, current)
1334
1335#define copy_to_stopped_child_used_math(child) \
1336        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1337
1338/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1339#define tsk_used_math(p)                        ((p)->flags & PF_USED_MATH)
1340#define used_math()                             tsk_used_math(current)
1341
1342static inline bool is_percpu_thread(void)
1343{
1344#ifdef CONFIG_SMP
1345        return (current->flags & PF_NO_SETAFFINITY) &&
1346                (current->nr_cpus_allowed  == 1);
1347#else
1348        return true;
1349#endif
1350}
1351
1352/* Per-process atomic flags. */
1353#define PFA_NO_NEW_PRIVS                0       /* May not gain new privileges. */
1354#define PFA_SPREAD_PAGE                 1       /* Spread page cache over cpuset */
1355#define PFA_SPREAD_SLAB                 2       /* Spread some slab caches over cpuset */
1356
1357
1358#define TASK_PFA_TEST(name, func)                                       \
1359        static inline bool task_##func(struct task_struct *p)           \
1360        { return test_bit(PFA_##name, &p->atomic_flags); }
1361
1362#define TASK_PFA_SET(name, func)                                        \
1363        static inline void task_set_##func(struct task_struct *p)       \
1364        { set_bit(PFA_##name, &p->atomic_flags); }
1365
1366#define TASK_PFA_CLEAR(name, func)                                      \
1367        static inline void task_clear_##func(struct task_struct *p)     \
1368        { clear_bit(PFA_##name, &p->atomic_flags); }
1369
1370TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1371TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1372
1373TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1374TASK_PFA_SET(SPREAD_PAGE, spread_page)
1375TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1376
1377TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1378TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1379TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1380
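/*
 * The TASK_PFA_* macros above expand to static inline helpers, so for
 * PFA_NO_NEW_PRIVS the generated interface is used as (illustrative):
 *
 *	if (!task_no_new_privs(current))
 *		task_set_no_new_privs(current);
 *
 * Note that NO_NEW_PRIVS intentionally has no TASK_PFA_CLEAR() instance:
 * once set, the flag is never cleared.
 */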
1381static inline void
1382current_restore_flags(unsigned long orig_flags, unsigned long flags)
1383{
1384        current->flags &= ~flags;
1385        current->flags |= orig_flags & flags;
1386}
1387
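/*
 * current_restore_flags() supports the usual save/modify/restore pattern
 * for PF_* bits, roughly how the NOIO/NOFS allocation scope helpers use
 * it (sketch only):
 *
 *	unsigned long pflags = current->flags;
 *
 *	current->flags |= PF_MEMALLOC_NOIO;
 *	do_the_work();
 *	current_restore_flags(pflags, PF_MEMALLOC_NOIO);
 *
 * Only the bits named in the second argument are restored; do_the_work()
 * is a made-up placeholder.
 */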
1388extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1389extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1390#ifdef CONFIG_SMP
1391extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1392extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1393#else
1394static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1395{
1396}
1397static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1398{
1399        if (!cpumask_test_cpu(0, new_mask))
1400                return -EINVAL;
1401        return 0;
1402}
1403#endif
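
/*
 * Usage sketch (illustrative, not part of the original header): restrict a
 * task to a single CPU; target_cpu is only a placeholder variable:
 *
 *        int ret = set_cpus_allowed_ptr(p, cpumask_of(target_cpu));
 *        if (ret)
 *                pr_warn("could not bind task to CPU %d\n", target_cpu);
 *
 * On !SMP kernels the stub above only accepts masks that contain CPU 0.
 */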
1404
1405#ifndef cpu_relax_yield
1406#define cpu_relax_yield() cpu_relax()
1407#endif
1408
1409extern int yield_to(struct task_struct *p, bool preempt);
1410extern void set_user_nice(struct task_struct *p, long nice);
1411extern int task_prio(const struct task_struct *p);
1412
1413/**
1414 * task_nice - return the nice value of a given task.
1415 * @p: the task in question.
1416 *
1417 * Return: The nice value [ -20 ... 0 ... 19 ].
1418 */
1419static inline int task_nice(const struct task_struct *p)
1420{
1421        return PRIO_TO_NICE((p)->static_prio);
1422}
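
/*
 * Illustrative example (not part of the original header): nice values and
 * static priorities are related through NICE_TO_PRIO()/PRIO_TO_NICE(), so
 * a task still at the default nice level reports 0 here:
 *
 *        int nice = task_nice(current);        (0 for an unmodified task)
 *        set_user_nice(current, 10);           (lower this task's priority)
 */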
1423
1424extern int can_nice(const struct task_struct *p, const int nice);
1425extern int task_curr(const struct task_struct *p);
1426extern int idle_cpu(int cpu);
1427extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1428extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1429extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1430extern struct task_struct *idle_task(int cpu);
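
/*
 * Usage sketch (illustrative, not part of the original header): move a
 * kernel thread to a real-time policy.  sched_setscheduler_nocheck() skips
 * the permission and rlimit checks and is the variant normally used from
 * inside the kernel; for SCHED_FIFO the priority must lie in the range
 * 1..MAX_RT_PRIO-1:
 *
 *        struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
 *
 *        if (sched_setscheduler_nocheck(p, SCHED_FIFO, &sp))
 *                pr_warn("failed to switch task to SCHED_FIFO\n");
 */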
1431
1432/**
1433 * is_idle_task - is the specified task an idle task?
1434 * @p: the task in question.
1435 *
1436 * Return: true if @p is an idle task, false otherwise.
1437 */
1438static inline bool is_idle_task(const struct task_struct *p)
1439{
1440        return !!(p->flags & PF_IDLE);
1441}
1442
1443extern struct task_struct *curr_task(int cpu);
1444extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1445
1446void yield(void);
1447
1448union thread_union {
1449#ifndef CONFIG_THREAD_INFO_IN_TASK
1450        struct thread_info thread_info;
1451#endif
1452        unsigned long stack[THREAD_SIZE/sizeof(long)];
1453};
1454
1455#ifdef CONFIG_THREAD_INFO_IN_TASK
1456static inline struct thread_info *task_thread_info(struct task_struct *task)
1457{
1458        return &task->thread_info;
1459}
1460#elif !defined(__HAVE_THREAD_FUNCTIONS)
1461# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1462#endif
1463
1464/*
1465 * Find a task by one of its numerical IDs.
1466 *
1467 * find_task_by_pid_ns():
1468 *      finds a task by its pid in the specified namespace
1469 * find_task_by_vpid():
1470 *      finds a task by its virtual pid
1471 *
1472 * See also find_vpid() etc. in include/linux/pid.h
1473 */
1474
1475extern struct task_struct *find_task_by_vpid(pid_t nr);
1476extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
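
/*
 * Usage sketch (illustrative, not part of the original header): the lookup
 * is expected to run under rcu_read_lock(), and the task must be pinned
 * with get_task_struct() if it is used after the lock is dropped:
 *
 *        struct task_struct *p;
 *
 *        rcu_read_lock();
 *        p = find_task_by_vpid(nr);
 *        if (p)
 *                get_task_struct(p);
 *        rcu_read_unlock();
 *
 * Use p afterwards, then drop the reference with put_task_struct(p).
 */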
1477
1478extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1479extern int wake_up_process(struct task_struct *tsk);
1480extern void wake_up_new_task(struct task_struct *tsk);
1481
1482#ifdef CONFIG_SMP
1483extern void kick_process(struct task_struct *tsk);
1484#else
1485static inline void kick_process(struct task_struct *tsk) { }
1486#endif
1487
1488extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1489
1490static inline void set_task_comm(struct task_struct *tsk, const char *from)
1491{
1492        __set_task_comm(tsk, from, false);
1493}
1494
1495extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1496#define get_task_comm(buf, tsk) ({                      \
1497        BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);     \
1498        __get_task_comm(buf, sizeof(buf), tsk);         \
1499})
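
/*
 * Usage sketch (illustrative, not part of the original header): the buffer
 * handed to get_task_comm() must be an actual array of TASK_COMM_LEN bytes,
 * otherwise the BUILD_BUG_ON() above fires at compile time:
 *
 *        char comm[TASK_COMM_LEN];
 *
 *        get_task_comm(comm, tsk);
 *        pr_debug("task command: %s\n", comm);
 */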
1500
1501#ifdef CONFIG_SMP
1502void scheduler_ipi(void);
1503extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1504#else
1505static inline void scheduler_ipi(void) { }
1506static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1507{
1508        return 1;
1509}
1510#endif
1511
1512/*
1513 * Set thread flags in another task's structure.
1514 * See asm/thread_info.h for the available TIF_xxxx flags:
1515 */
1516static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1517{
1518        set_ti_thread_flag(task_thread_info(tsk), flag);
1519}
1520
1521static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1522{
1523        clear_ti_thread_flag(task_thread_info(tsk), flag);
1524}
1525
1526static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1527{
1528        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1529}
1530
1531static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1532{
1533        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1534}
1535
1536static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1537{
1538        return test_ti_thread_flag(task_thread_info(tsk), flag);
1539}
1540
1541static inline void set_tsk_need_resched(struct task_struct *tsk)
1542{
1543        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1544}
1545
1546static inline void clear_tsk_need_resched(struct task_struct *tsk)
1547{
1548        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1549}
1550
1551static inline int test_tsk_need_resched(struct task_struct *tsk)
1552{
1553        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
1554}
1555
1556/*
1557 * cond_resched() and cond_resched_lock(): latency reduction via
1558 * explicit rescheduling in places that are safe. The return
1559 * value indicates whether a reschedule actually took place.
1560 * cond_resched_lock() will drop the spinlock before scheduling,
1561 * cond_resched_softirq() will enable bottom halves before scheduling.
1562 */
1563#ifndef CONFIG_PREEMPT
1564extern int _cond_resched(void);
1565#else
1566static inline int _cond_resched(void) { return 0; }
1567#endif
1568
1569#define cond_resched() ({                       \
1570        ___might_sleep(__FILE__, __LINE__, 0);  \
1571        _cond_resched();                        \
1572})
1573
1574extern int __cond_resched_lock(spinlock_t *lock);
1575
1576#define cond_resched_lock(lock) ({                              \
1577        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1578        __cond_resched_lock(lock);                              \
1579})
1580
1581extern int __cond_resched_softirq(void);
1582
1583#define cond_resched_softirq() ({                                       \
1584        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);     \
1585        __cond_resched_softirq();                                       \
1586})
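
/*
 * Usage sketch (illustrative, not part of the original header): sprinkle
 * cond_resched() into long loops that hold no locks, and use
 * cond_resched_lock() where a spinlock would otherwise be held for a long
 * time; process_item() and handle_one() are hypothetical helpers:
 *
 *        for (i = 0; i < nr_items; i++) {
 *                process_item(i);
 *                cond_resched();
 *        }
 *
 *        spin_lock(&lock);
 *        while (!list_empty(&queue)) {
 *                handle_one(&queue);
 *                cond_resched_lock(&lock);
 *        }
 *        spin_unlock(&lock);
 */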
1587
1588static inline void cond_resched_rcu(void)
1589{
1590#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1591        rcu_read_unlock();
1592        cond_resched();
1593        rcu_read_lock();
1594#endif
1595}
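
/*
 * Usage sketch (illustrative, not part of the original header): callers sit
 * inside an rcu_read_lock() section and must not carry RCU-protected
 * pointers across the call, because the read-side critical section is
 * briefly exited; carrying a plain index is fine.  examine_bucket() is a
 * hypothetical helper:
 *
 *        rcu_read_lock();
 *        for (i = 0; i < table_size; i++) {
 *                examine_bucket(i);
 *                cond_resched_rcu();
 *        }
 *        rcu_read_unlock();
 */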
1596
1597/*
1598 * Does a critical section need to be broken because another
1599 * task is waiting? (Technically this does not depend on CONFIG_PREEMPT,
1600 * but it reflects a general need for low latency.)
1601 */
1602static inline int spin_needbreak(spinlock_t *lock)
1603{
1604#ifdef CONFIG_PREEMPT
1605        return spin_is_contended(lock);
1606#else
1607        return 0;
1608#endif
1609}
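
/*
 * Usage sketch (illustrative, not part of the original header): a lock
 * holder working through a long queue can poll both conditions and yield;
 * this is essentially what cond_resched_lock() bundles up.  more_work()
 * and do_one_chunk() are hypothetical helpers:
 *
 *        spin_lock(&lock);
 *        while (more_work(&queue)) {
 *                do_one_chunk(&queue);
 *                if (need_resched() || spin_needbreak(&lock)) {
 *                        spin_unlock(&lock);
 *                        cond_resched();
 *                        spin_lock(&lock);
 *                }
 *        }
 *        spin_unlock(&lock);
 */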
1610
1611static __always_inline bool need_resched(void)
1612{
1613        return unlikely(tif_need_resched());
1614}
1615
1616/*
1617 * Wrappers for p->thread_info->cpu access. No-op on UP.
1618 */
1619#ifdef CONFIG_SMP
1620
1621static inline unsigned int task_cpu(const struct task_struct *p)
1622{
1623#ifdef CONFIG_THREAD_INFO_IN_TASK
1624        return p->cpu;
1625#else
1626        return task_thread_info(p)->cpu;
1627#endif
1628}
1629
1630extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1631
1632#else
1633
1634static inline unsigned int task_cpu(const struct task_struct *p)
1635{
1636        return 0;
1637}
1638
1639static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1640{
1641}
1642
1643#endif /* CONFIG_SMP */
1644
1645/*
1646 * In order to reduce various lock-holder preemption latencies, provide an
1647 * interface to see if a vCPU is currently running or not.
1648 *
1649 * This allows us to terminate optimistic spin loops and block, analogous to
1650 * the native optimistic spin heuristic of testing if the lock owner task is
1651 * running or not.
1652 */
1653#ifndef vcpu_is_preempted
1654# define vcpu_is_preempted(cpu) false
1655#endif
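
/*
 * Usage sketch (illustrative, not part of the original header): an
 * optimistic spin loop can stop spinning once the owner is no longer
 * running, either because the owning task was scheduled out or because its
 * vCPU was preempted by the hypervisor.  lock->owner and ->on_cpu stand in
 * for whatever owner tracking the locking primitive actually uses:
 *
 *        while (owner == READ_ONCE(lock->owner)) {
 *                if (!READ_ONCE(owner->on_cpu) ||
 *                    vcpu_is_preempted(task_cpu(owner)))
 *                        break;
 *                cpu_relax();
 *        }
 */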
1656
1657extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1658extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1659
1660#ifndef TASK_SIZE_OF
1661#define TASK_SIZE_OF(tsk)       TASK_SIZE
1662#endif
1663
1664#endif /* _LINUX_SCHED_H */
1665