linux/include/linux/sched.h
   1#ifndef _LINUX_SCHED_H
   2#define _LINUX_SCHED_H
   3
   4/*
   5 * cloning flags:
   6 */
   7#define CSIGNAL         0x000000ff      /* signal mask to be sent at exit */
   8#define CLONE_VM        0x00000100      /* set if VM shared between processes */
   9#define CLONE_FS        0x00000200      /* set if fs info shared between processes */
  10#define CLONE_FILES     0x00000400      /* set if open files shared between processes */
  11#define CLONE_SIGHAND   0x00000800      /* set if signal handlers and blocked signals shared */
  12#define CLONE_PTRACE    0x00002000      /* set if we want to let tracing continue on the child too */
  13#define CLONE_VFORK     0x00004000      /* set if the parent wants the child to wake it up on mm_release */
  14#define CLONE_PARENT    0x00008000      /* set if we want to have the same parent as the cloner */
  15#define CLONE_THREAD    0x00010000      /* Same thread group? */
  16#define CLONE_NEWNS     0x00020000      /* New namespace group? */
  17#define CLONE_SYSVSEM   0x00040000      /* share system V SEM_UNDO semantics */
  18#define CLONE_SETTLS    0x00080000      /* create a new TLS for the child */
  19#define CLONE_PARENT_SETTID     0x00100000      /* set the TID in the parent */
  20#define CLONE_CHILD_CLEARTID    0x00200000      /* clear the TID in the child */
  21#define CLONE_DETACHED          0x00400000      /* Unused, ignored */
  22#define CLONE_UNTRACED          0x00800000      /* set if the tracing process can't force CLONE_PTRACE on this clone */
  23#define CLONE_CHILD_SETTID      0x01000000      /* set the TID in the child */
  24#define CLONE_STOPPED           0x02000000      /* Start in stopped state */
  25#define CLONE_NEWUTS            0x04000000      /* New utsname group? */
  26#define CLONE_NEWIPC            0x08000000      /* New ipcs */
  27#define CLONE_NEWUSER           0x10000000      /* New user namespace */
  28#define CLONE_NEWPID            0x20000000      /* New pid namespace */
  29#define CLONE_NEWNET            0x40000000      /* New network namespace */
  30#define CLONE_IO                0x80000000      /* Clone io context */
  31
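/*
 * Illustrative sketch (not part of this header): user space combines these
 * flags in the clone() flags argument. A thread-style clone, roughly as a
 * pthread implementation would request it, might pass something like:
 *
 *	flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
 *		CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
 *		CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
 *
 * The low CSIGNAL bits carry the exit signal instead (e.g. SIGCHLD for a
 * fork-style clone), so they must not overlap the flag bits above.
 */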
  32/*
  33 * Scheduling policies
  34 */
  35#define SCHED_NORMAL            0
  36#define SCHED_FIFO              1
  37#define SCHED_RR                2
  38#define SCHED_BATCH             3
  39/* SCHED_ISO: reserved but not implemented yet */
  40#define SCHED_IDLE              5
  41/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
  42#define SCHED_RESET_ON_FORK     0x40000000
  43
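/*
 * Illustrative sketch (user-space view, not part of this header): the
 * SCHED_RESET_ON_FORK bit is ORed into the policy argument of
 * sched_setscheduler(), e.g.
 *
 *	struct sched_param sp = { .sched_priority = 10 };
 *	sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp);
 *
 * so that children forked by this realtime task revert to SCHED_NORMAL
 * instead of inheriting the realtime policy and priority.
 */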
  44#ifdef __KERNEL__
  45
  46struct sched_param {
  47        int sched_priority;
  48};
  49
  50#include <asm/param.h>  /* for HZ */
  51
  52#include <linux/capability.h>
  53#include <linux/threads.h>
  54#include <linux/kernel.h>
  55#include <linux/types.h>
  56#include <linux/timex.h>
  57#include <linux/jiffies.h>
  58#include <linux/rbtree.h>
  59#include <linux/thread_info.h>
  60#include <linux/cpumask.h>
  61#include <linux/errno.h>
  62#include <linux/nodemask.h>
  63#include <linux/mm_types.h>
  64
  65#include <asm/system.h>
  66#include <asm/page.h>
  67#include <asm/ptrace.h>
  68#include <asm/cputime.h>
  69
  70#include <linux/smp.h>
  71#include <linux/sem.h>
  72#include <linux/signal.h>
  73#include <linux/path.h>
  74#include <linux/compiler.h>
  75#include <linux/completion.h>
  76#include <linux/pid.h>
  77#include <linux/percpu.h>
  78#include <linux/topology.h>
  79#include <linux/proportions.h>
  80#include <linux/seccomp.h>
  81#include <linux/rcupdate.h>
  82#include <linux/rculist.h>
  83#include <linux/rtmutex.h>
  84
  85#include <linux/time.h>
  86#include <linux/param.h>
  87#include <linux/resource.h>
  88#include <linux/timer.h>
  89#include <linux/hrtimer.h>
  90#include <linux/task_io_accounting.h>
  91#include <linux/kobject.h>
  92#include <linux/latencytop.h>
  93#include <linux/cred.h>
  94
  95#include <asm/processor.h>
  96
  97struct exec_domain;
  98struct futex_pi_state;
  99struct robust_list_head;
 100struct bio;
 101struct fs_struct;
 102struct bts_context;
 103struct perf_event_context;
 104
 105/*
 106 * List of flags we want to share for kernel threads,
 107 * if only because they are not used by them anyway.
 108 */
 109#define CLONE_KERNEL    (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
 110
 111/*
  112 * These are the constants used to fake the fixed-point load-average
 113 * counting. Some notes:
 114 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 115 *    a load-average precision of 10 bits integer + 11 bits fractional
 116 *  - if you want to count load-averages more often, you need more
 117 *    precision, or rounding will get you. With 2-second counting freq,
 118 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 119 *    11 bit fractions.
 120 */
 121extern unsigned long avenrun[];         /* Load averages */
 122extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 123
 124#define FSHIFT          11              /* nr of bits of precision */
 125#define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
 126#define LOAD_FREQ       (5*HZ+1)        /* 5 sec intervals */
 127#define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
 128#define EXP_5           2014            /* 1/exp(5sec/5min) */
 129#define EXP_15          2037            /* 1/exp(5sec/15min) */
 130
 131#define CALC_LOAD(load,exp,n) \
 132        load *= exp; \
 133        load += n*(FIXED_1-exp); \
 134        load >>= FSHIFT;
 135
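/*
 * Worked example (illustrative only): the EXP_n constants are FIXED_1/e^(5s/n),
 * e.g. EXP_1 = 2048/e^(5/60) ~= 2048/1.0869 ~= 1884. Assuming the caller passes
 * n already scaled to fixed point (nr_active * FIXED_1, as the global
 * load-average code does), one 5-second step of the 1-minute average looks like:
 *
 *	unsigned long load = 1024;		// 0.50 in 11-bit fixed point
 *	unsigned long n = 2 * FIXED_1;		// two runnable tasks
 *	CALC_LOAD(load, EXP_1, n);		// load == 1270, i.e. ~0.62
 *
 * which matches the continuous form new = old*e^(-5/60) + n*(1 - e^(-5/60)).
 */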
 136extern unsigned long total_forks;
 137extern int nr_threads;
 138DECLARE_PER_CPU(unsigned long, process_counts);
 139extern int nr_processes(void);
 140extern unsigned long nr_running(void);
 141extern unsigned long nr_uninterruptible(void);
 142extern unsigned long nr_iowait(void);
 143extern unsigned long nr_iowait_cpu(void);
 144extern unsigned long this_cpu_load(void);
 145
 146
 147extern void calc_global_load(void);
 148extern u64 cpu_nr_migrations(int cpu);
 149
 150extern unsigned long get_parent_ip(unsigned long addr);
 151
 152struct seq_file;
 153struct cfs_rq;
 154struct task_group;
 155#ifdef CONFIG_SCHED_DEBUG
 156extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 157extern void proc_sched_set_task(struct task_struct *p);
 158extern void
 159print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 160#else
 161static inline void
 162proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 163{
 164}
 165static inline void proc_sched_set_task(struct task_struct *p)
 166{
 167}
 168static inline void
 169print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 170{
 171}
 172#endif
 173
 174extern unsigned long long time_sync_thresh;
 175
 176/*
 177 * Task state bitmask. NOTE! These bits are also
 178 * encoded in fs/proc/array.c: get_task_state().
 179 *
 180 * We have two separate sets of flags: task->state
  181 * is about runnability, while task->exit_state is
 182 * about the task exiting. Confusing, but this way
 183 * modifying one set can't modify the other one by
 184 * mistake.
 185 */
 186#define TASK_RUNNING            0
 187#define TASK_INTERRUPTIBLE      1
 188#define TASK_UNINTERRUPTIBLE    2
 189#define __TASK_STOPPED          4
 190#define __TASK_TRACED           8
 191/* in tsk->exit_state */
 192#define EXIT_ZOMBIE             16
 193#define EXIT_DEAD               32
 194/* in tsk->state again */
 195#define TASK_DEAD               64
 196#define TASK_WAKEKILL           128
 197#define TASK_WAKING             256
 198
 199/* Convenience macros for the sake of set_task_state */
 200#define TASK_KILLABLE           (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
 201#define TASK_STOPPED            (TASK_WAKEKILL | __TASK_STOPPED)
 202#define TASK_TRACED             (TASK_WAKEKILL | __TASK_TRACED)
 203
 204/* Convenience macros for the sake of wake_up */
 205#define TASK_NORMAL             (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
 206#define TASK_ALL                (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 207
 208/* get_task_state() */
 209#define TASK_REPORT             (TASK_RUNNING | TASK_INTERRUPTIBLE | \
 210                                 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 211                                 __TASK_TRACED)
 212
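/*
 * Illustrative note: TASK_KILLABLE sleeps like TASK_UNINTERRUPTIBLE except
 * that a fatal signal (SIGKILL) can still wake the task; it is typically
 * used via helpers such as wait_event_killable() or the
 * schedule_timeout_killable() declared further down in this header.
 */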
 213#define task_is_traced(task)    ((task->state & __TASK_TRACED) != 0)
 214#define task_is_stopped(task)   ((task->state & __TASK_STOPPED) != 0)
 215#define task_is_stopped_or_traced(task) \
 216                        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 217#define task_contributes_to_load(task)  \
 218                                ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 219                                 (task->flags & PF_FREEZING) == 0)
 220
 221#define __set_task_state(tsk, state_value)              \
 222        do { (tsk)->state = (state_value); } while (0)
 223#define set_task_state(tsk, state_value)                \
 224        set_mb((tsk)->state, (state_value))
 225
 226/*
 227 * set_current_state() includes a barrier so that the write of current->state
 228 * is correctly serialised wrt the caller's subsequent test of whether to
 229 * actually sleep:
 230 *
 231 *      set_current_state(TASK_UNINTERRUPTIBLE);
 232 *      if (do_i_need_to_sleep())
 233 *              schedule();
 234 *
 235 * If the caller does not need such serialisation then use __set_current_state()
 236 */
 237#define __set_current_state(state_value)                        \
 238        do { current->state = (state_value); } while (0)
 239#define set_current_state(state_value)          \
 240        set_mb(current->state, (state_value))
 241
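/*
 * Illustrative sketch of the pattern described above (do_i_need_to_sleep() is
 * the hypothetical predicate from the comment, not a real kernel function):
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (!do_i_need_to_sleep())
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 * The loop re-checks the condition after every wakeup and restores
 * TASK_RUNNING once it decides to stop sleeping.
 */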
 242/* Task command name length */
 243#define TASK_COMM_LEN 16
 244
 245#include <linux/spinlock.h>
 246
 247/*
 248 * This serializes "schedule()" and also protects
 249 * the run-queue from deletions/modifications (but
 250 * _adding_ to the beginning of the run-queue has
 251 * a separate lock).
 252 */
 253extern rwlock_t tasklist_lock;
 254extern spinlock_t mmlist_lock;
 255
 256struct task_struct;
 257
 258extern void sched_init(void);
 259extern void sched_init_smp(void);
 260extern asmlinkage void schedule_tail(struct task_struct *prev);
 261extern void init_idle(struct task_struct *idle, int cpu);
 262extern void init_idle_bootup_task(struct task_struct *idle);
 263
 264extern int runqueue_is_locked(int cpu);
 265extern void task_rq_unlock_wait(struct task_struct *p);
 266
 267extern cpumask_var_t nohz_cpu_mask;
 268#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 269extern int select_nohz_load_balancer(int cpu);
 270extern int get_nohz_load_balancer(void);
 271#else
 272static inline int select_nohz_load_balancer(int cpu)
 273{
 274        return 0;
 275}
 276#endif
 277
 278/*
 279 * Only dump TASK_* tasks. (0 for all tasks)
 280 */
 281extern void show_state_filter(unsigned long state_filter);
 282
 283static inline void show_state(void)
 284{
 285        show_state_filter(0);
 286}
 287
 288extern void show_regs(struct pt_regs *);
 289
 290/*
  291 * TASK is a pointer to the task whose backtrace we want to see (or NULL for the
  292 * current task), SP is the stack pointer of the first frame that should be shown
  293 * in the backtrace (or NULL if the entire call-chain of the task should be shown).
 294 */
 295extern void show_stack(struct task_struct *task, unsigned long *sp);
 296
 297void io_schedule(void);
 298long io_schedule_timeout(long timeout);
 299
 300extern void cpu_init (void);
 301extern void trap_init(void);
 302extern void update_process_times(int user);
 303extern void scheduler_tick(void);
 304
 305extern void sched_show_task(struct task_struct *p);
 306
 307#ifdef CONFIG_DETECT_SOFTLOCKUP
 308extern void softlockup_tick(void);
 309extern void touch_softlockup_watchdog(void);
 310extern void touch_all_softlockup_watchdogs(void);
 311extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 312                                    void __user *buffer,
 313                                    size_t *lenp, loff_t *ppos);
 314extern unsigned int  softlockup_panic;
 315extern int softlockup_thresh;
 316#else
 317static inline void softlockup_tick(void)
 318{
 319}
 320static inline void touch_softlockup_watchdog(void)
 321{
 322}
 323static inline void touch_all_softlockup_watchdogs(void)
 324{
 325}
 326#endif
 327
 328#ifdef CONFIG_DETECT_HUNG_TASK
 329extern unsigned int  sysctl_hung_task_panic;
 330extern unsigned long sysctl_hung_task_check_count;
 331extern unsigned long sysctl_hung_task_timeout_secs;
 332extern unsigned long sysctl_hung_task_warnings;
 333extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
 334                                         void __user *buffer,
 335                                         size_t *lenp, loff_t *ppos);
 336#endif
 337
 338/* Attach to any functions which should be ignored in wchan output. */
 339#define __sched         __attribute__((__section__(".sched.text")))
 340
 341/* Linker adds these: start and end of __sched functions */
 342extern char __sched_text_start[], __sched_text_end[];
 343
 344/* Is this address in the __sched functions? */
 345extern int in_sched_functions(unsigned long addr);
 346
 347#define MAX_SCHEDULE_TIMEOUT    LONG_MAX
 348extern signed long schedule_timeout(signed long timeout);
 349extern signed long schedule_timeout_interruptible(signed long timeout);
 350extern signed long schedule_timeout_killable(signed long timeout);
 351extern signed long schedule_timeout_uninterruptible(signed long timeout);
 352asmlinkage void __schedule(void);
 353asmlinkage void schedule(void);
 354extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
 355
 356struct nsproxy;
 357struct user_namespace;
 358
  359/*
  360 * Default maximum number of active map areas; this limits the number of vmas
  361 * per mm struct. Users can overwrite this number via sysctl, but there is a
  362 * problem.
  363 *
  364 * When a program's coredump is generated in ELF format, a section is created
  365 * per vma. In ELF, the number of sections is represented as an unsigned short,
  366 * so the number of sections must be smaller than 65535 at coredump time.
  367 * Because the kernel adds some informative sections to the program image when
  368 * generating the coredump, we need some margin. The number of extra sections
  369 * is currently 1-3, depending on the arch, so we use "5" as a safe margin here.
  370 */
 371#define MAPCOUNT_ELF_CORE_MARGIN        (5)
 372#define DEFAULT_MAX_MAP_COUNT   (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 373
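/*
 * For illustration: USHORT_MAX is 65535, so with the margin above the default
 * works out to DEFAULT_MAX_MAP_COUNT = 65535 - 5 = 65530 vmas per mm. The
 * effective limit is still the runtime sysctl_max_map_count value below
 * (vm.max_map_count), which merely starts out at this default.
 */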
 374extern int sysctl_max_map_count;
 375
 376#include <linux/aio.h>
 377
 378extern unsigned long
 379arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 380                       unsigned long, unsigned long);
 381extern unsigned long
 382arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 383                          unsigned long len, unsigned long pgoff,
 384                          unsigned long flags);
 385extern void arch_unmap_area(struct mm_struct *, unsigned long);
 386extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
 387
 388#if USE_SPLIT_PTLOCKS
 389/*
 390 * The mm counters are not protected by its page_table_lock,
 391 * so must be incremented atomically.
 392 */
 393#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
 394#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
 395#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
 396#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
 397#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
 398
 399#else  /* !USE_SPLIT_PTLOCKS */
 400/*
 401 * The mm counters are protected by its page_table_lock,
 402 * so can be incremented directly.
 403 */
 404#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
 405#define get_mm_counter(mm, member) ((mm)->_##member)
 406#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
 407#define inc_mm_counter(mm, member) (mm)->_##member++
 408#define dec_mm_counter(mm, member) (mm)->_##member--
 409
 410#endif /* !USE_SPLIT_PTLOCKS */
 411
 412#define get_mm_rss(mm)                                  \
 413        (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
 414#define update_hiwater_rss(mm)  do {                    \
 415        unsigned long _rss = get_mm_rss(mm);            \
 416        if ((mm)->hiwater_rss < _rss)                   \
 417                (mm)->hiwater_rss = _rss;               \
 418} while (0)
 419#define update_hiwater_vm(mm)   do {                    \
 420        if ((mm)->hiwater_vm < (mm)->total_vm)          \
 421                (mm)->hiwater_vm = (mm)->total_vm;      \
 422} while (0)
 423
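/*
 * Illustrative sketch (not from this header): these helpers compose over the
 * _file_rss/_anon_rss fields of mm_struct, so code that maps a new anonymous
 * page and wants to report RSS might do something like:
 *
 *	inc_mm_counter(mm, anon_rss);
 *	update_hiwater_rss(mm);
 *	pr_debug("rss is now %lu pages\n", get_mm_rss(mm));
 *
 * Whether the increment is an atomic_long_inc() or a plain ++ is hidden
 * behind USE_SPLIT_PTLOCKS, as defined above.
 */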
 424static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
 425{
 426        return max(mm->hiwater_rss, get_mm_rss(mm));
 427}
 428
 429static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
 430                                         struct mm_struct *mm)
 431{
 432        unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
 433
 434        if (*maxrss < hiwater_rss)
 435                *maxrss = hiwater_rss;
 436}
 437
 438static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
 439{
 440        return max(mm->hiwater_vm, mm->total_vm);
 441}
 442
 443extern void set_dumpable(struct mm_struct *mm, int value);
 444extern int get_dumpable(struct mm_struct *mm);
 445
 446/* mm flags */
 447/* dumpable bits */
 448#define MMF_DUMPABLE      0  /* core dump is permitted */
 449#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
 450
 451#define MMF_DUMPABLE_BITS 2
 452#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
 453
 454/* coredump filter bits */
 455#define MMF_DUMP_ANON_PRIVATE   2
 456#define MMF_DUMP_ANON_SHARED    3
 457#define MMF_DUMP_MAPPED_PRIVATE 4
 458#define MMF_DUMP_MAPPED_SHARED  5
 459#define MMF_DUMP_ELF_HEADERS    6
 460#define MMF_DUMP_HUGETLB_PRIVATE 7
 461#define MMF_DUMP_HUGETLB_SHARED  8
 462
 463#define MMF_DUMP_FILTER_SHIFT   MMF_DUMPABLE_BITS
 464#define MMF_DUMP_FILTER_BITS    7
 465#define MMF_DUMP_FILTER_MASK \
 466        (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 467#define MMF_DUMP_FILTER_DEFAULT \
 468        ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
 469         (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
 470
 471#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
 472# define MMF_DUMP_MASK_DEFAULT_ELF      (1 << MMF_DUMP_ELF_HEADERS)
 473#else
 474# define MMF_DUMP_MASK_DEFAULT_ELF      0
 475#endif
 476                                        /* leave room for more dump flags */
 477#define MMF_VM_MERGEABLE        16      /* KSM may merge identical pages */
 478
 479#define MMF_INIT_MASK           (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 480
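/*
 * For illustration, expanding the defines above: MMF_DUMP_FILTER_DEFAULT is
 * (1 << 2) | (1 << 3) | (1 << 7), plus (1 << 6) when
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is set, i.e. 0x8c or 0xcc in mm->flags;
 * anonymous private/shared and hugetlb private mappings (and optionally ELF
 * headers) are dumped by default.
 */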
 481struct sighand_struct {
 482        atomic_t                count;
 483        struct k_sigaction      action[_NSIG];
 484        spinlock_t              siglock;
 485        wait_queue_head_t       signalfd_wqh;
 486};
 487
 488struct pacct_struct {
 489        int                     ac_flag;
 490        long                    ac_exitcode;
 491        unsigned long           ac_mem;
 492        cputime_t               ac_utime, ac_stime;
 493        unsigned long           ac_minflt, ac_majflt;
 494};
 495
 496struct cpu_itimer {
 497        cputime_t expires;
 498        cputime_t incr;
 499        u32 error;
 500        u32 incr_error;
 501};
 502
 503/**
 504 * struct task_cputime - collected CPU time counts
 505 * @utime:              time spent in user mode, in &cputime_t units
 506 * @stime:              time spent in kernel mode, in &cputime_t units
 507 * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
 508 *
 509 * This structure groups together three kinds of CPU time that are
 510 * tracked for threads and thread groups.  Most things considering
 511 * CPU time want to group these counts together and treat all three
 512 * of them in parallel.
 513 */
 514struct task_cputime {
 515        cputime_t utime;
 516        cputime_t stime;
 517        unsigned long long sum_exec_runtime;
 518};
 519/* Alternate field names when used to cache expirations. */
 520#define prof_exp        stime
 521#define virt_exp        utime
 522#define sched_exp       sum_exec_runtime
 523
 524#define INIT_CPUTIME    \
 525        (struct task_cputime) {                                 \
 526                .utime = cputime_zero,                          \
 527                .stime = cputime_zero,                          \
 528                .sum_exec_runtime = 0,                          \
 529        }
 530
 531/*
 532 * Disable preemption until the scheduler is running.
 533 * Reset by start_kernel()->sched_init()->init_idle().
 534 *
 535 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 536 * before the scheduler is active -- see should_resched().
 537 */
 538#define INIT_PREEMPT_COUNT      (1 + PREEMPT_ACTIVE)
 539
 540/**
 541 * struct thread_group_cputimer - thread group interval timer counts
 542 * @cputime:            thread group interval timers.
 543 * @running:            non-zero when there are timers running and
 544 *                      @cputime receives updates.
 545 * @lock:               lock for fields in this struct.
 546 *
 547 * This structure contains the version of task_cputime, above, that is
 548 * used for thread group CPU timer calculations.
 549 */
 550struct thread_group_cputimer {
 551        struct task_cputime cputime;
 552        int running;
 553        spinlock_t lock;
 554};
 555
 556/*
  557 * NOTE! "signal_struct" does not have its own
 558 * locking, because a shared signal_struct always
 559 * implies a shared sighand_struct, so locking
 560 * sighand_struct is always a proper superset of
 561 * the locking of signal_struct.
 562 */
 563struct signal_struct {
 564        atomic_t                count;
 565        atomic_t                live;
 566
 567        wait_queue_head_t       wait_chldexit;  /* for wait4() */
 568
 569        /* current thread group signal load-balancing target: */
 570        struct task_struct      *curr_target;
 571
 572        /* shared signal handling: */
 573        struct sigpending       shared_pending;
 574
 575        /* thread group exit support */
 576        int                     group_exit_code;
 577        /* overloaded:
 578         * - notify group_exit_task when ->count is equal to notify_count
 579         * - everyone except group_exit_task is stopped during signal delivery
 580         *   of fatal signals, group_exit_task processes the signal.
 581         */
 582        int                     notify_count;
 583        struct task_struct      *group_exit_task;
 584
 585        /* thread group stop support, overloads group_exit_code too */
 586        int                     group_stop_count;
 587        unsigned int            flags; /* see SIGNAL_* flags below */
 588
 589        /* POSIX.1b Interval Timers */
 590        struct list_head posix_timers;
 591
 592        /* ITIMER_REAL timer for the process */
 593        struct hrtimer real_timer;
 594        struct pid *leader_pid;
 595        ktime_t it_real_incr;
 596
 597        /*
 598         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
 599         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
 600         * values are defined to 0 and 1 respectively
 601         */
 602        struct cpu_itimer it[2];
 603
 604        /*
 605         * Thread group totals for process CPU timers.
 606         * See thread_group_cputimer(), et al, for details.
 607         */
 608        struct thread_group_cputimer cputimer;
 609
 610        /* Earliest-expiration cache. */
 611        struct task_cputime cputime_expires;
 612
 613        struct list_head cpu_timers[3];
 614
 615        struct pid *tty_old_pgrp;
 616
 617        /* boolean value for session group leader */
 618        int leader;
 619
 620        struct tty_struct *tty; /* NULL if no tty */
 621
 622        /*
 623         * Cumulative resource counters for dead threads in the group,
 624         * and for reaped dead child processes forked by this group.
 625         * Live threads maintain their own counters and add to these
 626         * in __exit_signal, except for the group leader.
 627         */
 628        cputime_t utime, stime, cutime, cstime;
 629        cputime_t gtime;
 630        cputime_t cgtime;
 631        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 632        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 633        unsigned long inblock, oublock, cinblock, coublock;
 634        unsigned long maxrss, cmaxrss;
 635        struct task_io_accounting ioac;
 636
 637        /*
  638         * Cumulative ns of scheduled CPU time for dead threads in the
  639         * group, not including a zombie group leader. (This only differs
 640         * from jiffies_to_ns(utime + stime) if sched_clock uses something
 641         * other than jiffies.)
 642         */
 643        unsigned long long sum_sched_runtime;
 644
 645        /*
 646         * We don't bother to synchronize most readers of this at all,
 647         * because there is no reader checking a limit that actually needs
 648         * to get both rlim_cur and rlim_max atomically, and either one
 649         * alone is a single word that can safely be read normally.
 650         * getrlimit/setrlimit use task_lock(current->group_leader) to
 651         * protect this instead of the siglock, because they really
 652         * have no need to disable irqs.
 653         */
 654        struct rlimit rlim[RLIM_NLIMITS];
 655
 656#ifdef CONFIG_BSD_PROCESS_ACCT
 657        struct pacct_struct pacct;      /* per-process accounting information */
 658#endif
 659#ifdef CONFIG_TASKSTATS
 660        struct taskstats *stats;
 661#endif
 662#ifdef CONFIG_AUDIT
 663        unsigned audit_tty;
 664        struct tty_audit_buf *tty_audit_buf;
 665#endif
 666
 667        int oom_adj;    /* OOM kill score adjustment (bit shift) */
 668};
 669
 670/* Context switch must be unlocked if interrupts are to be enabled */
 671#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 672# define __ARCH_WANT_UNLOCKED_CTXSW
 673#endif
 674
 675/*
 676 * Bits in flags field of signal_struct.
 677 */
 678#define SIGNAL_STOP_STOPPED     0x00000001 /* job control stop in effect */
 679#define SIGNAL_STOP_DEQUEUED    0x00000002 /* stop signal dequeued */
 680#define SIGNAL_STOP_CONTINUED   0x00000004 /* SIGCONT since WCONTINUED reap */
 681#define SIGNAL_GROUP_EXIT       0x00000008 /* group exit in progress */
 682/*
 683 * Pending notifications to parent.
 684 */
 685#define SIGNAL_CLD_STOPPED      0x00000010
 686#define SIGNAL_CLD_CONTINUED    0x00000020
 687#define SIGNAL_CLD_MASK         (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
 688
 689#define SIGNAL_UNKILLABLE       0x00000040 /* for init: ignore fatal signals */
 690
 691/* If true, all threads except ->group_exit_task have pending SIGKILL */
 692static inline int signal_group_exit(const struct signal_struct *sig)
 693{
 694        return  (sig->flags & SIGNAL_GROUP_EXIT) ||
 695                (sig->group_exit_task != NULL);
 696}
 697
 698/*
  699 * Some day this will be a full-fledged user tracking system.
 700 */
 701struct user_struct {
 702        atomic_t __count;       /* reference count */
 703        atomic_t processes;     /* How many processes does this user have? */
 704        atomic_t files;         /* How many open files does this user have? */
 705        atomic_t sigpending;    /* How many pending signals does this user have? */
 706#ifdef CONFIG_INOTIFY_USER
 707        atomic_t inotify_watches; /* How many inotify watches does this user have? */
 708        atomic_t inotify_devs;  /* How many inotify devs does this user have opened? */
 709#endif
 710#ifdef CONFIG_EPOLL
 711        atomic_t epoll_watches; /* The number of file descriptors currently watched */
 712#endif
 713#ifdef CONFIG_POSIX_MQUEUE
 714        /* protected by mq_lock */
 715        unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
 716#endif
 717        unsigned long locked_shm; /* How many pages of mlocked shm ? */
 718
 719#ifdef CONFIG_KEYS
 720        struct key *uid_keyring;        /* UID specific keyring */
 721        struct key *session_keyring;    /* UID's default session keyring */
 722#endif
 723
 724        /* Hash table maintenance information */
 725        struct hlist_node uidhash_node;
 726        uid_t uid;
 727        struct user_namespace *user_ns;
 728
 729#ifdef CONFIG_USER_SCHED
 730        struct task_group *tg;
 731#ifdef CONFIG_SYSFS
 732        struct kobject kobj;
 733        struct delayed_work work;
 734#endif
 735#endif
 736
 737#ifdef CONFIG_PERF_EVENTS
 738        atomic_long_t locked_vm;
 739#endif
 740};
 741
 742extern int uids_sysfs_init(void);
 743
 744extern struct user_struct *find_user(uid_t);
 745
 746extern struct user_struct root_user;
 747#define INIT_USER (&root_user)
 748
 749
 750struct backing_dev_info;
 751struct reclaim_state;
 752
 753#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 754struct sched_info {
 755        /* cumulative counters */
 756        unsigned long pcount;         /* # of times run on this cpu */
 757        unsigned long long run_delay; /* time spent waiting on a runqueue */
 758
 759        /* timestamps */
 760        unsigned long long last_arrival,/* when we last ran on a cpu */
 761                           last_queued; /* when we were last queued to run */
 762#ifdef CONFIG_SCHEDSTATS
 763        /* BKL stats */
 764        unsigned int bkl_count;
 765#endif
 766};
 767#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 768
 769#ifdef CONFIG_TASK_DELAY_ACCT
 770struct task_delay_info {
 771        spinlock_t      lock;
 772        unsigned int    flags;  /* Private per-task flags */
 773
 774        /* For each stat XXX, add following, aligned appropriately
 775         *
 776         * struct timespec XXX_start, XXX_end;
 777         * u64 XXX_delay;
 778         * u32 XXX_count;
 779         *
 780         * Atomicity of updates to XXX_delay, XXX_count protected by
 781         * single lock above (split into XXX_lock if contention is an issue).
 782         */
 783
 784        /*
 785         * XXX_count is incremented on every XXX operation, the delay
 786         * associated with the operation is added to XXX_delay.
 787         * XXX_delay contains the accumulated delay time in nanoseconds.
 788         */
 789        struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
 790        u64 blkio_delay;        /* wait for sync block io completion */
 791        u64 swapin_delay;       /* wait for swapin block io completion */
 792        u32 blkio_count;        /* total count of the number of sync block */
 793                                /* io operations performed */
 794        u32 swapin_count;       /* total count of the number of swapin block */
 795                                /* io operations performed */
 796
 797        struct timespec freepages_start, freepages_end;
 798        u64 freepages_delay;    /* wait for memory reclaim */
 799        u32 freepages_count;    /* total count of memory reclaim */
 800};
 801#endif  /* CONFIG_TASK_DELAY_ACCT */
 802
 803static inline int sched_info_on(void)
 804{
 805#ifdef CONFIG_SCHEDSTATS
 806        return 1;
 807#elif defined(CONFIG_TASK_DELAY_ACCT)
 808        extern int delayacct_on;
 809        return delayacct_on;
 810#else
 811        return 0;
 812#endif
 813}
 814
 815enum cpu_idle_type {
 816        CPU_IDLE,
 817        CPU_NOT_IDLE,
 818        CPU_NEWLY_IDLE,
 819        CPU_MAX_IDLE_TYPES
 820};
 821
 822/*
 823 * sched-domains (multiprocessor balancing) declarations:
 824 */
 825
 826/*
 827 * Increase resolution of nice-level calculations:
 828 */
 829#define SCHED_LOAD_SHIFT        10
 830#define SCHED_LOAD_SCALE        (1L << SCHED_LOAD_SHIFT)
 831
 832#define SCHED_LOAD_SCALE_FUZZ   SCHED_LOAD_SCALE
 833
 834#ifdef CONFIG_SMP
 835#define SD_LOAD_BALANCE         0x0001  /* Do load balancing on this domain. */
 836#define SD_BALANCE_NEWIDLE      0x0002  /* Balance when about to become idle */
 837#define SD_BALANCE_EXEC         0x0004  /* Balance on exec */
 838#define SD_BALANCE_FORK         0x0008  /* Balance on fork, clone */
 839#define SD_BALANCE_WAKE         0x0010  /* Balance on wakeup */
 840#define SD_WAKE_AFFINE          0x0020  /* Wake task to waking CPU */
 841#define SD_PREFER_LOCAL         0x0040  /* Prefer to keep tasks local to this domain */
 842#define SD_SHARE_CPUPOWER       0x0080  /* Domain members share cpu power */
 843#define SD_POWERSAVINGS_BALANCE 0x0100  /* Balance for power savings */
 844#define SD_SHARE_PKG_RESOURCES  0x0200  /* Domain members share cpu pkg resources */
 845#define SD_SERIALIZE            0x0400  /* Only a single load balancing instance */
 846
 847#define SD_PREFER_SIBLING       0x1000  /* Prefer to place tasks in a sibling domain */
 848
 849enum powersavings_balance_level {
 850        POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
 851        POWERSAVINGS_BALANCE_BASIC,     /* Fill one thread/core/package
 852                                         * first for long running threads
 853                                         */
 854        POWERSAVINGS_BALANCE_WAKEUP,    /* Also bias task wakeups to semi-idle
 855                                         * cpu package for power savings
 856                                         */
 857        MAX_POWERSAVINGS_BALANCE_LEVELS
 858};
 859
 860extern int sched_mc_power_savings, sched_smt_power_savings;
 861
 862static inline int sd_balance_for_mc_power(void)
 863{
 864        if (sched_smt_power_savings)
 865                return SD_POWERSAVINGS_BALANCE;
 866
 867        return SD_PREFER_SIBLING;
 868}
 869
 870static inline int sd_balance_for_package_power(void)
 871{
 872        if (sched_mc_power_savings | sched_smt_power_savings)
 873                return SD_POWERSAVINGS_BALANCE;
 874
 875        return SD_PREFER_SIBLING;
 876}
 877
 878/*
 879 * Optimise SD flags for power savings:
  880 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
  881 * Keep default SD flags if sched_{smt,mc}_power_savings=0
 882 */
 883
 884static inline int sd_power_saving_flags(void)
 885{
 886        if (sched_mc_power_savings | sched_smt_power_savings)
 887                return SD_BALANCE_NEWIDLE;
 888
 889        return 0;
 890}
 891
 892struct sched_group {
 893        struct sched_group *next;       /* Must be a circular list */
 894
 895        /*
 896         * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 897         * single CPU.
 898         */
 899        unsigned int cpu_power;
 900
 901        /*
 902         * The CPUs this group covers.
 903         *
 904         * NOTE: this field is variable length. (Allocated dynamically
 905         * by attaching extra space to the end of the structure,
 906         * depending on how many CPUs the kernel has booted up with)
 907         *
  908         * It can also be embedded into static data structures at build
 909         * time. (See 'struct static_sched_group' in kernel/sched.c)
 910         */
 911        unsigned long cpumask[0];
 912};
 913
 914static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 915{
 916        return to_cpumask(sg->cpumask);
 917}
 918
 919enum sched_domain_level {
 920        SD_LV_NONE = 0,
 921        SD_LV_SIBLING,
 922        SD_LV_MC,
 923        SD_LV_CPU,
 924        SD_LV_NODE,
 925        SD_LV_ALLNODES,
 926        SD_LV_MAX
 927};
 928
 929struct sched_domain_attr {
 930        int relax_domain_level;
 931};
 932
 933#define SD_ATTR_INIT    (struct sched_domain_attr) {    \
 934        .relax_domain_level = -1,                       \
 935}
 936
 937struct sched_domain {
 938        /* These fields must be setup */
 939        struct sched_domain *parent;    /* top domain must be null terminated */
 940        struct sched_domain *child;     /* bottom domain must be null terminated */
 941        struct sched_group *groups;     /* the balancing groups of the domain */
 942        unsigned long min_interval;     /* Minimum balance interval ms */
 943        unsigned long max_interval;     /* Maximum balance interval ms */
 944        unsigned int busy_factor;       /* less balancing by factor if busy */
 945        unsigned int imbalance_pct;     /* No balance until over watermark */
 946        unsigned int cache_nice_tries;  /* Leave cache hot tasks for # tries */
 947        unsigned int busy_idx;
 948        unsigned int idle_idx;
 949        unsigned int newidle_idx;
 950        unsigned int wake_idx;
 951        unsigned int forkexec_idx;
 952        unsigned int smt_gain;
 953        int flags;                      /* See SD_* */
 954        enum sched_domain_level level;
 955
 956        /* Runtime fields. */
 957        unsigned long last_balance;     /* init to jiffies. units in jiffies */
 958        unsigned int balance_interval;  /* initialise to 1. units in ms. */
 959        unsigned int nr_balance_failed; /* initialise to 0 */
 960
 961        u64 last_update;
 962
 963#ifdef CONFIG_SCHEDSTATS
 964        /* load_balance() stats */
 965        unsigned int lb_count[CPU_MAX_IDLE_TYPES];
 966        unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
 967        unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
 968        unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
 969        unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
 970        unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
 971        unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
 972        unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 973
 974        /* Active load balancing */
 975        unsigned int alb_count;
 976        unsigned int alb_failed;
 977        unsigned int alb_pushed;
 978
 979        /* SD_BALANCE_EXEC stats */
 980        unsigned int sbe_count;
 981        unsigned int sbe_balanced;
 982        unsigned int sbe_pushed;
 983
 984        /* SD_BALANCE_FORK stats */
 985        unsigned int sbf_count;
 986        unsigned int sbf_balanced;
 987        unsigned int sbf_pushed;
 988
 989        /* try_to_wake_up() stats */
 990        unsigned int ttwu_wake_remote;
 991        unsigned int ttwu_move_affine;
 992        unsigned int ttwu_move_balance;
 993#endif
 994#ifdef CONFIG_SCHED_DEBUG
 995        char *name;
 996#endif
 997
 998        /*
 999         * Span of all CPUs in this domain.
1000         *
1001         * NOTE: this field is variable length. (Allocated dynamically
1002         * by attaching extra space to the end of the structure,
1003         * depending on how many CPUs the kernel has booted up with)
1004         *
 1005         * It can also be embedded into static data structures at build
1006         * time. (See 'struct static_sched_domain' in kernel/sched.c)
1007         */
1008        unsigned long span[0];
1009};
1010
1011static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1012{
1013        return to_cpumask(sd->span);
1014}
1015
1016extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
1017                                    struct sched_domain_attr *dattr_new);
1018
1019/* Test a flag in parent sched domain */
1020static inline int test_sd_parent(struct sched_domain *sd, int flag)
1021{
1022        if (sd->parent && (sd->parent->flags & flag))
1023                return 1;
1024
1025        return 0;
1026}
1027
1028unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
1029unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);
1030
1031#else /* CONFIG_SMP */
1032
1033struct sched_domain_attr;
1034
1035static inline void
1036partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
1037                        struct sched_domain_attr *dattr_new)
1038{
1039}
1040#endif  /* !CONFIG_SMP */
1041
1042
1043struct io_context;                      /* See blkdev.h */
1044
1045
1046#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1047extern void prefetch_stack(struct task_struct *t);
1048#else
1049static inline void prefetch_stack(struct task_struct *t) { }
1050#endif
1051
1052struct audit_context;           /* See audit.c */
1053struct mempolicy;
1054struct pipe_inode_info;
1055struct uts_namespace;
1056
1057struct rq;
1058struct sched_domain;
1059
1060/*
1061 * wake flags
1062 */
 1063#define WF_SYNC         0x01            /* waker goes to sleep after wakeup */
1064#define WF_FORK         0x02            /* child wakeup after fork */
1065
1066struct sched_class {
1067        const struct sched_class *next;
1068
1069        void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
1070        void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
1071        void (*yield_task) (struct rq *rq);
1072
1073        void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1074
1075        struct task_struct * (*pick_next_task) (struct rq *rq);
1076        void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1077
1078#ifdef CONFIG_SMP
1079        int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
1080
1081        unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
1082                        struct rq *busiest, unsigned long max_load_move,
1083                        struct sched_domain *sd, enum cpu_idle_type idle,
1084                        int *all_pinned, int *this_best_prio);
1085
1086        int (*move_one_task) (struct rq *this_rq, int this_cpu,
1087                              struct rq *busiest, struct sched_domain *sd,
1088                              enum cpu_idle_type idle);
1089        void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1090        void (*post_schedule) (struct rq *this_rq);
1091        void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
1092
1093        void (*set_cpus_allowed)(struct task_struct *p,
1094                                 const struct cpumask *newmask);
1095
1096        void (*rq_online)(struct rq *rq);
1097        void (*rq_offline)(struct rq *rq);
1098#endif
1099
1100        void (*set_curr_task) (struct rq *rq);
1101        void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1102        void (*task_new) (struct rq *rq, struct task_struct *p);
1103
1104        void (*switched_from) (struct rq *this_rq, struct task_struct *task,
1105                               int running);
1106        void (*switched_to) (struct rq *this_rq, struct task_struct *task,
1107                             int running);
1108        void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1109                             int oldprio, int running);
1110
1111        unsigned int (*get_rr_interval) (struct task_struct *task);
1112
1113#ifdef CONFIG_FAIR_GROUP_SCHED
1114        void (*moved_group) (struct task_struct *p);
1115#endif
1116};
1117
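/*
 * Illustrative sketch (field values assumed, see kernel/sched_fair.c for the
 * real thing): each scheduling policy supplies one of these operations
 * structures, e.g.
 *
 *	static const struct sched_class fair_sched_class = {
 *		.next			= &idle_sched_class,
 *		.enqueue_task		= enqueue_task_fair,
 *		.dequeue_task		= dequeue_task_fair,
 *		.pick_next_task		= pick_next_task_fair,
 *		...
 *	};
 *
 * The core scheduler walks the ->next chain from the highest-priority class
 * downwards until one of them returns a runnable task.
 */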
1118struct load_weight {
1119        unsigned long weight, inv_weight;
1120};
1121
1122/*
1123 * CFS stats for a schedulable entity (task, task-group etc)
1124 *
1125 * Current field usage histogram:
1126 *
1127 *     4 se->block_start
1128 *     4 se->run_node
1129 *     4 se->sleep_start
1130 *     6 se->load.weight
1131 */
1132struct sched_entity {
1133        struct load_weight      load;           /* for load-balancing */
1134        struct rb_node          run_node;
1135        struct list_head        group_node;
1136        unsigned int            on_rq;
1137
1138        u64                     exec_start;
1139        u64                     sum_exec_runtime;
1140        u64                     vruntime;
1141        u64                     prev_sum_exec_runtime;
1142
1143        u64                     last_wakeup;
1144        u64                     avg_overlap;
1145
1146        u64                     nr_migrations;
1147
1148        u64                     start_runtime;
1149        u64                     avg_wakeup;
1150
1151        u64                     avg_running;
1152
1153#ifdef CONFIG_SCHEDSTATS
1154        u64                     wait_start;
1155        u64                     wait_max;
1156        u64                     wait_count;
1157        u64                     wait_sum;
1158        u64                     iowait_count;
1159        u64                     iowait_sum;
1160
1161        u64                     sleep_start;
1162        u64                     sleep_max;
1163        s64                     sum_sleep_runtime;
1164
1165        u64                     block_start;
1166        u64                     block_max;
1167        u64                     exec_max;
1168        u64                     slice_max;
1169
1170        u64                     nr_migrations_cold;
1171        u64                     nr_failed_migrations_affine;
1172        u64                     nr_failed_migrations_running;
1173        u64                     nr_failed_migrations_hot;
1174        u64                     nr_forced_migrations;
1175        u64                     nr_forced2_migrations;
1176
1177        u64                     nr_wakeups;
1178        u64                     nr_wakeups_sync;
1179        u64                     nr_wakeups_migrate;
1180        u64                     nr_wakeups_local;
1181        u64                     nr_wakeups_remote;
1182        u64                     nr_wakeups_affine;
1183        u64                     nr_wakeups_affine_attempts;
1184        u64                     nr_wakeups_passive;
1185        u64                     nr_wakeups_idle;
1186#endif
1187
1188#ifdef CONFIG_FAIR_GROUP_SCHED
1189        struct sched_entity     *parent;
1190        /* rq on which this entity is (to be) queued: */
1191        struct cfs_rq           *cfs_rq;
1192        /* rq "owned" by this entity/group: */
1193        struct cfs_rq           *my_q;
1194#endif
1195};
1196
1197struct sched_rt_entity {
1198        struct list_head run_list;
1199        unsigned long timeout;
1200        unsigned int time_slice;
1201        int nr_cpus_allowed;
1202
1203        struct sched_rt_entity *back;
1204#ifdef CONFIG_RT_GROUP_SCHED
1205        struct sched_rt_entity  *parent;
1206        /* rq on which this entity is (to be) queued: */
1207        struct rt_rq            *rt_rq;
1208        /* rq "owned" by this entity/group: */
1209        struct rt_rq            *my_q;
1210#endif
1211};
1212
1213struct rcu_node;
1214
1215struct task_struct {
1216        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
1217        void *stack;
1218        atomic_t usage;
1219        unsigned int flags;     /* per process flags, defined below */
1220        unsigned int ptrace;
1221
1222        int lock_depth;         /* BKL lock depth */
1223
1224#ifdef CONFIG_SMP
1225#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1226        int oncpu;
1227#endif
1228#endif
1229
1230        int prio, static_prio, normal_prio;
1231        unsigned int rt_priority;
1232        const struct sched_class *sched_class;
1233        struct sched_entity se;
1234        struct sched_rt_entity rt;
1235
1236#ifdef CONFIG_PREEMPT_NOTIFIERS
1237        /* list of struct preempt_notifier: */
1238        struct hlist_head preempt_notifiers;
1239#endif
1240
1241        /*
 1242         * fpu_counter contains the number of consecutive context switches
 1243         * during which the FPU is used. If this is over a threshold, the lazy
 1244         * FPU saving becomes unlazy to save the trap. This is an unsigned char
 1245         * so that after 256 times the counter wraps and the behavior turns
 1246         * lazy again; this is to deal with bursty apps that only use the FPU
 1247         * for a short time.
1248         */
1249        unsigned char fpu_counter;
1250#ifdef CONFIG_BLK_DEV_IO_TRACE
1251        unsigned int btrace_seq;
1252#endif
1253
1254        unsigned int policy;
1255        cpumask_t cpus_allowed;
1256
1257#ifdef CONFIG_TREE_PREEMPT_RCU
1258        int rcu_read_lock_nesting;
1259        char rcu_read_unlock_special;
1260        struct rcu_node *rcu_blocked_node;
1261        struct list_head rcu_node_entry;
1262#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1263
1264#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1265        struct sched_info sched_info;
1266#endif
1267
1268        struct list_head tasks;
1269        struct plist_node pushable_tasks;
1270
1271        struct mm_struct *mm, *active_mm;
1272
1273/* task state */
1274        int exit_state;
1275        int exit_code, exit_signal;
1276        int pdeath_signal;  /*  The signal sent when the parent dies  */
1277        /* ??? */
1278        unsigned int personality;
1279        unsigned did_exec:1;
1280        unsigned in_execve:1;   /* Tell the LSMs that the process is doing an
1281                                 * execve */
1282        unsigned in_iowait:1;
1283
1284
1285        /* Revert to default priority/policy when forking */
1286        unsigned sched_reset_on_fork:1;
1287
1288        pid_t pid;
1289        pid_t tgid;
1290
1291#ifdef CONFIG_CC_STACKPROTECTOR
1292        /* Canary value for the -fstack-protector gcc feature */
1293        unsigned long stack_canary;
1294#endif
1295
1296        /* 
1297         * pointers to (original) parent process, youngest child, younger sibling,
1298         * older sibling, respectively.  (p->father can be replaced with 
1299         * p->real_parent->pid)
1300         */
1301        struct task_struct *real_parent; /* real parent process */
1302        struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
1303        /*
1304         * children/sibling forms the list of my natural children
1305         */
1306        struct list_head children;      /* list of my children */
1307        struct list_head sibling;       /* linkage in my parent's children list */
1308        struct task_struct *group_leader;       /* threadgroup leader */
1309
1310        /*
1311         * ptraced is the list of tasks this task is using ptrace on.
1312         * This includes both natural children and PTRACE_ATTACH targets.
1313         * p->ptrace_entry is p's link on the p->parent->ptraced list.
1314         */
1315        struct list_head ptraced;
1316        struct list_head ptrace_entry;
1317
1318        /*
1319         * This is the tracer handle for the ptrace BTS extension.
1320         * This field actually belongs to the ptracer task.
1321         */
1322        struct bts_context *bts;
1323
1324        /* PID/PID hash table linkage. */
1325        struct pid_link pids[PIDTYPE_MAX];
1326        struct list_head thread_group;
1327
1328        struct completion *vfork_done;          /* for vfork() */
1329        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
1330        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
1331
1332        cputime_t utime, stime, utimescaled, stimescaled;
1333        cputime_t gtime;
1334        cputime_t prev_utime, prev_stime;
1335        unsigned long nvcsw, nivcsw; /* context switch counts */
1336        struct timespec start_time;             /* monotonic time */
1337        struct timespec real_start_time;        /* boot based time */
1338/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1339        unsigned long min_flt, maj_flt;
1340
1341        struct task_cputime cputime_expires;
1342        struct list_head cpu_timers[3];
1343
1344/* process credentials */
1345        const struct cred *real_cred;   /* objective and real subjective task
1346                                         * credentials (COW) */
1347        const struct cred *cred;        /* effective (overridable) subjective task
1348                                         * credentials (COW) */
1349        struct mutex cred_guard_mutex;  /* guard against foreign influences on
1350                                         * credential calculations
 1351                                         * (notably ptrace) */
1352        struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1353
1354        char comm[TASK_COMM_LEN]; /* executable name excluding path
1355                                     - access with [gs]et_task_comm (which lock
1356                                       it with task_lock())
1357                                     - initialized normally by flush_old_exec */
1358/* file system info */
1359        int link_count, total_link_count;
1360#ifdef CONFIG_SYSVIPC
1361/* ipc stuff */
1362        struct sysv_sem sysvsem;
1363#endif
1364#ifdef CONFIG_DETECT_HUNG_TASK
1365/* hung task detection */
1366        unsigned long last_switch_count;
1367#endif
1368/* CPU-specific state of this task */
1369        struct thread_struct thread;
1370/* filesystem information */
1371        struct fs_struct *fs;
1372/* open file information */
1373        struct files_struct *files;
1374/* namespaces */
1375        struct nsproxy *nsproxy;
1376/* signal handlers */
1377        struct signal_struct *signal;
1378        struct sighand_struct *sighand;
1379
1380        sigset_t blocked, real_blocked;
1381        sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
1382        struct sigpending pending;
1383
1384        unsigned long sas_ss_sp;
1385        size_t sas_ss_size;
1386        int (*notifier)(void *priv);
1387        void *notifier_data;
1388        sigset_t *notifier_mask;
1389        struct audit_context *audit_context;
1390#ifdef CONFIG_AUDITSYSCALL
1391        uid_t loginuid;
1392        unsigned int sessionid;
1393#endif
1394        seccomp_t seccomp;
1395
1396/* Thread group tracking */
1397        u32 parent_exec_id;
1398        u32 self_exec_id;
1399/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1400 * mempolicy */
1401        spinlock_t alloc_lock;
1402
1403#ifdef CONFIG_GENERIC_HARDIRQS
1404        /* IRQ handler threads */
1405        struct irqaction *irqaction;
1406#endif
1407
1408        /* Protection of the PI data structures: */
1409        spinlock_t pi_lock;
1410
1411#ifdef CONFIG_RT_MUTEXES
1412        /* PI waiters blocked on a rt_mutex held by this task */
1413        struct plist_head pi_waiters;
1414        /* Deadlock detection and priority inheritance handling */
1415        struct rt_mutex_waiter *pi_blocked_on;
1416#endif
1417
1418#ifdef CONFIG_DEBUG_MUTEXES
1419        /* mutex deadlock detection */
1420        struct mutex_waiter *blocked_on;
1421#endif
1422#ifdef CONFIG_TRACE_IRQFLAGS
1423        unsigned int irq_events;
1424        int hardirqs_enabled;
1425        unsigned long hardirq_enable_ip;
1426        unsigned int hardirq_enable_event;
1427        unsigned long hardirq_disable_ip;
1428        unsigned int hardirq_disable_event;
1429        int softirqs_enabled;
1430        unsigned long softirq_disable_ip;
1431        unsigned int softirq_disable_event;
1432        unsigned long softirq_enable_ip;
1433        unsigned int softirq_enable_event;
1434        int hardirq_context;
1435        int softirq_context;
1436#endif
1437#ifdef CONFIG_LOCKDEP
1438# define MAX_LOCK_DEPTH 48UL
1439        u64 curr_chain_key;
1440        int lockdep_depth;
1441        unsigned int lockdep_recursion;
1442        struct held_lock held_locks[MAX_LOCK_DEPTH];
1443        gfp_t lockdep_reclaim_gfp;
1444#endif
1445
1446/* journalling filesystem info */
1447        void *journal_info;
1448
1449/* stacked block device info */
1450        struct bio *bio_list, **bio_tail;
1451
1452/* VM state */
1453        struct reclaim_state *reclaim_state;
1454
1455        struct backing_dev_info *backing_dev_info;
1456
1457        struct io_context *io_context;
1458
1459        unsigned long ptrace_message;
1460        siginfo_t *last_siginfo; /* For ptrace use.  */
1461        struct task_io_accounting ioac;
1462#if defined(CONFIG_TASK_XACCT)
1463        u64 acct_rss_mem1;      /* accumulated rss usage */
1464        u64 acct_vm_mem1;       /* accumulated virtual memory usage */
1465        cputime_t acct_timexpd; /* stime + utime since last update */
1466#endif
1467#ifdef CONFIG_CPUSETS
1468        nodemask_t mems_allowed;        /* Protected by alloc_lock */
1469        int cpuset_mem_spread_rotor;
1470#endif
1471#ifdef CONFIG_CGROUPS
1472        /* Control Group info protected by css_set_lock */
1473        struct css_set *cgroups;
1474        /* cg_list protected by css_set_lock and tsk->alloc_lock */
1475        struct list_head cg_list;
1476#endif
1477#ifdef CONFIG_FUTEX
1478        struct robust_list_head __user *robust_list;
1479#ifdef CONFIG_COMPAT
1480        struct compat_robust_list_head __user *compat_robust_list;
1481#endif
1482        struct list_head pi_state_list;
1483        struct futex_pi_state *pi_state_cache;
1484#endif
1485#ifdef CONFIG_PERF_EVENTS
1486        struct perf_event_context *perf_event_ctxp;
1487        struct mutex perf_event_mutex;
1488        struct list_head perf_event_list;
1489#endif
1490#ifdef CONFIG_NUMA
1491        struct mempolicy *mempolicy;    /* Protected by alloc_lock */
1492        short il_next;
1493#endif
1494        atomic_t fs_excl;       /* holding fs exclusive resources */
1495        struct rcu_head rcu;
1496
1497        /*
1498         * cache last used pipe for splice
1499         */
1500        struct pipe_inode_info *splice_pipe;
1501#ifdef  CONFIG_TASK_DELAY_ACCT
1502        struct task_delay_info *delays;
1503#endif
1504#ifdef CONFIG_FAULT_INJECTION
1505        int make_it_fail;
1506#endif
1507        struct prop_local_single dirties;
1508#ifdef CONFIG_LATENCYTOP
1509        int latency_record_count;
1510        struct latency_record latency_record[LT_SAVECOUNT];
1511#endif
1512        /*
1513         * time slack values; these are used to round up poll() and
1514         * select() etc timeout values. These are in nanoseconds.
1515         */
1516        unsigned long timer_slack_ns;
1517        unsigned long default_timer_slack_ns;
1518
1519        struct list_head        *scm_work_list;
1520#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1521        /* Index of current stored address in ret_stack */
1522        int curr_ret_stack;
1523        /* Stack of return addresses for return function tracing */
1524        struct ftrace_ret_stack *ret_stack;
1525        /* time stamp for last schedule */
1526        unsigned long long ftrace_timestamp;
1527        /*
1528         * Number of functions that haven't been traced
1529         * because of depth overrun.
1530         */
1531        atomic_t trace_overrun;
1532        /* Pause for the tracing */
1533        atomic_t tracing_graph_pause;
1534#endif
1535#ifdef CONFIG_TRACING
1536        /* state flags for use by tracers */
1537        unsigned long trace;
1538        /* bitmask of trace recursion */
1539        unsigned long trace_recursion;
1540#endif /* CONFIG_TRACING */
1541        unsigned long stack_start;
1542};
1543
1544/* Future-safe accessor for struct task_struct's cpus_allowed. */
1545#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
1546
1547/*
1548 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
1549 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
1550 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
1551 * values are inverted: lower p->prio value means higher priority.
1552 *
1553 * The MAX_USER_RT_PRIO value allows the actual maximum
1554 * RT priority to be separate from the value exported to
1555 * user-space.  This allows kernel threads to set their
1556 * priority to a value higher than any user task. Note:
1557 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
1558 */
1559
1560#define MAX_USER_RT_PRIO        100
1561#define MAX_RT_PRIO             MAX_USER_RT_PRIO
1562
1563#define MAX_PRIO                (MAX_RT_PRIO + 40)
1564#define DEFAULT_PRIO            (MAX_RT_PRIO + 20)
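
/*
 * Illustrative sketch (editorial example, not part of the original
 * header): how the ranges above map nice values onto p->prio for
 * SCHED_NORMAL tasks.  The helper name is hypothetical; the kernel
 * does the equivalent NICE_TO_PRIO() arithmetic in kernel/sched.c.
 */
static inline int example_nice_to_prio(long nice)
{
        /* nice -20..19 lands in MAX_RT_PRIO..MAX_PRIO-1, i.e. 100..139 */
        return MAX_RT_PRIO + 20 + (int)nice;
}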
1565
1566static inline int rt_prio(int prio)
1567{
1568        if (unlikely(prio < MAX_RT_PRIO))
1569                return 1;
1570        return 0;
1571}
1572
1573static inline int rt_task(struct task_struct *p)
1574{
1575        return rt_prio(p->prio);
1576}
1577
1578static inline struct pid *task_pid(struct task_struct *task)
1579{
1580        return task->pids[PIDTYPE_PID].pid;
1581}
1582
1583static inline struct pid *task_tgid(struct task_struct *task)
1584{
1585        return task->group_leader->pids[PIDTYPE_PID].pid;
1586}
1587
1588/*
1589 * Without tasklist or rcu lock it is not safe to dereference
1590 * the result of task_pgrp/task_session even if task == current,
1591 * we can race with another thread doing sys_setsid/sys_setpgid.
1592 */
1593static inline struct pid *task_pgrp(struct task_struct *task)
1594{
1595        return task->group_leader->pids[PIDTYPE_PGID].pid;
1596}
1597
1598static inline struct pid *task_session(struct task_struct *task)
1599{
1600        return task->group_leader->pids[PIDTYPE_SID].pid;
1601}
1602
1603struct pid_namespace;
1604
1605/*
1606 * the helpers to get the task's different pids as they are seen
1607 * from various namespaces
1608 *
1609 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1610 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1611 *                     current.
1612 * task_xid_nr_ns()  : id seen from the ns specified;
1613 *
1614 * set_task_vxid()   : assigns a virtual id to a task;
1615 *
1616 * see also pid_nr() etc in include/linux/pid.h
1617 */
1618pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1619                        struct pid_namespace *ns);
1620
1621static inline pid_t task_pid_nr(struct task_struct *tsk)
1622{
1623        return tsk->pid;
1624}
1625
1626static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1627                                        struct pid_namespace *ns)
1628{
1629        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1630}
1631
1632static inline pid_t task_pid_vnr(struct task_struct *tsk)
1633{
1634        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1635}
1636
1637
1638static inline pid_t task_tgid_nr(struct task_struct *tsk)
1639{
1640        return tsk->tgid;
1641}
1642
1643pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1644
1645static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1646{
1647        return pid_vnr(task_tgid(tsk));
1648}
1649
1650
1651static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1652                                        struct pid_namespace *ns)
1653{
1654        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1655}
1656
1657static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1658{
1659        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1660}
1661
1662
1663static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1664                                        struct pid_namespace *ns)
1665{
1666        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1667}
1668
1669static inline pid_t task_session_vnr(struct task_struct *tsk)
1670{
1671        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1672}
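
/*
 * Illustrative sketch (editorial example): typical use of the helpers
 * above.  A /proc-style reader would report the id as seen from the
 * reading task's namespace, while the global id suits kernel logs.
 * The function name is hypothetical.
 */
static inline void example_report_tgid(struct task_struct *tsk)
{
        pid_t global = task_tgid_nr(tsk);       /* id in the init namespace */
        pid_t virt = task_tgid_vnr(tsk);        /* id as current sees it    */

        printk(KERN_DEBUG "task tgid %d (virtual %d)\n", global, virt);
}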
1673
1674/* obsolete, do not use */
1675static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1676{
1677        return task_pgrp_nr_ns(tsk, &init_pid_ns);
1678}
1679
1680/**
1681 * pid_alive - check that a task structure is not stale
1682 * @p: Task structure to be checked.
1683 *
1684 * Test if a process is not yet dead (at most zombie state).
1685 * If pid_alive fails, then pointers within the task structure
1686 * can be stale and must not be dereferenced.
1687 */
1688static inline int pid_alive(struct task_struct *p)
1689{
1690        return p->pids[PIDTYPE_PID].pid != NULL;
1691}
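
/*
 * Illustrative sketch (editorial example): pid_alive() is only
 * meaningful while the task cannot be released, e.g. under
 * rcu_read_lock() or tasklist_lock.  The function name is hypothetical.
 */
static inline pid_t example_tgid_if_alive(struct task_struct *p)
{
        pid_t nr = 0;

        rcu_read_lock();
        if (pid_alive(p))
                nr = task_tgid_nr(p);
        rcu_read_unlock();

        return nr;
}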
1692
1693/**
1694 * is_global_init - check if a task structure is init
1695 * @tsk: Task structure to be checked.
1696 *
1697 * Check if a task structure is the first user space task the kernel created.
1698 */
1699static inline int is_global_init(struct task_struct *tsk)
1700{
1701        return tsk->pid == 1;
1702}
1703
1704/*
1705 * is_container_init:
1706 * check whether the task is init in its own pid namespace.
1707 */
1708extern int is_container_init(struct task_struct *tsk);
1709
1710extern struct pid *cad_pid;
1711
1712extern void free_task(struct task_struct *tsk);
1713#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1714
1715extern void __put_task_struct(struct task_struct *t);
1716
1717static inline void put_task_struct(struct task_struct *t)
1718{
1719        if (atomic_dec_and_test(&t->usage))
1720                __put_task_struct(t);
1721}
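
/*
 * Illustrative sketch (editorial example): pinning a task_struct with
 * get_task_struct() so the pointer stays valid after dropping the lock
 * that located it, and releasing it with put_task_struct() afterwards.
 * The function name is hypothetical.
 */
static inline void example_use_task(struct task_struct *p)
{
        get_task_struct(p);             /* take a reference on p->usage */
        /* ... use p outside of the locks that located it ... */
        put_task_struct(p);             /* may free p via __put_task_struct() */
}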
1722
1723extern cputime_t task_utime(struct task_struct *p);
1724extern cputime_t task_stime(struct task_struct *p);
1725extern cputime_t task_gtime(struct task_struct *p);
1726
1727/*
1728 * Per process flags
1729 */
1730#define PF_ALIGNWARN    0x00000001      /* Print alignment warning msgs */
1731                                        /* Not implemented yet, only for 486*/
1732#define PF_STARTING     0x00000002      /* being created */
1733#define PF_EXITING      0x00000004      /* getting shut down */
1734#define PF_EXITPIDONE   0x00000008      /* pi exit done on shut down */
1735#define PF_VCPU         0x00000010      /* I'm a virtual CPU */
1736#define PF_FORKNOEXEC   0x00000040      /* forked but didn't exec */
1737#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1738#define PF_SUPERPRIV    0x00000100      /* used super-user privileges */
1739#define PF_DUMPCORE     0x00000200      /* dumped core */
1740#define PF_SIGNALED     0x00000400      /* killed by a signal */
1741#define PF_MEMALLOC     0x00000800      /* Allocating memory */
1742#define PF_FLUSHER      0x00001000      /* responsible for disk writeback */
1743#define PF_USED_MATH    0x00002000      /* if unset the fpu must be initialized before use */
1744#define PF_FREEZING     0x00004000      /* freeze in progress. do not account to load */
1745#define PF_NOFREEZE     0x00008000      /* this thread should not be frozen */
1746#define PF_FROZEN       0x00010000      /* frozen for system suspend */
1747#define PF_FSTRANS      0x00020000      /* inside a filesystem transaction */
1748#define PF_KSWAPD       0x00040000      /* I am kswapd */
1749#define PF_OOM_ORIGIN   0x00080000      /* Allocating much memory to others */
1750#define PF_LESS_THROTTLE 0x00100000     /* Throttle me less: I clean memory */
1751#define PF_KTHREAD      0x00200000      /* I am a kernel thread */
1752#define PF_RANDOMIZE    0x00400000      /* randomize virtual address space */
1753#define PF_SWAPWRITE    0x00800000      /* Allowed to write to swap */
1754#define PF_SPREAD_PAGE  0x01000000      /* Spread page cache over cpuset */
1755#define PF_SPREAD_SLAB  0x02000000      /* Spread some slab caches over cpuset */
1756#define PF_THREAD_BOUND 0x04000000      /* Thread bound to specific cpu */
1757#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1758#define PF_MEMPOLICY    0x10000000      /* Non-default NUMA mempolicy */
1759#define PF_MUTEX_TESTER 0x20000000      /* Thread belongs to the rt mutex tester */
1760#define PF_FREEZER_SKIP 0x40000000      /* Freezer should not count it as freezeable */
1761#define PF_FREEZER_NOSIG 0x80000000     /* Freezer won't send signals to it */
1762
1763/*
1764 * Only the _current_ task can read/write to tsk->flags, but other
1765 * tasks can access tsk->flags in readonly mode, for example
1766 * with tsk_used_math() (as during threaded core dumping).
1767 * There is however an exception to this rule during ptrace
1768 * or during fork: the ptracer task is allowed to write to the
1769 * child->flags of its traced child (same goes for fork, the parent
1770 * can write to the child->flags), because we're guaranteed the
1771 * child is not running and in turn not changing child->flags
1772 * at the same time the parent does it.
1773 */
1774#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1775#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1776#define clear_used_math() clear_stopped_child_used_math(current)
1777#define set_used_math() set_stopped_child_used_math(current)
1778#define conditional_stopped_child_used_math(condition, child) \
1779        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1780#define conditional_used_math(condition) \
1781        conditional_stopped_child_used_math(condition, current)
1782#define copy_to_stopped_child_used_math(child) \
1783        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1784/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1785#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1786#define used_math() tsk_used_math(current)
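
/*
 * Illustrative sketch (editorial example): the save/set/restore pattern
 * used for per-process flags, here PF_MEMALLOC around work done on
 * behalf of memory reclaim.  Per the comment above, @tsk is expected to
 * be current; the function name is hypothetical.
 */
static inline void example_memalloc_section(struct task_struct *tsk)
{
        unsigned int pflags = tsk->flags & PF_MEMALLOC;

        tsk->flags |= PF_MEMALLOC;      /* allowed to dip into reserves */
        /* ... allocations needed to make reclaim progress ... */
        tsk->flags = (tsk->flags & ~PF_MEMALLOC) | pflags;
}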
1787
1788#ifdef CONFIG_TREE_PREEMPT_RCU
1789
1790#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
1791#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
1792
1793static inline void rcu_copy_process(struct task_struct *p)
1794{
1795        p->rcu_read_lock_nesting = 0;
1796        p->rcu_read_unlock_special = 0;
1797        p->rcu_blocked_node = NULL;
1798        INIT_LIST_HEAD(&p->rcu_node_entry);
1799}
1800
1801#else
1802
1803static inline void rcu_copy_process(struct task_struct *p)
1804{
1805}
1806
1807#endif
1808
1809#ifdef CONFIG_SMP
1810extern int set_cpus_allowed_ptr(struct task_struct *p,
1811                                const struct cpumask *new_mask);
1812#else
1813static inline int set_cpus_allowed_ptr(struct task_struct *p,
1814                                       const struct cpumask *new_mask)
1815{
1816        if (!cpumask_test_cpu(0, new_mask))
1817                return -EINVAL;
1818        return 0;
1819}
1820#endif
1821
1822#ifndef CONFIG_CPUMASK_OFFSTACK
1823static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1824{
1825        return set_cpus_allowed_ptr(p, &new_mask);
1826}
1827#endif
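
/*
 * Illustrative sketch (editorial example): restricting a task to a
 * single CPU with the helper above.  The function name is
 * hypothetical; cpumask_of() comes from <linux/cpumask.h>.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
        return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}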
1828
1829/*
1830 * Architectures can set this to 1 if they have specified
1831 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1832 * but then during bootup it turns out that sched_clock()
1833 * is reliable after all:
1834 */
1835#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1836extern int sched_clock_stable;
1837#endif
1838
1839extern unsigned long long sched_clock(void);
1840
1841extern void sched_clock_init(void);
1842extern u64 sched_clock_cpu(int cpu);
1843
1844#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1845static inline void sched_clock_tick(void)
1846{
1847}
1848
1849static inline void sched_clock_idle_sleep_event(void)
1850{
1851}
1852
1853static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
1854{
1855}
1856#else
1857extern void sched_clock_tick(void);
1858extern void sched_clock_idle_sleep_event(void);
1859extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1860#endif
1861
1862/*
1863 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
1864 * clock constructed from sched_clock():
1865 */
1866extern unsigned long long cpu_clock(int cpu);
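
/*
 * Illustrative sketch (editorial example): measuring an approximate
 * elapsed time with cpu_clock().  Both samples should come from the
 * same cpu; the function name is hypothetical.
 */
static inline unsigned long long example_elapsed_ns(unsigned long long start_ns)
{
        return cpu_clock(raw_smp_processor_id()) - start_ns;
}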
1867
1868extern unsigned long long
1869task_sched_runtime(struct task_struct *task);
1870extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1871
1872/* sched_exec is called by processes performing an exec */
1873#ifdef CONFIG_SMP
1874extern void sched_exec(void);
1875#else
1876#define sched_exec()   {}
1877#endif
1878
1879extern void sched_clock_idle_sleep_event(void);
1880extern void sched_clock_idle_wakeup_event(u64 delta_ns);
1881
1882#ifdef CONFIG_HOTPLUG_CPU
1883extern void idle_task_exit(void);
1884#else
1885static inline void idle_task_exit(void) {}
1886#endif
1887
1888extern void sched_idle_next(void);
1889
1890#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
1891extern void wake_up_idle_cpu(int cpu);
1892#else
1893static inline void wake_up_idle_cpu(int cpu) { }
1894#endif
1895
1896extern unsigned int sysctl_sched_latency;
1897extern unsigned int sysctl_sched_min_granularity;
1898extern unsigned int sysctl_sched_wakeup_granularity;
1899extern unsigned int sysctl_sched_shares_ratelimit;
1900extern unsigned int sysctl_sched_shares_thresh;
1901extern unsigned int sysctl_sched_child_runs_first;
1902#ifdef CONFIG_SCHED_DEBUG
1903extern unsigned int sysctl_sched_features;
1904extern unsigned int sysctl_sched_migration_cost;
1905extern unsigned int sysctl_sched_nr_migrate;
1906extern unsigned int sysctl_sched_time_avg;
1907extern unsigned int sysctl_timer_migration;
1908
1909int sched_nr_latency_handler(struct ctl_table *table, int write,
1910                void __user *buffer, size_t *length,
1911                loff_t *ppos);
1912#endif
1913#ifdef CONFIG_SCHED_DEBUG
1914static inline unsigned int get_sysctl_timer_migration(void)
1915{
1916        return sysctl_timer_migration;
1917}
1918#else
1919static inline unsigned int get_sysctl_timer_migration(void)
1920{
1921        return 1;
1922}
1923#endif
1924extern unsigned int sysctl_sched_rt_period;
1925extern int sysctl_sched_rt_runtime;
1926
1927int sched_rt_handler(struct ctl_table *table, int write,
1928                void __user *buffer, size_t *lenp,
1929                loff_t *ppos);
1930
1931extern unsigned int sysctl_sched_compat_yield;
1932
1933#ifdef CONFIG_RT_MUTEXES
1934extern int rt_mutex_getprio(struct task_struct *p);
1935extern void rt_mutex_setprio(struct task_struct *p, int prio);
1936extern void rt_mutex_adjust_pi(struct task_struct *p);
1937#else
1938static inline int rt_mutex_getprio(struct task_struct *p)
1939{
1940        return p->normal_prio;
1941}
1942# define rt_mutex_adjust_pi(p)          do { } while (0)
1943#endif
1944
1945extern void set_user_nice(struct task_struct *p, long nice);
1946extern int task_prio(const struct task_struct *p);
1947extern int task_nice(const struct task_struct *p);
1948extern int can_nice(const struct task_struct *p, const int nice);
1949extern int task_curr(const struct task_struct *p);
1950extern int idle_cpu(int cpu);
1951extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
1952extern int sched_setscheduler_nocheck(struct task_struct *, int,
1953                                      struct sched_param *);
1954extern struct task_struct *idle_task(int cpu);
1955extern struct task_struct *curr_task(int cpu);
1956extern void set_curr_task(int cpu, struct task_struct *p);
1957
1958void yield(void);
1959
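/*
 * Illustrative sketch (editorial example): switching a kernel thread to
 * a real-time policy with sched_setscheduler_nocheck(), which skips the
 * capability check that sched_setscheduler() performs.  The function
 * name and the chosen priority are only examples.
 */
static inline int example_make_fifo(struct task_struct *p)
{
        struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };

        return sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
}
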
1960/*
1961 * The default (Linux) execution domain.
1962 */
1963extern struct exec_domain       default_exec_domain;
1964
1965union thread_union {
1966        struct thread_info thread_info;
1967        unsigned long stack[THREAD_SIZE/sizeof(long)];
1968};
1969
1970#ifndef __HAVE_ARCH_KSTACK_END
1971static inline int kstack_end(void *addr)
1972{
1973        /* Reliable end of stack detection:
1974         * Some APM bios versions misalign the stack
1975         * Some APM BIOS versions misalign the stack
1976        return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1977}
1978#endif
1979
1980extern union thread_union init_thread_union;
1981extern struct task_struct init_task;
1982
1983extern struct   mm_struct init_mm;
1984
1985extern struct pid_namespace init_pid_ns;
1986
1987/*
1988 * find a task by one of its numerical ids
1989 *
1990 * find_task_by_pid_ns():
1991 *      finds a task by its pid in the specified namespace
1992 * find_task_by_vpid():
1993 *      finds a task by its virtual pid
1994 *
1995 * see also find_vpid() etc in include/linux/pid.h
1996 */
1997
1998extern struct task_struct *find_task_by_vpid(pid_t nr);
1999extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2000                struct pid_namespace *ns);
2001
2002extern void __set_special_pids(struct pid *pid);
2003
2004/* per-UID process charging. */
2005extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
2006static inline struct user_struct *get_uid(struct user_struct *u)
2007{
2008        atomic_inc(&u->__count);
2009        return u;
2010}
2011extern void free_uid(struct user_struct *);
2012extern void release_uids(struct user_namespace *ns);
2013
2014#include <asm/current.h>
2015
2016extern void do_timer(unsigned long ticks);
2017
2018extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2019extern int wake_up_process(struct task_struct *tsk);
2020extern void wake_up_new_task(struct task_struct *tsk,
2021                                unsigned long clone_flags);
2022#ifdef CONFIG_SMP
2023 extern void kick_process(struct task_struct *tsk);
2024#else
2025 static inline void kick_process(struct task_struct *tsk) { }
2026#endif
2027extern void sched_fork(struct task_struct *p, int clone_flags);
2028extern void sched_dead(struct task_struct *p);
2029
2030extern void proc_caches_init(void);
2031extern void flush_signals(struct task_struct *);
2032extern void __flush_signals(struct task_struct *);
2033extern void ignore_signals(struct task_struct *);
2034extern void flush_signal_handlers(struct task_struct *, int force_default);
2035extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2036
2037static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2038{
2039        unsigned long flags;
2040        int ret;
2041
2042        spin_lock_irqsave(&tsk->sighand->siglock, flags);
2043        ret = dequeue_signal(tsk, mask, info);
2044        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2045
2046        return ret;
2047}
2048
2049extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2050                              sigset_t *mask);
2051extern void unblock_all_signals(void);
2052extern void release_task(struct task_struct * p);
2053extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2054extern int force_sigsegv(int, struct task_struct *);
2055extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2056extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2057extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2058extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
2059extern int kill_pgrp(struct pid *pid, int sig, int priv);
2060extern int kill_pid(struct pid *pid, int sig, int priv);
2061extern int kill_proc_info(int, struct siginfo *, pid_t);
2062extern int do_notify_parent(struct task_struct *, int);
2063extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2064extern void force_sig(int, struct task_struct *);
2065extern void force_sig_specific(int, struct task_struct *);
2066extern int send_sig(int, struct task_struct *, int);
2067extern void zap_other_threads(struct task_struct *p);
2068extern struct sigqueue *sigqueue_alloc(void);
2069extern void sigqueue_free(struct sigqueue *);
2070extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2071extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2072extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2073
2074static inline int kill_cad_pid(int sig, int priv)
2075{
2076        return kill_pid(cad_pid, sig, priv);
2077}
2078
2079/* These can be the second arg to send_sig_info/send_group_sig_info.  */
2080#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2081#define SEND_SIG_PRIV   ((struct siginfo *) 1)
2082#define SEND_SIG_FORCED ((struct siginfo *) 2)
2083
2084static inline int is_si_special(const struct siginfo *info)
2085{
2086        return info <= SEND_SIG_FORCED;
2087}
2088
2089/* True if we are on the alternate signal stack.  */
2090
2091static inline int on_sig_stack(unsigned long sp)
2092{
2093        return (sp - current->sas_ss_sp < current->sas_ss_size);
2094}
2095
2096static inline int sas_ss_flags(unsigned long sp)
2097{
2098        return (current->sas_ss_size == 0 ? SS_DISABLE
2099                : on_sig_stack(sp) ? SS_ONSTACK : 0);
2100}
2101
2102/*
2103 * Routines for handling mm_structs
2104 */
2105extern struct mm_struct * mm_alloc(void);
2106
2107/* mmdrop drops the mm and the page tables */
2108extern void __mmdrop(struct mm_struct *);
2109static inline void mmdrop(struct mm_struct * mm)
2110{
2111        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2112                __mmdrop(mm);
2113}
2114
2115/* mmput gets rid of the mappings and all user-space */
2116extern void mmput(struct mm_struct *);
2117/* Grab a reference to a task's mm, if it is not already going away */
2118extern struct mm_struct *get_task_mm(struct task_struct *task);
2119/* Remove the current task's stale references to the old mm_struct */
2120extern void mm_release(struct task_struct *, struct mm_struct *);
2121/* Allocate a new mm structure and copy contents from tsk->mm */
2122extern struct mm_struct *dup_mm(struct task_struct *tsk);
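
/*
 * Illustrative sketch (editorial example): the usual pairing of
 * get_task_mm() and mmput() when another task's address space has to be
 * examined.  The function name is hypothetical.
 */
static inline unsigned long example_total_vm(struct task_struct *tsk)
{
        struct mm_struct *mm = get_task_mm(tsk);
        unsigned long total = 0;

        if (mm) {
                total = mm->total_vm;   /* pages mapped by tsk */
                mmput(mm);              /* drop the reference again */
        }
        return total;
}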
2123
2124extern int copy_thread(unsigned long, unsigned long, unsigned long,
2125                        struct task_struct *, struct pt_regs *);
2126extern void flush_thread(void);
2127extern void exit_thread(void);
2128
2129extern void exit_files(struct task_struct *);
2130extern void __cleanup_signal(struct signal_struct *);
2131extern void __cleanup_sighand(struct sighand_struct *);
2132
2133extern void exit_itimers(struct signal_struct *);
2134extern void flush_itimer_signals(void);
2135
2136extern NORET_TYPE void do_group_exit(int);
2137
2138extern void daemonize(const char *, ...);
2139extern int allow_signal(int);
2140extern int disallow_signal(int);
2141
2142extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
2143extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
2144struct task_struct *fork_idle(int);
2145
2146extern void set_task_comm(struct task_struct *tsk, char *from);
2147extern char *get_task_comm(char *to, struct task_struct *tsk);
2148
2149#ifdef CONFIG_SMP
2150extern void wait_task_context_switch(struct task_struct *p);
2151extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2152#else
2153static inline void wait_task_context_switch(struct task_struct *p) {}
2154static inline unsigned long wait_task_inactive(struct task_struct *p,
2155                                               long match_state)
2156{
2157        return 1;
2158}
2159#endif
2160
2161#define next_task(p) \
2162        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2163
2164#define for_each_process(p) \
2165        for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2166
2167extern bool current_is_single_threaded(void);
2168
2169/*
2170 * Careful: do_each_thread/while_each_thread is a double loop so
2171 *          'break' will not work as expected - use goto instead.
2172 */
2173#define do_each_thread(g, t) \
2174        for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2175
2176#define while_each_thread(g, t) \
2177        while ((t = next_thread(t)) != g)
2178
2179/* de_thread depends on thread_group_leader not being a pid based check */
2180#define thread_group_leader(p)  (p == p->group_leader)
2181
2182/* Due to the insanities of de_thread it is possible for a process
2183 * to have the pid of the thread group leader without actually being
2184 * the thread group leader.  For iteration through the pids in proc
2185 * all we care about is that we have a task with the appropriate
2186 * pid; we don't actually care if we have the right task.
2187 */
2188static inline int has_group_leader_pid(struct task_struct *p)
2189{
2190        return p->pid == p->tgid;
2191}
2192
2193static inline
2194int same_thread_group(struct task_struct *p1, struct task_struct *p2)
2195{
2196        return p1->tgid == p2->tgid;
2197}
2198
2199static inline struct task_struct *next_thread(const struct task_struct *p)
2200{
2201        return list_entry_rcu(p->thread_group.next,
2202                              struct task_struct, thread_group);
2203}
2204
2205static inline int thread_group_empty(struct task_struct *p)
2206{
2207        return list_empty(&p->thread_group);
2208}
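
/*
 * Illustrative sketch (editorial example): walking every thread in the
 * system with do_each_thread()/while_each_thread() above.  The caller
 * must hold tasklist_lock (or rcu_read_lock()); the function name is
 * hypothetical.
 */
static inline int example_count_threads(void)
{
        struct task_struct *g, *t;
        int nr = 0;

        do_each_thread(g, t) {
                nr++;
        } while_each_thread(g, t);

        return nr;
}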
2209
2210#define delay_group_leader(p) \
2211                (thread_group_leader(p) && !thread_group_empty(p))
2212
2213static inline int task_detached(struct task_struct *p)
2214{
2215        return p->exit_signal == -1;
2216}
2217
2218/*
2219 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2220 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2221 * pins the final release of task.io_context.  Also protects ->cpuset and
2222 * ->cgroup.subsys[].
2223 *
2224 * Nests both inside and outside of read_lock(&tasklist_lock).
2225 * It must not be nested with write_lock_irq(&tasklist_lock),
2226 * neither inside nor outside.
2227 */
2228static inline void task_lock(struct task_struct *p)
2229{
2230        spin_lock(&p->alloc_lock);
2231}
2232
2233static inline void task_unlock(struct task_struct *p)
2234{
2235        spin_unlock(&p->alloc_lock);
2236}
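
/*
 * Illustrative sketch (editorial example): task_lock() guarding a read
 * of ->comm, one of the fields listed above.  Assumes <linux/string.h>;
 * the function name is hypothetical (get_task_comm() does the same job).
 */
static inline void example_copy_comm(struct task_struct *p, char *buf, size_t len)
{
        task_lock(p);
        strlcpy(buf, p->comm, len);     /* ->comm cannot change under us */
        task_unlock(p);
}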
2237
2238extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2239                                                        unsigned long *flags);
2240
2241static inline void unlock_task_sighand(struct task_struct *tsk,
2242                                                unsigned long *flags)
2243{
2244        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2245}
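
/*
 * Illustrative sketch (editorial example): lock_task_sighand() returns
 * the sighand only if the task has not been released, so the NULL check
 * doubles as a liveness test.  The function name is hypothetical.
 */
static inline int example_sigkill_queued(struct task_struct *tsk)
{
        unsigned long flags;
        int queued = 0;

        if (lock_task_sighand(tsk, &flags)) {
                queued = sigismember(&tsk->pending.signal, SIGKILL);
                unlock_task_sighand(tsk, &flags);
        }
        return queued;
}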
2246
2247#ifndef __HAVE_THREAD_FUNCTIONS
2248
2249#define task_thread_info(task)  ((struct thread_info *)(task)->stack)
2250#define task_stack_page(task)   ((task)->stack)
2251
2252static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2253{
2254        *task_thread_info(p) = *task_thread_info(org);
2255        task_thread_info(p)->task = p;
2256}
2257
2258static inline unsigned long *end_of_stack(struct task_struct *p)
2259{
2260        return (unsigned long *)(task_thread_info(p) + 1);
2261}
2262
2263#endif
2264
2265static inline int object_is_on_stack(void *obj)
2266{
2267        void *stack = task_stack_page(current);
2268
2269        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2270}
2271
2272extern void thread_info_cache_init(void);
2273
2274#ifdef CONFIG_DEBUG_STACK_USAGE
2275static inline unsigned long stack_not_used(struct task_struct *p)
2276{
2277        unsigned long *n = end_of_stack(p);
2278
2279        do {    /* Skip over canary */
2280                n++;
2281        } while (!*n);
2282
2283        return (unsigned long)n - (unsigned long)end_of_stack(p);
2284}
2285#endif
2286
2287/* set thread flags in other task's structures
2288 * - see asm/thread_info.h for TIF_xxxx flags available
2289 */
2290static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2291{
2292        set_ti_thread_flag(task_thread_info(tsk), flag);
2293}
2294
2295static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2296{
2297        clear_ti_thread_flag(task_thread_info(tsk), flag);
2298}
2299
2300static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2301{
2302        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2303}
2304
2305static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2306{
2307        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2308}
2309
2310static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2311{
2312        return test_ti_thread_flag(task_thread_info(tsk), flag);
2313}
2314
2315static inline void set_tsk_need_resched(struct task_struct *tsk)
2316{
2317        set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2318}
2319
2320static inline void clear_tsk_need_resched(struct task_struct *tsk)
2321{
2322        clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2323}
2324
2325static inline int test_tsk_need_resched(struct task_struct *tsk)
2326{
2327        return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2328}
2329
2330static inline int restart_syscall(void)
2331{
2332        set_tsk_thread_flag(current, TIF_SIGPENDING);
2333        return -ERESTARTNOINTR;
2334}
2335
2336static inline int signal_pending(struct task_struct *p)
2337{
2338        return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2339}
2340
2341static inline int __fatal_signal_pending(struct task_struct *p)
2342{
2343        return unlikely(sigismember(&p->pending.signal, SIGKILL));
2344}
2345
2346static inline int fatal_signal_pending(struct task_struct *p)
2347{
2348        return signal_pending(p) && __fatal_signal_pending(p);
2349}
2350
2351static inline int signal_pending_state(long state, struct task_struct *p)
2352{
2353        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2354                return 0;
2355        if (!signal_pending(p))
2356                return 0;
2357
2358        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2359}
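
/*
 * Illustrative sketch (editorial example): an interruptible wait loop
 * that bails out when the helpers above report a pending signal.  The
 * function name and the done() callback are hypothetical.
 */
static inline int example_wait_loop(int (*done)(void))
{
        while (!done()) {
                if (signal_pending(current))
                        return -ERESTARTSYS;
                schedule_timeout_interruptible(1);
        }
        return 0;
}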
2360
2361static inline int need_resched(void)
2362{
2363        return unlikely(test_thread_flag(TIF_NEED_RESCHED));
2364}
2365
2366/*
2367 * cond_resched() and cond_resched_lock(): latency reduction via
2368 * explicit rescheduling in places that are safe. The return
2369 * value indicates whether a reschedule was done in fact.
2370 * cond_resched_lock() will drop the spinlock before scheduling,
2371 * cond_resched_softirq() will enable bhs before scheduling.
2372 */
2373extern int _cond_resched(void);
2374
2375#define cond_resched() ({                       \
2376        __might_sleep(__FILE__, __LINE__, 0);   \
2377        _cond_resched();                        \
2378})
2379
2380extern int __cond_resched_lock(spinlock_t *lock);
2381
2382#ifdef CONFIG_PREEMPT
2383#define PREEMPT_LOCK_OFFSET     PREEMPT_OFFSET
2384#else
2385#define PREEMPT_LOCK_OFFSET     0
2386#endif
2387
2388#define cond_resched_lock(lock) ({                              \
2389        __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
2390        __cond_resched_lock(lock);                              \
2391})
2392
2393extern int __cond_resched_softirq(void);
2394
2395#define cond_resched_softirq() ({                               \
2396        __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);      \
2397        __cond_resched_softirq();                               \
2398})
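
/*
 * Illustrative sketch (editorial example): sprinkling cond_resched()
 * into a long-running loop so other tasks can run without waiting for
 * an involuntary preemption point.  The function name is hypothetical.
 */
static inline void example_long_loop(unsigned long iterations)
{
        unsigned long i;

        for (i = 0; i < iterations; i++) {
                /* ... one bounded unit of work ... */
                cond_resched();
        }
}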
2399
2400/*
2401 * Does a critical section need to be broken due to another
2402 * task waiting? (Technically this does not depend on CONFIG_PREEMPT,
2403 * but it is a general need for low latency.)
2404 */
2405static inline int spin_needbreak(spinlock_t *lock)
2406{
2407#ifdef CONFIG_PREEMPT
2408        return spin_is_contended(lock);
2409#else
2410        return 0;
2411#endif
2412}
2413
2414/*
2415 * Thread group CPU time accounting.
2416 */
2417void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2418void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2419
2420static inline void thread_group_cputime_init(struct signal_struct *sig)
2421{
2422        sig->cputimer.cputime = INIT_CPUTIME;
2423        spin_lock_init(&sig->cputimer.lock);
2424        sig->cputimer.running = 0;
2425}
2426
2427static inline void thread_group_cputime_free(struct signal_struct *sig)
2428{
2429}
2430
2431/*
2432 * Reevaluate whether the task has signals pending delivery.
2433 * Wake the task if so.
2434 * This is required every time the blocked sigset_t changes.
2435 * Callers must hold sighand->siglock.
2436 */
2437extern void recalc_sigpending_and_wake(struct task_struct *t);
2438extern void recalc_sigpending(void);
2439
2440extern void signal_wake_up(struct task_struct *t, int resume_stopped);
2441
2442/*
2443 * Wrappers for p->thread_info->cpu access. No-op on UP.
2444 */
2445#ifdef CONFIG_SMP
2446
2447static inline unsigned int task_cpu(const struct task_struct *p)
2448{
2449        return task_thread_info(p)->cpu;
2450}
2451
2452extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2453
2454#else
2455
2456static inline unsigned int task_cpu(const struct task_struct *p)
2457{
2458        return 0;
2459}
2460
2461static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2462{
2463}
2464
2465#endif /* CONFIG_SMP */
2466
2467extern void arch_pick_mmap_layout(struct mm_struct *mm);
2468
2469#ifdef CONFIG_TRACING
2470extern void
2471__trace_special(void *__tr, void *__data,
2472                unsigned long arg1, unsigned long arg2, unsigned long arg3);
2473#else
2474static inline void
2475__trace_special(void *__tr, void *__data,
2476                unsigned long arg1, unsigned long arg2, unsigned long arg3)
2477{
2478}
2479#endif
2480
2481extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2482extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2483
2484extern void normalize_rt_tasks(void);
2485
2486#ifdef CONFIG_GROUP_SCHED
2487
2488extern struct task_group init_task_group;
2489#ifdef CONFIG_USER_SCHED
2490extern struct task_group root_task_group;
2491extern void set_tg_uid(struct user_struct *user);
2492#endif
2493
2494extern struct task_group *sched_create_group(struct task_group *parent);
2495extern void sched_destroy_group(struct task_group *tg);
2496extern void sched_move_task(struct task_struct *tsk);
2497#ifdef CONFIG_FAIR_GROUP_SCHED
2498extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
2499extern unsigned long sched_group_shares(struct task_group *tg);
2500#endif
2501#ifdef CONFIG_RT_GROUP_SCHED
2502extern int sched_group_set_rt_runtime(struct task_group *tg,
2503                                      long rt_runtime_us);
2504extern long sched_group_rt_runtime(struct task_group *tg);
2505extern int sched_group_set_rt_period(struct task_group *tg,
2506                                      long rt_period_us);
2507extern long sched_group_rt_period(struct task_group *tg);
2508extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
2509#endif
2510#endif
2511
2512extern int task_can_switch_user(struct user_struct *up,
2513                                        struct task_struct *tsk);
2514
2515#ifdef CONFIG_TASK_XACCT
2516static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2517{
2518        tsk->ioac.rchar += amt;
2519}
2520
2521static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2522{
2523        tsk->ioac.wchar += amt;
2524}
2525
2526static inline void inc_syscr(struct task_struct *tsk)
2527{
2528        tsk->ioac.syscr++;
2529}
2530
2531static inline void inc_syscw(struct task_struct *tsk)
2532{
2533        tsk->ioac.syscw++;
2534}
2535#else
2536static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2537{
2538}
2539
2540static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2541{
2542}
2543
2544static inline void inc_syscr(struct task_struct *tsk)
2545{
2546}
2547
2548static inline void inc_syscw(struct task_struct *tsk)
2549{
2550}
2551#endif
2552
2553#ifndef TASK_SIZE_OF
2554#define TASK_SIZE_OF(tsk)       TASK_SIZE
2555#endif
2556
2557/*
2558 * Call the function if the target task is executing on a CPU right now:
2559 */
2560extern void task_oncpu_function_call(struct task_struct *p,
2561                                     void (*func) (void *info), void *info);
2562
2563
2564#ifdef CONFIG_MM_OWNER
2565extern void mm_update_next_owner(struct mm_struct *mm);
2566extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
2567#else
2568static inline void mm_update_next_owner(struct mm_struct *mm)
2569{
2570}
2571
2572static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2573{
2574}
2575#endif /* CONFIG_MM_OWNER */
2576
2577#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
2578
2579#endif /* __KERNEL__ */
2580
2581#endif
2582