linux-old/include/linux/sched.h
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <asm/param.h>  /* for HZ */

extern unsigned long event;

#include <linux/config.h>
#include <linux/binfmts.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/timex.h>
#include <linux/rbtree.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>

#include <linux/smp.h>
#include <linux/tty.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>

struct exec_domain;

/*
 * cloning flags:
 */
#define CSIGNAL         0x000000ff      /* signal mask to be sent at exit */
#define CLONE_VM        0x00000100      /* set if VM shared between processes */
#define CLONE_FS        0x00000200      /* set if fs info shared between processes */
#define CLONE_FILES     0x00000400      /* set if open files shared between processes */
#define CLONE_SIGHAND   0x00000800      /* set if signal handlers and blocked signals shared */
#define CLONE_PID       0x00001000      /* set if pid shared */
#define CLONE_PTRACE    0x00002000      /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK     0x00004000      /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT    0x00008000      /* set if we want to have the same parent as the cloner */
#define CLONE_THREAD    0x00010000      /* Same thread group? */
#define CLONE_NEWNS     0x00020000      /* New namespace group? */

#define CLONE_SIGNAL    (CLONE_SIGHAND | CLONE_THREAD)

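/*
 * Illustrative sketch (not part of the original header): in-kernel users
 * combine these flags when creating threads, e.g. via kernel_thread().
 * A kernel daemon that shares filesystem info and open files with its
 * parent might be started roughly as follows (thread_fn and arg are
 * hypothetical names):
 *
 *      pid = kernel_thread(thread_fn, arg,
 *                          CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
 *
 * CLONE_SIGNAL is just shorthand for CLONE_SIGHAND | CLONE_THREAD.
 */
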
/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];         /* Load averages */

#define FSHIFT          11              /* nr of bits of precision */
#define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
#define LOAD_FREQ       (5*HZ)          /* 5 sec intervals */
#define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
#define EXP_5           2014            /* 1/exp(5sec/5min) */
#define EXP_15          2037            /* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
        load *= exp; \
        load += n*(FIXED_1-exp); \
        load >>= FSHIFT;

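/*
 * Worked example (added for clarity, not in the original source): the
 * EXP_n constants are exp(-5sec/period) scaled by FIXED_1 = 2048.  For
 * the one-minute average, exp(-5/60) ~= 0.9200 and 0.9200 * 2048 ~= 1884,
 * which is EXP_1; likewise exp(-5/300)*2048 ~= 2014 and
 * exp(-5/900)*2048 ~= 2037.  Every LOAD_FREQ ticks the timer code does,
 * in fixed point,
 *
 *      CALC_LOAD(avenrun[0], EXP_1, active_tasks);
 *
 * i.e. avenrun = avenrun*e + active_tasks*(1-e), an exponentially
 * decaying average of the number of runnable tasks.
 */
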
#define CT_TO_SECS(x)   ((x) / HZ)
#define CT_TO_USECS(x)  (((x) % HZ) * 1000000/HZ)

extern int nr_running, nr_threads;
extern int last_pid;

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#ifdef __KERNEL__
#include <linux/timer.h>
#endif

#include <asm/processor.h>

#define TASK_RUNNING            0
#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2
#define TASK_ZOMBIE             4
#define TASK_STOPPED            8

#define __set_task_state(tsk, state_value)              \
        do { (tsk)->state = (state_value); } while (0)
#ifdef CONFIG_SMP
#define set_task_state(tsk, state_value)                \
        set_mb((tsk)->state, (state_value))
#else
#define set_task_state(tsk, state_value)                \
        __set_task_state((tsk), (state_value))
#endif

#define __set_current_state(state_value)                        \
        do { current->state = (state_value); } while (0)
#ifdef CONFIG_SMP
#define set_current_state(state_value)          \
        set_mb(current->state, (state_value))
#else
#define set_current_state(state_value)          \
        __set_current_state(state_value)
#endif

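/*
 * Usage sketch (added for illustration, not in the original header): the
 * canonical way to sleep on a condition is to mark the task sleeping
 * *before* checking the condition, so a concurrent wake_up() cannot be
 * lost ('condition' is a placeholder):
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      if (!condition)
 *              schedule();
 *      set_current_state(TASK_RUNNING);
 *
 * The SMP variant uses set_mb() so the state change is ordered before
 * the condition test.
 */
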
/*
 * Scheduling policies
 */
#define SCHED_OTHER             0
#define SCHED_FIFO              1
#define SCHED_RR                2

/*
 * This is an additional bit set when we want to
 * yield the CPU for one re-schedule..
 */
#define SCHED_YIELD             0x10

struct sched_param {
        int sched_priority;
};

struct completion;

#ifdef __KERNEL__

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t runqueue_lock;
extern spinlock_t mmlist_lock;

extern void sched_init(void);
extern void init_idle(void);
extern void show_state(void);
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void update_one_process(struct task_struct *p, unsigned long user,
                               unsigned long system, int cpu);

#define MAX_SCHEDULE_TIMEOUT    LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);

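/*
 * Usage sketch (illustrative, not part of the original source): to sleep
 * for roughly one second, a caller sets its state and passes a timeout
 * in jiffies; the return value is the number of jiffies left if the task
 * was woken early:
 *
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      remaining = schedule_timeout(HZ);
 *
 * Passing MAX_SCHEDULE_TIMEOUT makes schedule_timeout() behave like a
 * plain schedule() with no timer.
 */
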
extern int schedule_task(struct tq_struct *task);
extern void flush_scheduled_tasks(void);
extern int start_context_thread(void);
extern int current_is_keventd(void);

/*
 * The default fd array needs to be at least BITS_PER_LONG,
 * as this is the granularity returned by copy_fdset().
 */
#define NR_OPEN_DEFAULT BITS_PER_LONG

struct namespace;
/*
 * Open file table structure
 */
struct files_struct {
        atomic_t count;
        rwlock_t file_lock;     /* Protects all the below members.  Nests inside tsk->alloc_lock */
        int max_fds;
        int max_fdset;
        int next_fd;
        struct file ** fd;      /* current fd array */
        fd_set *close_on_exec;
        fd_set *open_fds;
        fd_set close_on_exec_init;
        fd_set open_fds_init;
        struct file * fd_array[NR_OPEN_DEFAULT];
};

#define INIT_FILES \
{                                                       \
        count:          ATOMIC_INIT(1),                 \
        file_lock:      RW_LOCK_UNLOCKED,               \
        max_fds:        NR_OPEN_DEFAULT,                \
        max_fdset:      __FD_SETSIZE,                   \
        next_fd:        0,                              \
        fd:             &init_files.fd_array[0],        \
        close_on_exec:  &init_files.close_on_exec_init, \
        open_fds:       &init_files.open_fds_init,      \
        close_on_exec_init: { { 0, } },                 \
        open_fds_init:  { { 0, } },                     \
        fd_array:       { NULL, }                       \
}

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT   (65536)

extern int max_map_count;

struct mm_struct {
        struct vm_area_struct * mmap;           /* list of VMAs */
        rb_root_t mm_rb;
        struct vm_area_struct * mmap_cache;     /* last find_vma result */
        pgd_t * pgd;
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                          /* number of VMAs */
        struct rw_semaphore mmap_sem;
        spinlock_t page_table_lock;             /* Protects task page tables and mm->rss */

        struct list_head mmlist;                /* List of all active mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
                                                 */

        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;
        unsigned long rss, total_vm, locked_vm;
        unsigned long def_flags;
        unsigned long cpu_vm_mask;
        unsigned long swap_address;

        unsigned dumpable:1;

        /* Architecture-specific MM context */
        mm_context_t context;
};

extern int mmlist_nr;

#define INIT_MM(name) \
{                                                       \
        mm_rb:          RB_ROOT,                        \
        pgd:            swapper_pg_dir,                 \
        mm_users:       ATOMIC_INIT(2),                 \
        mm_count:       ATOMIC_INIT(1),                 \
        mmap_sem:       __RWSEM_INITIALIZER(name.mmap_sem), \
        page_table_lock: SPIN_LOCK_UNLOCKED,            \
        mmlist:         LIST_HEAD_INIT(name.mmlist),    \
}

struct signal_struct {
        atomic_t                count;
        struct k_sigaction      action[_NSIG];
        spinlock_t              siglock;
};


#define INIT_SIGNALS {  \
        count:          ATOMIC_INIT(1),                 \
        action:         { {{0,}}, },                    \
        siglock:        SPIN_LOCK_UNLOCKED              \
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
        atomic_t __count;       /* reference count */
        atomic_t processes;     /* How many processes does this user have? */
        atomic_t files;         /* How many open files does this user have? */

        /* Hash table maintenance information */
        struct user_struct *next, **pprev;
        uid_t uid;
};

#define get_current_user() ({                           \
        struct user_struct *__user = current->user;     \
        atomic_inc(&__user->__count);                   \
        __user; })

extern struct user_struct root_user;
#define INIT_USER (&root_user)

struct task_struct {
        /*
         * offsets of these are hardcoded elsewhere - touch with care
         */
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        unsigned long flags;    /* per process flags, defined below */
        int sigpending;
        mm_segment_t addr_limit;        /* thread address space:
                                                0-0xBFFFFFFF for user-thread
                                                0-0xFFFFFFFF for kernel-thread
                                         */
        struct exec_domain *exec_domain;
        volatile long need_resched;
        unsigned long ptrace;

        int lock_depth;         /* Lock depth */

/*
 * offset 32 begins here on 32-bit platforms. We keep
 * all fields in a single cacheline that are needed for
 * the goodness() loop in schedule().
 */
        long counter;
        long nice;
        unsigned long policy;
        struct mm_struct *mm;
        int processor;
        /*
         * cpus_runnable is ~0 if the process is not running on any
         * CPU. It's (1 << cpu) if it's running on a CPU. This mask
         * is updated under the runqueue lock.
         *
         * To determine whether a process might run on a CPU, this
         * mask is AND-ed with cpus_allowed.
         */
        unsigned long cpus_runnable, cpus_allowed;
        /*
         * (only the 'next' pointer fits into the cacheline, but
         * that's just fine.)
         */
        struct list_head run_list;
        unsigned long sleep_time;

        struct task_struct *next_task, *prev_task;
        struct mm_struct *active_mm;
        struct list_head local_pages;
        unsigned int allocation_order, nr_local_pages;

/* task state */
        struct linux_binfmt *binfmt;
        int exit_code, exit_signal;
        int pdeath_signal;  /*  The signal sent when the parent dies  */
        /* ??? */
        unsigned long personality;
        int did_exec:1;
        pid_t pid;
        pid_t pgrp;
        pid_t tty_old_pgrp;
        pid_t session;
        pid_t tgid;
        /* boolean value for session group leader */
        int leader;
        /*
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with
         * p->p_pptr->pid)
         */
        struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
        struct list_head thread_group;

        /* PID hash table linkage. */
        struct task_struct *pidhash_next;
        struct task_struct **pidhash_pprev;

        wait_queue_head_t wait_chldexit;        /* for wait4() */
        struct completion *vfork_done;          /* for vfork() */
        unsigned long rt_priority;
        unsigned long it_real_value, it_prof_value, it_virt_value;
        unsigned long it_real_incr, it_prof_incr, it_virt_incr;
        struct timer_list real_timer;
        struct tms times;
        unsigned long start_time;
        long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
        int swappable:1;
/* process credentials */
        uid_t uid,euid,suid,fsuid;
        gid_t gid,egid,sgid,fsgid;
        int ngroups;
        gid_t   groups[NGROUPS];
        kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
        int keep_capabilities:1;
        struct user_struct *user;
/* limits */
        struct rlimit rlim[RLIM_NLIMITS];
        unsigned short used_math;
        char comm[16];
/* file system info */
        int link_count, total_link_count;
        struct tty_struct *tty; /* NULL if no tty */
        unsigned int locks; /* How many file locks are being held */
/* ipc stuff */
        struct sem_undo *semundo;
        struct sem_queue *semsleeping;
/* CPU-specific state of this task */
        struct thread_struct thread;
/* filesystem information */
        struct fs_struct *fs;
/* open file information */
        struct files_struct *files;
/* namespace */
        struct namespace *namespace;
/* signal handlers */
        spinlock_t sigmask_lock;        /* Protects signal and blocked */
        struct signal_struct *sig;

        sigset_t blocked;
        struct sigpending pending;

        unsigned long sas_ss_sp;
        size_t sas_ss_size;
        int (*notifier)(void *priv);
        void *notifier_data;
        sigset_t *notifier_mask;

/* Thread group tracking */
        u32 parent_exec_id;
        u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty */
        spinlock_t alloc_lock;

/* journalling filesystem info */
        void *journal_info;
};

/*
 * Per process flags
 */
#define PF_ALIGNWARN    0x00000001      /* Print alignment warning msgs */
                                        /* Not implemented yet, only for 486*/
#define PF_STARTING     0x00000002      /* being created */
#define PF_EXITING      0x00000004      /* getting shut down */
#define PF_FORKNOEXEC   0x00000040      /* forked but didn't exec */
#define PF_SUPERPRIV    0x00000100      /* used super-user privileges */
#define PF_DUMPCORE     0x00000200      /* dumped core */
#define PF_SIGNALED     0x00000400      /* killed by a signal */
#define PF_MEMALLOC     0x00000800      /* Allocating memory */
#define PF_MEMDIE       0x00001000      /* Killed for out-of-memory */
#define PF_FREE_PAGES   0x00002000      /* per process page freeing */
#define PF_NOIO         0x00004000      /* avoid generating further I/O */

#define PF_USEDFPU      0x00100000      /* task used FPU this quantum (SMP) */

/*
 * Ptrace flags
 */

#define PT_PTRACED      0x00000001
#define PT_TRACESYS     0x00000002
#define PT_DTRACE       0x00000004      /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000008
#define PT_PTRACE_CAP   0x00000010      /* ptracer can follow suid-exec */

/*
 * Limit the stack to some sane default: root can always
 * increase this limit if needed.  8MB seems reasonable.
 */
#define _STK_LIM        (8*1024*1024)

#define DEF_COUNTER     (10*HZ/100)     /* 100 ms time slice */
#define MAX_COUNTER     (20*HZ/100)
#define DEF_NICE        (0)

extern void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain       default_exec_domain;

/*
 *  INIT_TASK is used to set up the first task table, touch at
 * your own risk! Base=0, limit=0x1fffff (=2MB)
 */
#define INIT_TASK(tsk)  \
{                                                                       \
    state:              0,                                              \
    flags:              0,                                              \
    sigpending:         0,                                              \
    addr_limit:         KERNEL_DS,                                      \
    exec_domain:        &default_exec_domain,                           \
    lock_depth:         -1,                                             \
    counter:            DEF_COUNTER,                                    \
    nice:               DEF_NICE,                                       \
    policy:             SCHED_OTHER,                                    \
    mm:                 NULL,                                           \
    active_mm:          &init_mm,                                       \
    cpus_runnable:      -1,                                             \
    cpus_allowed:       -1,                                             \
    run_list:           LIST_HEAD_INIT(tsk.run_list),                   \
    next_task:          &tsk,                                           \
    prev_task:          &tsk,                                           \
    p_opptr:            &tsk,                                           \
    p_pptr:             &tsk,                                           \
    thread_group:       LIST_HEAD_INIT(tsk.thread_group),               \
    wait_chldexit:      __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
    real_timer:         {                                               \
        function:               it_real_fn                              \
    },                                                                  \
    cap_effective:      CAP_INIT_EFF_SET,                               \
    cap_inheritable:    CAP_INIT_INH_SET,                               \
    cap_permitted:      CAP_FULL_SET,                                   \
    keep_capabilities:  0,                                              \
    rlim:               INIT_RLIMITS,                                   \
    user:               INIT_USER,                                      \
    comm:               "swapper",                                      \
    thread:             INIT_THREAD,                                    \
    fs:                 &init_fs,                                       \
    files:              &init_files,                                    \
    sigmask_lock:       SPIN_LOCK_UNLOCKED,                             \
    sig:                &init_signals,                                  \
    pending:            { NULL, &tsk.pending.head, {{0}}},              \
    blocked:            {{0}},                                          \
    alloc_lock:         SPIN_LOCK_UNLOCKED,                             \
    journal_info:       NULL,                                           \
}


#ifndef INIT_TASK_SIZE
# define INIT_TASK_SIZE 2048*sizeof(long)
#endif

union task_union {
        struct task_struct task;
        unsigned long stack[INIT_TASK_SIZE/sizeof(long)];
};

extern union task_union init_task_union;

extern struct   mm_struct init_mm;
extern struct task_struct *init_tasks[NR_CPUS];

/* PID hashing. (shouldn't this be dynamic?) */
#define PIDHASH_SZ (4096 >> 2)
extern struct task_struct *pidhash[PIDHASH_SZ];

#define pid_hashfn(x)   ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))

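/*
 * Worked example (added for illustration): with PIDHASH_SZ = 1024 the
 * hash folds the top bits of the pid into the bottom ones, e.g.
 * pid_hashfn(1234) = ((1234 >> 8) ^ 1234) & 1023 = (4 ^ 1234) & 1023 = 214,
 * so task 1234 lands in bucket 214 of pidhash[].
 */
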
static inline void hash_pid(struct task_struct *p)
{
        struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];

        if((p->pidhash_next = *htable) != NULL)
                (*htable)->pidhash_pprev = &p->pidhash_next;
        *htable = p;
        p->pidhash_pprev = htable;
}

static inline void unhash_pid(struct task_struct *p)
{
        if(p->pidhash_next)
                p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
        *p->pidhash_pprev = p->pidhash_next;
}

static inline struct task_struct *find_task_by_pid(int pid)
{
        struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];

        for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
                ;

        return p;
}

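/*
 * Usage sketch (illustrative, not in the original header): lookups must
 * hold tasklist_lock so the task cannot be unhashed underneath the
 * caller, roughly:
 *
 *      read_lock(&tasklist_lock);
 *      p = find_task_by_pid(pid);
 *      if (p)
 *              send_sig(SIGHUP, p, 1);
 *      read_unlock(&tasklist_lock);
 */
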
#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)

static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
{
        tsk->processor = cpu;
        tsk->cpus_runnable = 1UL << cpu;
}

static inline void task_release_cpu(struct task_struct *tsk)
{
        tsk->cpus_runnable = ~0UL;
}

/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
extern void free_uid(struct user_struct *);

#include <asm/current.h>

extern unsigned long volatile jiffies;
extern unsigned long itimer_ticks;
extern unsigned long itimer_next;
extern struct timeval xtime;
extern void do_timer(struct pt_regs *);

extern unsigned int * prof_buffer;
extern unsigned long prof_len;
extern unsigned long prof_shift;

#define CURRENT_TIME (xtime.tv_sec)

extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
                                      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
                                                    signed long timeout));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));

#define wake_up(x)                      __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_nr(x, nr)               __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_all(x)                  __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
#define wake_up_sync(x)                 __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
#define wake_up_sync_nr(x, nr)          __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible(x)        __wake_up((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
#define wake_up_interruptible_all(x)    __wake_up((x),TASK_INTERRUPTIBLE, 0)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE,  nr)
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *);
extern void sig_exit(int, int, struct siginfo *);
extern int dequeue_signal(sigset_t *, siginfo_t *);
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
                              sigset_t *mask);
extern void unblock_all_signals(void);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_sl_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern void notify_parent(struct task_struct *, int);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int kill_pg(pid_t, int, int);
extern int kill_sl(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);

static inline int signal_pending(struct task_struct *p)
{
        return (p->sigpending != 0);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
        unsigned long ready;
        long i;

        switch (_NSIG_WORDS) {
        default:
                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
                        ready |= signal->sig[i] &~ blocked->sig[i];
                break;

        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
                ready |= signal->sig[2] &~ blocked->sig[2];
                ready |= signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
                ready |= signal->sig[0] &~ blocked->sig[0];
                break;

        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
        }
        return ready != 0;
}

/* Reevaluate whether the task has signals pending delivery.
   This is required every time the blocked sigset_t changes.
   All callers should have t->sigmask_lock.  */

static inline void recalc_sigpending(struct task_struct *t)
{
        t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
}

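/*
 * Usage sketch (illustrative, not in the original header): whenever
 * current->blocked is changed, the pending flag must be recomputed under
 * sigmask_lock, roughly as the sigprocmask path does (signum is a
 * placeholder):
 *
 *      spin_lock_irq(&current->sigmask_lock);
 *      sigaddset(&current->blocked, signum);
 *      recalc_sigpending(current);
 *      spin_unlock_irq(&current->sigmask_lock);
 */
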
/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
        return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
        return (current->sas_ss_size == 0 ? SS_DISABLE
                : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

extern int request_irq(unsigned int,
                       void (*handler)(int, void *, struct pt_regs *),
                       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

/*
 * This has now become a routine instead of a macro; it sets a flag if
 * it returns true (to do BSD-style accounting where the process is flagged
 * if it uses root privs). The implication of this is that you should do
 * normal permissions checks first, and check suser() last.
 *
 * [Dec 1997 -- Chris Evans]
 * For correctness, the above considerations need to be extended to
 * fsuser(). This is done, along with moving fsuser() checks to be
 * last.
 *
 * These will be removed, but in the meantime, when the SECURE_NOROOT
 * flag is set, uids don't grant privilege.
 */
static inline int suser(void)
{
        if (!issecure(SECURE_NOROOT) && current->euid == 0) {
                current->flags |= PF_SUPERPRIV;
                return 1;
        }
        return 0;
}

static inline int fsuser(void)
{
        if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
                current->flags |= PF_SUPERPRIV;
                return 1;
        }
        return 0;
}

/*
 * capable() checks for a particular capability.
 * New privilege checks should use this interface, rather than suser() or
 * fsuser(). See include/linux/capability.h for defined capabilities.
 */

static inline int capable(int cap)
{
#if 1 /* ok now */
        if (cap_raised(current->cap_effective, cap))
#else
        if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
#endif
        {
                current->flags |= PF_SUPERPRIV;
                return 1;
        }
        return 0;
}

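/*
 * Usage sketch (illustrative, not in the original header): new code
 * checks one specific capability instead of "is root?", e.g.
 *
 *      if (!capable(CAP_SYS_ADMIN))
 *              return -EPERM;
 *
 * which also sets PF_SUPERPRIV for BSD-style accounting, just as
 * suser() does.
 */
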
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

extern struct mm_struct * start_lazy_tlb(void);
extern void end_lazy_tlb(struct mm_struct *mm);

/* mmdrop drops the mm and the page tables */
extern inline void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
        if (atomic_dec_and_test(&mm->mm_count))
                __mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Remove the current task's stale references to the old mm_struct */
extern void mm_release(void);

/*
 * Routines for handling the fd arrays
 */
extern struct file ** alloc_fd_array(int);
extern int expand_fd_array(struct files_struct *, int nr);
extern void free_fd_array(struct file **, int);

extern fd_set *alloc_fdset(int);
extern int expand_fdset(struct files_struct *, int nr);
extern void free_fdset(fd_set *, int);

extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_mm(struct task_struct *);
extern void exit_files(struct task_struct *);
extern void exit_sighand(struct task_struct *);

extern void reparent_to_init(void);
extern void daemonize(void);

extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));

#define __wait_event(wq, condition)                                     \
do {                                                                    \
        wait_queue_t __wait;                                            \
        init_waitqueue_entry(&__wait, current);                         \
                                                                        \
        add_wait_queue(&wq, &__wait);                                   \
        for (;;) {                                                      \
                set_current_state(TASK_UNINTERRUPTIBLE);                \
                if (condition)                                          \
                        break;                                          \
                schedule();                                             \
        }                                                               \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
} while (0)

#define wait_event(wq, condition)                                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
} while (0)

#define __wait_event_interruptible(wq, condition, ret)                  \
do {                                                                    \
        wait_queue_t __wait;                                            \
        init_waitqueue_entry(&__wait, current);                         \
                                                                        \
        add_wait_queue(&wq, &__wait);                                   \
        for (;;) {                                                      \
                set_current_state(TASK_INTERRUPTIBLE);                  \
                if (condition)                                          \
                        break;                                          \
                if (!signal_pending(current)) {                         \
                        schedule();                                     \
                        continue;                                       \
                }                                                       \
                ret = -ERESTARTSYS;                                     \
                break;                                                  \
        }                                                               \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
} while (0)

#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        if (!(condition))                                               \
                __wait_event_interruptible(wq, condition, __ret);       \
        __ret;                                                          \
})

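/*
 * Usage sketch (illustrative, not in the original header): a typical
 * producer/consumer pairing with a hypothetical queue looks like
 *
 *      // consumer
 *      if (wait_event_interruptible(queue_wait, !list_empty(&queue)))
 *              return -ERESTARTSYS;    // interrupted by a signal
 *
 *      // producer
 *      list_add_tail(&item->list, &queue);
 *      wake_up_interruptible(&queue_wait);
 *
 * queue_wait is a wait_queue_head_t and queue a list_head; both names
 * are placeholders.
 */
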
#define REMOVE_LINKS(p) do { \
        (p)->next_task->prev_task = (p)->prev_task; \
        (p)->prev_task->next_task = (p)->next_task; \
        if ((p)->p_osptr) \
                (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
        if ((p)->p_ysptr) \
                (p)->p_ysptr->p_osptr = (p)->p_osptr; \
        else \
                (p)->p_pptr->p_cptr = (p)->p_osptr; \
        } while (0)

#define SET_LINKS(p) do { \
        (p)->next_task = &init_task; \
        (p)->prev_task = init_task.prev_task; \
        init_task.prev_task->next_task = (p); \
        init_task.prev_task = (p); \
        (p)->p_ysptr = NULL; \
        if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
                (p)->p_osptr->p_ysptr = p; \
        (p)->p_pptr->p_cptr = p; \
        } while (0)

#define for_each_task(p) \
        for (p = &init_task ; (p = p->next_task) != &init_task ; )

#define for_each_thread(task) \
        for (task = next_thread(current) ; task != current ; task = next_thread(task))

#define next_thread(p) \
        list_entry((p)->thread_group.next, struct task_struct, thread_group)

#define thread_group_leader(p)  (p->pid == p->tgid)

static inline void del_from_runqueue(struct task_struct * p)
{
        nr_running--;
        p->sleep_time = jiffies;
        list_del(&p->run_list);
        p->run_list.next = NULL;
}

static inline int task_on_runqueue(struct task_struct *p)
{
        return (p->run_list.next != NULL);
}

static inline void unhash_process(struct task_struct *p)
{
        if (task_on_runqueue(p))
                out_of_line_bug();
        write_lock_irq(&tasklist_lock);
        nr_threads--;
        unhash_pid(p);
        REMOVE_LINKS(p);
        list_del(&p->thread_group);
        write_unlock_irq(&tasklist_lock);
}

/* Protects ->fs, ->files, ->mm, and synchronises with wait4().  Nests inside tasklist_lock */
static inline void task_lock(struct task_struct *p)
{
        spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
        spin_unlock(&p->alloc_lock);
}

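/*
 * Usage sketch (illustrative, not in the original header): another task's
 * ->files pointer may only be sampled under its alloc_lock, e.g.
 *
 *      task_lock(p);
 *      files = p->files;
 *      if (files)
 *              atomic_inc(&files->count);
 *      task_unlock(p);
 *
 * so the structure cannot be freed while it is being examined.
 */
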
/* write full pathname into buffer and return start of pathname */
static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
                                char *buf, int buflen)
{
        char *res;
        struct vfsmount *rootmnt;
        struct dentry *root;
        read_lock(&current->fs->lock);
        rootmnt = mntget(current->fs->rootmnt);
        root = dget(current->fs->root);
        read_unlock(&current->fs->lock);
        spin_lock(&dcache_lock);
        res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
        spin_unlock(&dcache_lock);
        dput(root);
        mntput(rootmnt);
        return res;
}

static inline int need_resched(void)
{
        return (unlikely(current->need_resched));
}

extern void __cond_resched(void);
static inline void cond_resched(void)
{
        if (need_resched())
                __cond_resched();
}

#endif /* __KERNEL__ */
#endif