linux-old/include/linux/sched.h
   1#ifndef _LINUX_SCHED_H
   2#define _LINUX_SCHED_H
   3
   4#include <asm/param.h>  /* for HZ */
   5
   6extern unsigned long event;
   7
   8#include <linux/config.h>
   9#include <linux/binfmts.h>
  10#include <linux/threads.h>
  11#include <linux/kernel.h>
  12#include <linux/types.h>
  13#include <linux/times.h>
  14#include <linux/timex.h>
  15#include <linux/rbtree.h>
  16
  17#include <asm/system.h>
  18#include <asm/semaphore.h>
  19#include <asm/page.h>
  20#include <asm/ptrace.h>
  21#include <asm/mmu.h>
  22
  23#include <linux/smp.h>
  24#include <linux/tty.h>
  25#include <linux/sem.h>
  26#include <linux/signal.h>
  27#include <linux/securebits.h>
  28#include <linux/fs_struct.h>
  29
  30struct exec_domain;
  31
  32/*
  33 * cloning flags:
  34 */
  35#define CSIGNAL         0x000000ff      /* signal mask to be sent at exit */
  36#define CLONE_VM        0x00000100      /* set if VM shared between processes */
  37#define CLONE_FS        0x00000200      /* set if fs info shared between processes */
  38#define CLONE_FILES     0x00000400      /* set if open files shared between processes */
  39#define CLONE_SIGHAND   0x00000800      /* set if signal handlers and blocked signals shared */
  40#define CLONE_PID       0x00001000      /* set if pid shared */
  41#define CLONE_PTRACE    0x00002000      /* set if we want to let tracing continue on the child too */
  42#define CLONE_VFORK     0x00004000      /* set if the parent wants the child to wake it up on mm_release */
  43#define CLONE_PARENT    0x00008000      /* set if we want to have the same parent as the cloner */
  44#define CLONE_THREAD    0x00010000      /* Same thread group? */
  45#define CLONE_NEWNS     0x00020000      /* New namespace group? */
  46
  47#define CLONE_SIGNAL    (CLONE_SIGHAND | CLONE_THREAD)
  48
  49/*
   50 * These are the constants used to fake the fixed-point load-average
  51 * counting. Some notes:
  52 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
  53 *    a load-average precision of 10 bits integer + 11 bits fractional
  54 *  - if you want to count load-averages more often, you need more
  55 *    precision, or rounding will get you. With 2-second counting freq,
  56 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
  57 *    11 bit fractions.
  58 */
  59extern unsigned long avenrun[];         /* Load averages */
  60
  61#define FSHIFT          11              /* nr of bits of precision */
  62#define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
  63#define LOAD_FREQ       (5*HZ)          /* 5 sec intervals */
  64#define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
  65#define EXP_5           2014            /* 1/exp(5sec/5min) */
  66#define EXP_15          2037            /* 1/exp(5sec/15min) */
  67
  68#define CALC_LOAD(load,exp,n) \
  69        load *= exp; \
  70        load += n*(FIXED_1-exp); \
  71        load >>= FSHIFT;
  72
  73#define CT_TO_SECS(x)   ((x) / HZ)
  74#define CT_TO_USECS(x)  (((x) % HZ) * 1000000/HZ)
  75
  76extern int nr_running, nr_threads;
  77extern int last_pid;
  78
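/*
 * Illustrative sketch (guarded by "#if 0", never compiled): the EXP_n
 * constants above are FIXED_1/exp(5sec/n min); with FSHIFT = 11 that
 * gives EXP_1 = 2048/exp(5/60) ~= 1884.  The code below is a
 * simplified version of the periodic update the timer code performs
 * with CALC_LOAD -- the example_* names are invented, and the real
 * update also counts uninterruptible tasks, not just nr_running.
 */
#if 0   /* illustrative sketch only */
static unsigned long example_ticks_left = LOAD_FREQ;

static void example_calc_load(unsigned long ticks)
{
        unsigned long active;           /* fixed-point task count */

        example_ticks_left -= ticks;
        if ((long) example_ticks_left < 0) {
                example_ticks_left += LOAD_FREQ;
                active = nr_running * FIXED_1;
                CALC_LOAD(avenrun[0], EXP_1, active);
                CALC_LOAD(avenrun[1], EXP_5, active);
                CALC_LOAD(avenrun[2], EXP_15, active);
        }
}
#endif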
  79#include <linux/fs.h>
  80#include <linux/time.h>
  81#include <linux/param.h>
  82#include <linux/resource.h>
  83#ifdef __KERNEL__
  84#include <linux/timer.h>
  85#endif
  86
  87#include <asm/processor.h>
  88
  89#define TASK_RUNNING            0
  90#define TASK_INTERRUPTIBLE      1
  91#define TASK_UNINTERRUPTIBLE    2
  92#define TASK_ZOMBIE             4
  93#define TASK_STOPPED            8
  94
  95#define __set_task_state(tsk, state_value)              \
  96        do { (tsk)->state = (state_value); } while (0)
  97#define set_task_state(tsk, state_value)                \
  98        set_mb((tsk)->state, (state_value))
  99
 100#define __set_current_state(state_value)                        \
 101        do { current->state = (state_value); } while (0)
 102#define set_current_state(state_value)          \
 103        set_mb(current->state, (state_value))
 104
 105/*
 106 * Scheduling policies
 107 */
 108#define SCHED_OTHER             0
 109#define SCHED_FIFO              1
 110#define SCHED_RR                2
 111
 112/*
 113 * This is an additional bit set when we want to
 114 * yield the CPU for one re-schedule..
 115 */
 116#define SCHED_YIELD             0x10
 117
 118struct sched_param {
 119        int sched_priority;
 120};
 121
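/*
 * Illustrative sketch (guarded by "#if 0", never compiled): these
 * policy values and struct sched_param are what the
 * sched_setscheduler() system call consumes.  The snippet below is
 * user-space code shown only to illustrate the API;
 * example_go_realtime() is a made-up name.
 */
#if 0   /* illustrative sketch only */
#include <sched.h>
#include <stdio.h>

int example_go_realtime(void)
{
        struct sched_param param;

        param.sched_priority = 10;
        /* pid 0 means "the calling process"; needs root privileges */
        if (sched_setscheduler(0, SCHED_RR, &param) < 0) {
                perror("sched_setscheduler");
                return -1;
        }
        return 0;
}
#endif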
 122struct completion;
 123
 124#ifdef __KERNEL__
 125
 126#include <linux/spinlock.h>
 127
 128/*
 129 * This serializes "schedule()" and also protects
 130 * the run-queue from deletions/modifications (but
 131 * _adding_ to the beginning of the run-queue has
 132 * a separate lock).
 133 */
 134extern rwlock_t tasklist_lock;
 135extern spinlock_t runqueue_lock;
 136extern spinlock_t mmlist_lock;
 137
 138extern void sched_init(void);
 139extern void init_idle(void);
 140extern void show_state(void);
 141extern void cpu_init (void);
 142extern void trap_init(void);
 143extern void update_process_times(int user);
 144extern void update_one_process(struct task_struct *p, unsigned long user,
 145                               unsigned long system, int cpu);
 146
 147#define MAX_SCHEDULE_TIMEOUT    LONG_MAX
 148extern signed long FASTCALL(schedule_timeout(signed long timeout));
 149asmlinkage void schedule(void);
 150
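/*
 * Illustrative sketch (guarded by "#if 0", never compiled):
 * schedule_timeout() sleeps in whatever state the caller set and
 * returns the jiffies still remaining if the task was woken early,
 * or 0 if the timeout expired; MAX_SCHEDULE_TIMEOUT sleeps until an
 * explicit wake-up.  example_sleep_one_second() is a made-up name.
 */
#if 0   /* illustrative sketch only */
static void example_sleep_one_second(void)
{
        signed long remaining;

        set_current_state(TASK_INTERRUPTIBLE);
        remaining = schedule_timeout(HZ);       /* about one second */
        if (remaining)
                printk("woken %ld jiffies early\n", remaining);
}
#endif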
 151extern int schedule_task(struct tq_struct *task);
 152extern void flush_scheduled_tasks(void);
 153extern int start_context_thread(void);
 154extern int current_is_keventd(void);
 155
 156#if CONFIG_SMP
 157extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask);
 158#else
 159# define set_cpus_allowed(p, new_mask) do { } while (0)
 160#endif
 161
 162/*
 163 * The default fd array needs to be at least BITS_PER_LONG,
 164 * as this is the granularity returned by copy_fdset().
 165 */
 166#define NR_OPEN_DEFAULT BITS_PER_LONG
 167
 168struct namespace;
 169/*
 170 * Open file table structure
 171 */
 172struct files_struct {
 173        atomic_t count;
 174        rwlock_t file_lock;     /* Protects all the below members.  Nests inside tsk->alloc_lock */
 175        int max_fds;
 176        int max_fdset;
 177        int next_fd;
 178        struct file ** fd;      /* current fd array */
 179        fd_set *close_on_exec;
 180        fd_set *open_fds;
 181        fd_set close_on_exec_init;
 182        fd_set open_fds_init;
 183        struct file * fd_array[NR_OPEN_DEFAULT];
 184};
 185
 186#define INIT_FILES \
 187{                                                       \
 188        count:          ATOMIC_INIT(1),                 \
 189        file_lock:      RW_LOCK_UNLOCKED,               \
 190        max_fds:        NR_OPEN_DEFAULT,                \
 191        max_fdset:      __FD_SETSIZE,                   \
 192        next_fd:        0,                              \
 193        fd:             &init_files.fd_array[0],        \
 194        close_on_exec:  &init_files.close_on_exec_init, \
 195        open_fds:       &init_files.open_fds_init,      \
 196        close_on_exec_init: { { 0, } },                 \
 197        open_fds_init:  { { 0, } },                     \
 198        fd_array:       { NULL, }                       \
 199}
 200
 201/* Maximum number of active map areas.. This is a random (large) number */
 202#define DEFAULT_MAX_MAP_COUNT   (65536)
 203
 204extern int max_map_count;
 205
 206struct mm_struct {
 207        struct vm_area_struct * mmap;           /* list of VMAs */
 208        rb_root_t mm_rb;
 209        struct vm_area_struct * mmap_cache;     /* last find_vma result */
 210        pgd_t * pgd;
 211        atomic_t mm_users;                      /* How many users with user space? */
 212        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
 213        int map_count;                          /* number of VMAs */
 214        struct rw_semaphore mmap_sem;
 215        spinlock_t page_table_lock;             /* Protects task page tables and mm->rss */
 216
 217        struct list_head mmlist;                /* List of all active mm's.  These are globally strung
 218                                                 * together off init_mm.mmlist, and are protected
 219                                                 * by mmlist_lock
 220                                                 */
 221
 222        unsigned long start_code, end_code, start_data, end_data;
 223        unsigned long start_brk, brk, start_stack;
 224        unsigned long arg_start, arg_end, env_start, env_end;
 225        unsigned long rss, total_vm, locked_vm;
 226        unsigned long def_flags;
 227        unsigned long cpu_vm_mask;
 228        unsigned long swap_address;
 229
 230        unsigned dumpable:1;
 231
 232        /* Architecture-specific MM context */
 233        mm_context_t context;
 234};
 235
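/*
 * Illustrative sketch (guarded by "#if 0", never compiled): the VMAs
 * of an address space live both on the mm->mmap list and in the
 * mm->mm_rb tree, and mmap_sem protects the layout.  The walk below
 * is a typical read-side use; example_count_vma_pages() is a made-up
 * name and struct vm_area_struct comes from linux/mm.h.
 */
#if 0   /* illustrative sketch only */
static unsigned long example_count_vma_pages(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long pages = 0;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                pages += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        up_read(&mm->mmap_sem);

        return pages;
}
#endif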
 236extern int mmlist_nr;
 237
 238#define INIT_MM(name) \
 239{                                                       \
 240        mm_rb:          RB_ROOT,                        \
 241        pgd:            swapper_pg_dir,                 \
 242        mm_users:       ATOMIC_INIT(2),                 \
 243        mm_count:       ATOMIC_INIT(1),                 \
 244        mmap_sem:       __RWSEM_INITIALIZER(name.mmap_sem), \
 245        page_table_lock: SPIN_LOCK_UNLOCKED,            \
 246        mmlist:         LIST_HEAD_INIT(name.mmlist),    \
 247}
 248
 249struct signal_struct {
 250        atomic_t                count;
 251        struct k_sigaction      action[_NSIG];
 252        spinlock_t              siglock;
 253};
 254
 255
 256#define INIT_SIGNALS {  \
 257        count:          ATOMIC_INIT(1),                 \
 258        action:         { {{0,}}, },                    \
 259        siglock:        SPIN_LOCK_UNLOCKED              \
 260}
 261
 262/*
 263 * Some day this will be a full-fledged user tracking system..
 264 */
 265struct user_struct {
 266        atomic_t __count;       /* reference count */
 267        atomic_t processes;     /* How many processes does this user have? */
 268        atomic_t files;         /* How many open files does this user have? */
 269
 270        /* Hash table maintenance information */
 271        struct user_struct *next, **pprev;
 272        uid_t uid;
 273};
 274
 275#define get_current_user() ({                           \
 276        struct user_struct *__user = current->user;     \
 277        atomic_inc(&__user->__count);                   \
 278        __user; })
 279
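/*
 * Illustrative sketch (guarded by "#if 0", never compiled):
 * user_struct is reference counted, so every reference taken with
 * get_current_user() must eventually be dropped with free_uid()
 * (declared further down in this file).  example_charge_user() is a
 * made-up name.
 */
#if 0   /* illustrative sketch only */
static void example_charge_user(void)
{
        struct user_struct *user = get_current_user();

        /* ... inspect user->processes, user->files, ... */

        free_uid(user);
}
#endif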
 280extern struct user_struct root_user;
 281#define INIT_USER (&root_user)
 282
 283struct task_struct {
 284        /*
 285         * offsets of these are hardcoded elsewhere - touch with care
 286         */
 287        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
 288        unsigned long flags;    /* per process flags, defined below */
 289        int sigpending;
 290        mm_segment_t addr_limit;        /* thread address space:
                                                0-0xBFFFFFFF for user-thread
 292                                                0-0xFFFFFFFF for kernel-thread
 293                                         */
 294        struct exec_domain *exec_domain;
 295        volatile long need_resched;
 296        unsigned long ptrace;
 297
 298        int lock_depth;         /* Lock depth */
 299
 300/*
 301 * offset 32 begins here on 32-bit platforms. We keep
  302 * all the fields needed by the goodness() loop in schedule()
  303 * in a single cacheline.
 304 */
 305        long counter;
 306        long nice;
 307        unsigned long policy;
 308        struct mm_struct *mm;
 309        int processor;
 310        /*
 311         * cpus_runnable is ~0 if the process is not running on any
 312         * CPU. It's (1 << cpu) if it's running on a CPU. This mask
 313         * is updated under the runqueue lock.
 314         *
 315         * To determine whether a process might run on a CPU, this
 316         * mask is AND-ed with cpus_allowed.
 317         */
 318        unsigned long cpus_runnable, cpus_allowed;
 319        /*
 320         * (only the 'next' pointer fits into the cacheline, but
 321         * that's just fine.)
 322         */
 323        struct list_head run_list;
 324        unsigned long sleep_time;
 325
 326        struct task_struct *next_task, *prev_task;
 327        struct mm_struct *active_mm;
 328        struct list_head local_pages;
 329        unsigned int allocation_order, nr_local_pages;
 330
 331/* task state */
 332        struct linux_binfmt *binfmt;
 333        int exit_code, exit_signal;
 334        int pdeath_signal;  /*  The signal sent when the parent dies  */
 335        /* ??? */
 336        unsigned long personality;
 337        int did_exec:1;
 338        unsigned task_dumpable:1;
 339        pid_t pid;
 340        pid_t pgrp;
 341        pid_t tty_old_pgrp;
 342        pid_t session;
 343        pid_t tgid;
 344        /* boolean value for session group leader */
 345        int leader;
 346        /* 
 347         * pointers to (original) parent process, youngest child, younger sibling,
 348         * older sibling, respectively.  (p->father can be replaced with 
 349         * p->p_pptr->pid)
 350         */
 351        struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
 352        struct list_head thread_group;
 353
 354        /* PID hash table linkage. */
 355        struct task_struct *pidhash_next;
 356        struct task_struct **pidhash_pprev;
 357
 358        wait_queue_head_t wait_chldexit;        /* for wait4() */
 359        struct completion *vfork_done;          /* for vfork() */
 360        unsigned long rt_priority;
 361        unsigned long it_real_value, it_prof_value, it_virt_value;
 362        unsigned long it_real_incr, it_prof_incr, it_virt_incr;
 363        struct timer_list real_timer;
 364        struct tms times;
 365        unsigned long start_time;
 366        long per_cpu_utime[NR_CPUS], per_cpu_stime[NR_CPUS];
 367/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 368        unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
 369        int swappable:1;
 370/* process credentials */
 371        uid_t uid,euid,suid,fsuid;
 372        gid_t gid,egid,sgid,fsgid;
 373        int ngroups;
 374        gid_t   groups[NGROUPS];
 375        kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
 376        int keep_capabilities:1;
 377        struct user_struct *user;
 378/* limits */
 379        struct rlimit rlim[RLIM_NLIMITS];
 380        unsigned short used_math;
 381        char comm[16];
 382/* file system info */
 383        int link_count, total_link_count;
 384        struct tty_struct *tty; /* NULL if no tty */
 385        unsigned int locks; /* How many file locks are being held */
 386/* ipc stuff */
 387        struct sem_undo *semundo;
 388        struct sem_queue *semsleeping;
 389/* CPU-specific state of this task */
 390        struct thread_struct thread;
 391/* filesystem information */
 392        struct fs_struct *fs;
 393/* open file information */
 394        struct files_struct *files;
 395/* namespace */
 396        struct namespace *namespace;
 397/* signal handlers */
 398        spinlock_t sigmask_lock;        /* Protects signal and blocked */
 399        struct signal_struct *sig;
 400
 401        sigset_t blocked;
 402        struct sigpending pending;
 403
 404        unsigned long sas_ss_sp;
 405        size_t sas_ss_size;
 406        int (*notifier)(void *priv);
 407        void *notifier_data;
 408        sigset_t *notifier_mask;
 409        
 410/* Thread group tracking */
 411        u32 parent_exec_id;
 412        u32 self_exec_id;
 413/* Protection of (de-)allocation: mm, files, fs, tty */
 414        spinlock_t alloc_lock;
 415
 416/* journalling filesystem info */
 417        void *journal_info;
 418};
 419
 420/*
 421 * Per process flags
 422 */
 423#define PF_ALIGNWARN    0x00000001      /* Print alignment warning msgs */
 424                                        /* Not implemented yet, only for 486*/
 425#define PF_STARTING     0x00000002      /* being created */
 426#define PF_EXITING      0x00000004      /* getting shut down */
 427#define PF_FORKNOEXEC   0x00000040      /* forked but didn't exec */
 428#define PF_SUPERPRIV    0x00000100      /* used super-user privileges */
 429#define PF_DUMPCORE     0x00000200      /* dumped core */
 430#define PF_SIGNALED     0x00000400      /* killed by a signal */
 431#define PF_MEMALLOC     0x00000800      /* Allocating memory */
  432#define PF_MEMDIE       0x00001000      /* Killed for out-of-memory */
 433#define PF_FREE_PAGES   0x00002000      /* per process page freeing */
 434#define PF_NOIO         0x00004000      /* avoid generating further I/O */
 435#define PF_FSTRANS      0x00008000      /* inside a filesystem transaction */
 436
 437#define PF_USEDFPU      0x00100000      /* task used FPU this quantum (SMP) */
 438
 439/*
 440 * Ptrace flags
 441 */
 442
 443#define PT_PTRACED      0x00000001
 444#define PT_TRACESYS     0x00000002
 445#define PT_DTRACE       0x00000004      /* delayed trace (used on m68k, i386) */
 446#define PT_TRACESYSGOOD 0x00000008
 447#define PT_PTRACE_CAP   0x00000010      /* ptracer can follow suid-exec */
 448
 449#define is_dumpable(tsk)    ((tsk)->task_dumpable && (tsk)->mm && (tsk)->mm->dumpable)
 450
 451/*
  452 * Limit the stack to some sane default: root can always
 453 * increase this limit if needed..  8MB seems reasonable.
 454 */
 455#define _STK_LIM        (8*1024*1024)
 456
 457#define DEF_COUNTER     (10*HZ/100)     /* 100 ms time slice */
 458#define MAX_COUNTER     (20*HZ/100)
 459#define DEF_NICE        (0)
 460
 461extern void yield(void);
 462
 463/*
 464 * The default (Linux) execution domain.
 465 */
 466extern struct exec_domain       default_exec_domain;
 467
 468/*
  469 *  INIT_TASK is used to set up the first task table; touch it at
  470 * your own risk! Base=0, limit=0x1fffff (=2MB)
 471 */
 472#define INIT_TASK(tsk)  \
 473{                                                                       \
 474    state:              0,                                              \
 475    flags:              0,                                              \
 476    sigpending:         0,                                              \
 477    addr_limit:         KERNEL_DS,                                      \
 478    exec_domain:        &default_exec_domain,                           \
 479    lock_depth:         -1,                                             \
 480    counter:            DEF_COUNTER,                                    \
 481    nice:               DEF_NICE,                                       \
 482    policy:             SCHED_OTHER,                                    \
 483    mm:                 NULL,                                           \
 484    active_mm:          &init_mm,                                       \
 485    cpus_runnable:      ~0UL,                                           \
 486    cpus_allowed:       ~0UL,                                           \
 487    run_list:           LIST_HEAD_INIT(tsk.run_list),                   \
 488    next_task:          &tsk,                                           \
 489    prev_task:          &tsk,                                           \
 490    p_opptr:            &tsk,                                           \
 491    p_pptr:             &tsk,                                           \
 492    thread_group:       LIST_HEAD_INIT(tsk.thread_group),               \
 493    wait_chldexit:      __WAIT_QUEUE_HEAD_INITIALIZER(tsk.wait_chldexit),\
 494    real_timer:         {                                               \
 495        function:               it_real_fn                              \
 496    },                                                                  \
 497    cap_effective:      CAP_INIT_EFF_SET,                               \
 498    cap_inheritable:    CAP_INIT_INH_SET,                               \
 499    cap_permitted:      CAP_FULL_SET,                                   \
 500    keep_capabilities:  0,                                              \
 501    rlim:               INIT_RLIMITS,                                   \
 502    user:               INIT_USER,                                      \
 503    comm:               "swapper",                                      \
 504    thread:             INIT_THREAD,                                    \
 505    fs:                 &init_fs,                                       \
 506    files:              &init_files,                                    \
 507    sigmask_lock:       SPIN_LOCK_UNLOCKED,                             \
 508    sig:                &init_signals,                                  \
 509    pending:            { NULL, &tsk.pending.head, {{0}}},              \
 510    blocked:            {{0}},                                          \
 511    alloc_lock:         SPIN_LOCK_UNLOCKED,                             \
 512    journal_info:       NULL,                                           \
 513}
 514
 515
 516#ifndef INIT_TASK_SIZE
 517# define INIT_TASK_SIZE 2048*sizeof(long)
 518#endif
 519
 520union task_union {
 521        struct task_struct task;
 522        unsigned long stack[INIT_TASK_SIZE/sizeof(long)];
 523};
 524
 525extern union task_union init_task_union;
 526
 527extern struct   mm_struct init_mm;
 528extern struct task_struct *init_tasks[NR_CPUS];
 529
  530/* PID hashing. (shouldn't this be dynamic?) */
 531#define PIDHASH_SZ (4096 >> 2)
 532extern struct task_struct *pidhash[PIDHASH_SZ];
 533
 534#define pid_hashfn(x)   ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
 535
 536static inline void hash_pid(struct task_struct *p)
 537{
 538        struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];
 539
 540        if((p->pidhash_next = *htable) != NULL)
 541                (*htable)->pidhash_pprev = &p->pidhash_next;
 542        *htable = p;
 543        p->pidhash_pprev = htable;
 544}
 545
 546static inline void unhash_pid(struct task_struct *p)
 547{
 548        if(p->pidhash_next)
 549                p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
 550        *p->pidhash_pprev = p->pidhash_next;
 551}
 552
 553static inline struct task_struct *find_task_by_pid(int pid)
 554{
 555        struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];
 556
 557        for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
 558                ;
 559
 560        return p;
 561}
 562
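/*
 * Illustrative sketch (guarded by "#if 0", never compiled):
 * pid_hashfn() folds a pid into one of PIDHASH_SZ chains and the
 * pidhash_next/pidhash_pprev fields form the chain links.  Lookups
 * belong under tasklist_lock, and the returned task pointer is only
 * stable while that lock is held.  example_signal_pid() is a made-up
 * name.
 */
#if 0   /* illustrative sketch only */
static int example_signal_pid(pid_t pid)
{
        struct task_struct *p;
        int ret = -ESRCH;

        read_lock(&tasklist_lock);
        p = find_task_by_pid(pid);
        if (p)
                ret = send_sig(SIGHUP, p, 1);
        read_unlock(&tasklist_lock);

        return ret;
}
#endif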
 563#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
 564
 565static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
 566{
 567        tsk->processor = cpu;
 568        tsk->cpus_runnable = 1UL << cpu;
 569}
 570
 571static inline void task_release_cpu(struct task_struct *tsk)
 572{
 573        tsk->cpus_runnable = ~0UL;
 574}
 575
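/*
 * Illustrative sketch (guarded by "#if 0", never compiled): because
 * cpus_runnable is ~0UL while the task runs nowhere and (1UL << cpu)
 * while it runs on that CPU, a single AND against cpus_allowed
 * answers "may this task be scheduled on this CPU right now?".
 * example_can_run_on() is a made-up name for the test the scheduler
 * does inline.
 */
#if 0   /* illustrative sketch only */
static inline int example_can_run_on(struct task_struct *tsk, unsigned int cpu)
{
        return (tsk->cpus_runnable & tsk->cpus_allowed & (1UL << cpu)) != 0;
}
#endif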
 576/* per-UID process charging. */
 577extern struct user_struct * alloc_uid(uid_t);
 578extern void free_uid(struct user_struct *);
 579extern void switch_uid(struct user_struct *);
 580
 581#include <asm/current.h>
 582
 583extern unsigned long volatile jiffies;
 584extern unsigned long itimer_ticks;
 585extern unsigned long itimer_next;
 586extern struct timeval xtime;
 587extern void do_timer(struct pt_regs *);
 588
 589extern unsigned int * prof_buffer;
 590extern unsigned long prof_len;
 591extern unsigned long prof_shift;
 592
 593#define CURRENT_TIME (xtime.tv_sec)
 594
 595extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
 596extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
 597extern void FASTCALL(sleep_on(wait_queue_head_t *q));
 598extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
 599                                      signed long timeout));
 600extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
 601extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
 602                                                    signed long timeout));
 603extern int FASTCALL(wake_up_process(struct task_struct * tsk));
 604
 605#define wake_up(x)                      __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 606#define wake_up_nr(x, nr)               __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
 607#define wake_up_all(x)                  __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
 608#define wake_up_sync(x)                 __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
 609#define wake_up_sync_nr(x, nr)          __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
 610#define wake_up_interruptible(x)        __wake_up((x),TASK_INTERRUPTIBLE, 1)
 611#define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
 612#define wake_up_interruptible_all(x)    __wake_up((x),TASK_INTERRUPTIBLE, 0)
 613#define wake_up_interruptible_sync(x)   __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
 614#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE,  nr)
 615asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
 616
 617extern int in_group_p(gid_t);
 618extern int in_egroup_p(gid_t);
 619
 620extern void proc_caches_init(void);
 621extern void flush_signals(struct task_struct *);
 622extern void flush_signal_handlers(struct task_struct *);
 623extern void sig_exit(int, int, struct siginfo *);
 624extern int dequeue_signal(sigset_t *, siginfo_t *);
 625extern void block_all_signals(int (*notifier)(void *priv), void *priv,
 626                              sigset_t *mask);
 627extern void unblock_all_signals(void);
 628extern int send_sig_info(int, struct siginfo *, struct task_struct *);
 629extern int force_sig_info(int, struct siginfo *, struct task_struct *);
 630extern int kill_pg_info(int, struct siginfo *, pid_t);
 631extern int kill_sl_info(int, struct siginfo *, pid_t);
 632extern int kill_proc_info(int, struct siginfo *, pid_t);
 633extern void notify_parent(struct task_struct *, int);
 634extern void do_notify_parent(struct task_struct *, int);
 635extern void force_sig(int, struct task_struct *);
 636extern int send_sig(int, struct task_struct *, int);
 637extern int kill_pg(pid_t, int, int);
 638extern int kill_sl(pid_t, int, int);
 639extern int kill_proc(pid_t, int, int);
 640extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
 641extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
 642
 643static inline int signal_pending(struct task_struct *p)
 644{
 645        return (p->sigpending != 0);
 646}
 647
 648/*
 649 * Re-calculate pending state from the set of locally pending
 650 * signals, globally pending signals, and blocked signals.
 651 */
 652static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 653{
 654        unsigned long ready;
 655        long i;
 656
 657        switch (_NSIG_WORDS) {
 658        default:
 659                for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
 660                        ready |= signal->sig[i] &~ blocked->sig[i];
 661                break;
 662
 663        case 4: ready  = signal->sig[3] &~ blocked->sig[3];
 664                ready |= signal->sig[2] &~ blocked->sig[2];
 665                ready |= signal->sig[1] &~ blocked->sig[1];
 666                ready |= signal->sig[0] &~ blocked->sig[0];
 667                break;
 668
 669        case 2: ready  = signal->sig[1] &~ blocked->sig[1];
 670                ready |= signal->sig[0] &~ blocked->sig[0];
 671                break;
 672
 673        case 1: ready  = signal->sig[0] &~ blocked->sig[0];
 674        }
 675        return ready != 0;
 676}
 677
 678/* Reevaluate whether the task has signals pending delivery.
 679   This is required every time the blocked sigset_t changes.
  680   All callers should hold t->sigmask_lock.  */
 681
 682static inline void recalc_sigpending(struct task_struct *t)
 683{
 684        t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
 685}
 686
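/*
 * Illustrative sketch (guarded by "#if 0", never compiled): whenever
 * ->blocked changes, the cached ->sigpending flag has to be
 * recomputed under ->sigmask_lock.  example_block_signal() is a
 * made-up name for the usual pattern.
 */
#if 0   /* illustrative sketch only */
static void example_block_signal(int sig)
{
        spin_lock_irq(&current->sigmask_lock);
        sigaddset(&current->blocked, sig);
        recalc_sigpending(current);
        spin_unlock_irq(&current->sigmask_lock);
}
#endif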
 687/* True if we are on the alternate signal stack.  */
 688
 689static inline int on_sig_stack(unsigned long sp)
 690{
 691        return (sp - current->sas_ss_sp < current->sas_ss_size);
 692}
 693
 694static inline int sas_ss_flags(unsigned long sp)
 695{
 696        return (current->sas_ss_size == 0 ? SS_DISABLE
 697                : on_sig_stack(sp) ? SS_ONSTACK : 0);
 698}
 699
 700extern int request_irq(unsigned int,
 701                       void (*handler)(int, void *, struct pt_regs *),
 702                       unsigned long, const char *, void *);
 703extern void free_irq(unsigned int, void *);
 704
 705/*
  706 * This has now become a routine instead of a macro; it sets a flag if
 707 * it returns true (to do BSD-style accounting where the process is flagged
 708 * if it uses root privs). The implication of this is that you should do
 709 * normal permissions checks first, and check suser() last.
 710 *
 711 * [Dec 1997 -- Chris Evans]
 712 * For correctness, the above considerations need to be extended to
 713 * fsuser(). This is done, along with moving fsuser() checks to be
 714 * last.
 715 *
  716 * These will be removed, but in the meantime, when the SECURE_NOROOT
 717 * flag is set, uids don't grant privilege.
 718 */
 719static inline int suser(void)
 720{
 721        if (!issecure(SECURE_NOROOT) && current->euid == 0) { 
 722                current->flags |= PF_SUPERPRIV;
 723                return 1;
 724        }
 725        return 0;
 726}
 727
 728static inline int fsuser(void)
 729{
 730        if (!issecure(SECURE_NOROOT) && current->fsuid == 0) {
 731                current->flags |= PF_SUPERPRIV;
 732                return 1;
 733        }
 734        return 0;
 735}
 736
 737/*
 738 * capable() checks for a particular capability.  
 739 * New privilege checks should use this interface, rather than suser() or
 740 * fsuser(). See include/linux/capability.h for defined capabilities.
 741 */
 742
 743static inline int capable(int cap)
 744{
 745#if 1 /* ok now */
 746        if (cap_raised(current->cap_effective, cap))
 747#else
 748        if (cap_is_fs_cap(cap) ? current->fsuid == 0 : current->euid == 0)
 749#endif
 750        {
 751                current->flags |= PF_SUPERPRIV;
 752                return 1;
 753        }
 754        return 0;
 755}
 756
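/*
 * Illustrative sketch (guarded by "#if 0", never compiled): the usual
 * shape of a capable() check at the top of a privileged operation.
 * example_privileged_op() is a made-up name.
 */
#if 0   /* illustrative sketch only */
static int example_privileged_op(void)
{
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        /* ... do the privileged work ... */
        return 0;
}
#endif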
 757/*
 758 * Routines for handling mm_structs
 759 */
 760extern struct mm_struct * mm_alloc(void);
 761
 762extern struct mm_struct * start_lazy_tlb(void);
 763extern void end_lazy_tlb(struct mm_struct *mm);
 764
 765/* mmdrop drops the mm and the page tables */
 766extern inline void FASTCALL(__mmdrop(struct mm_struct *));
 767static inline void mmdrop(struct mm_struct * mm)
 768{
 769        if (atomic_dec_and_test(&mm->mm_count))
 770                __mmdrop(mm);
 771}
 772
 773/* mmput gets rid of the mappings and all user-space */
 774extern void mmput(struct mm_struct *);
  775/* Remove the current task's stale references to the old mm_struct */
 776extern void mm_release(void);
 777
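/*
 * Illustrative sketch (guarded by "#if 0", never compiled): mm_users
 * counts users of the address space and is dropped with mmput();
 * mm_count counts references to the struct mm_struct itself (all the
 * users together hold exactly one) and is dropped with mmdrop().  A
 * kernel thread that only borrows an address space, lazy-TLB style,
 * touches just mm_count.  example_borrow_mm() is a made-up name.
 */
#if 0   /* illustrative sketch only */
static void example_borrow_mm(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_count);      /* pin the struct itself */

        /* ... run for a while with this mm as active_mm ... */

        mmdrop(mm);                     /* frees it if we held the last reference */
}
#endif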
 778/*
 779 * Routines for handling the fd arrays
 780 */
 781extern struct file ** alloc_fd_array(int);
 782extern int expand_fd_array(struct files_struct *, int nr);
 783extern void free_fd_array(struct file **, int);
 784
 785extern fd_set *alloc_fdset(int);
 786extern int expand_fdset(struct files_struct *, int nr);
 787extern void free_fdset(fd_set *, int);
 788
 789extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
 790extern void flush_thread(void);
 791extern void exit_thread(void);
 792
 793extern void exit_mm(struct task_struct *);
 794extern void exit_files(struct task_struct *);
 795extern void exit_sighand(struct task_struct *);
 796
 797extern void reparent_to_init(void);
 798extern void daemonize(void);
 799
 800extern int do_execve(char *, char **, char **, struct pt_regs *);
 801extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
 802
 803extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
 804extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
 805extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
 806
 807extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 808
 809#define __wait_event(wq, condition)                                     \
 810do {                                                                    \
 811        wait_queue_t __wait;                                            \
 812        init_waitqueue_entry(&__wait, current);                         \
 813                                                                        \
 814        add_wait_queue(&wq, &__wait);                                   \
 815        for (;;) {                                                      \
 816                set_current_state(TASK_UNINTERRUPTIBLE);                \
 817                if (condition)                                          \
 818                        break;                                          \
 819                schedule();                                             \
 820        }                                                               \
 821        current->state = TASK_RUNNING;                                  \
 822        remove_wait_queue(&wq, &__wait);                                \
 823} while (0)
 824
 825#define wait_event(wq, condition)                                       \
 826do {                                                                    \
 827        if (condition)                                                  \
 828                break;                                                  \
 829        __wait_event(wq, condition);                                    \
 830} while (0)
 831
 832#define __wait_event_interruptible(wq, condition, ret)                  \
 833do {                                                                    \
 834        wait_queue_t __wait;                                            \
 835        init_waitqueue_entry(&__wait, current);                         \
 836                                                                        \
 837        add_wait_queue(&wq, &__wait);                                   \
 838        for (;;) {                                                      \
 839                set_current_state(TASK_INTERRUPTIBLE);                  \
 840                if (condition)                                          \
 841                        break;                                          \
 842                if (!signal_pending(current)) {                         \
 843                        schedule();                                     \
 844                        continue;                                       \
 845                }                                                       \
 846                ret = -ERESTARTSYS;                                     \
 847                break;                                                  \
 848        }                                                               \
 849        current->state = TASK_RUNNING;                                  \
 850        remove_wait_queue(&wq, &__wait);                                \
 851} while (0)
 852        
 853#define wait_event_interruptible(wq, condition)                         \
 854({                                                                      \
 855        int __ret = 0;                                                  \
 856        if (!(condition))                                               \
 857                __wait_event_interruptible(wq, condition, __ret);       \
 858        __ret;                                                          \
 859})
 860
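/*
 * Illustrative sketch (guarded by "#if 0", never compiled): the
 * wait_event*() macros set the task state before re-testing the
 * condition, so a wake_up() racing with the test cannot be lost.  A
 * minimal producer/consumer pair; the example_* names and the
 * example_data_ready flag are invented for this sketch.
 */
#if 0   /* illustrative sketch only */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_data_ready;

static int example_consumer(void)
{
        /* returns -ERESTARTSYS if a signal arrived first */
        return wait_event_interruptible(example_wq, example_data_ready);
}

static void example_producer(void)
{
        example_data_ready = 1;
        wake_up_interruptible(&example_wq);
}
#endif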
 861#define REMOVE_LINKS(p) do { \
 862        (p)->next_task->prev_task = (p)->prev_task; \
 863        (p)->prev_task->next_task = (p)->next_task; \
 864        if ((p)->p_osptr) \
 865                (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
 866        if ((p)->p_ysptr) \
 867                (p)->p_ysptr->p_osptr = (p)->p_osptr; \
 868        else \
 869                (p)->p_pptr->p_cptr = (p)->p_osptr; \
 870        } while (0)
 871
 872#define SET_LINKS(p) do { \
 873        (p)->next_task = &init_task; \
 874        (p)->prev_task = init_task.prev_task; \
 875        init_task.prev_task->next_task = (p); \
 876        init_task.prev_task = (p); \
 877        (p)->p_ysptr = NULL; \
 878        if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
 879                (p)->p_osptr->p_ysptr = p; \
 880        (p)->p_pptr->p_cptr = p; \
 881        } while (0)
 882
 883#define for_each_task(p) \
 884        for (p = &init_task ; (p = p->next_task) != &init_task ; )
 885
 886#define for_each_thread(task) \
 887        for (task = next_thread(current) ; task != current ; task = next_thread(task))
 888
 889#define next_thread(p) \
 890        list_entry((p)->thread_group.next, struct task_struct, thread_group)
 891
 892#define thread_group_leader(p)  (p->pid == p->tgid)
 893
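/*
 * Illustrative sketch (guarded by "#if 0", never compiled):
 * for_each_task() walks the circular next_task/prev_task list rooted
 * at init_task; the walk, like SET_LINKS()/REMOVE_LINKS(), belongs
 * under tasklist_lock.  example_count_tasks_of_uid() is a made-up
 * name.
 */
#if 0   /* illustrative sketch only */
static int example_count_tasks_of_uid(uid_t uid)
{
        struct task_struct *p;
        int count = 0;

        read_lock(&tasklist_lock);
        for_each_task(p) {
                if (p->uid == uid)
                        count++;
        }
        read_unlock(&tasklist_lock);

        return count;
}
#endif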
 894static inline void del_from_runqueue(struct task_struct * p)
 895{
 896        nr_running--;
 897        p->sleep_time = jiffies;
 898        list_del(&p->run_list);
 899        p->run_list.next = NULL;
 900}
 901
 902static inline int task_on_runqueue(struct task_struct *p)
 903{
 904        return (p->run_list.next != NULL);
 905}
 906
 907static inline void unhash_process(struct task_struct *p)
 908{
 909        if (task_on_runqueue(p))
 910                out_of_line_bug();
 911        write_lock_irq(&tasklist_lock);
 912        nr_threads--;
 913        unhash_pid(p);
 914        REMOVE_LINKS(p);
 915        list_del(&p->thread_group);
 916        write_unlock_irq(&tasklist_lock);
 917}
 918
 919/* Protects ->fs, ->files, ->mm, and synchronises with wait4().  Nests inside tasklist_lock */
 920static inline void task_lock(struct task_struct *p)
 921{
 922        spin_lock(&p->alloc_lock);
 923}
 924
 925static inline void task_unlock(struct task_struct *p)
 926{
 927        spin_unlock(&p->alloc_lock);
 928}
 929
 930/* write full pathname into buffer and return start of pathname */
 931static inline char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
 932                                char *buf, int buflen)
 933{
 934        char *res;
 935        struct vfsmount *rootmnt;
 936        struct dentry *root;
 937        read_lock(&current->fs->lock);
 938        rootmnt = mntget(current->fs->rootmnt);
 939        root = dget(current->fs->root);
 940        read_unlock(&current->fs->lock);
 941        spin_lock(&dcache_lock);
 942        res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
 943        spin_unlock(&dcache_lock);
 944        dput(root);
 945        mntput(rootmnt);
 946        return res;
 947}
 948
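/*
 * Illustrative sketch (guarded by "#if 0", never compiled): d_path()
 * builds the name from the end of the buffer backwards and returns a
 * pointer *into* buf rather than buf itself.
 * example_print_file_path() is a made-up name; struct file comes
 * from linux/fs.h.
 */
#if 0   /* illustrative sketch only */
static void example_print_file_path(struct file *file)
{
        char *buf, *name;

        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf)
                return;

        name = d_path(file->f_dentry, file->f_vfsmnt, buf, PAGE_SIZE);
        printk("file is %s\n", name);

        free_page((unsigned long) buf);
}
#endif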
 949static inline int need_resched(void)
 950{
 951        return (unlikely(current->need_resched));
 952}
 953
 954extern void __cond_resched(void);
 955static inline void cond_resched(void)
 956{
 957        if (need_resched())
 958                __cond_resched();
 959}
 960
 961#endif /* __KERNEL__ */
 962#endif
 963