#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <asm/param.h>	/* for HZ */

#include <linux/config.h>
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>

#include <asm/system.h>
#include <asm/semaphore.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/securebits.h>
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>

#include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
struct exec_domain;

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
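
/*
 * Illustrative use (a sketch, not a prescribed API): a subsystem can
 * start a kernel thread that shares fs, files and signal handlers with
 * its creator via
 *
 *	kernel_thread(my_thread_fn, my_data, CLONE_KERNEL);
 *
 * where my_thread_fn and my_data are hypothetical names.
 */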

/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ)		/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
extern unsigned long total_forks;
extern int nr_threads;
extern int last_pid;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>

#include <asm/processor.h>

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define TASK_STOPPED		4
#define TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_NONINTERACTIVE	64

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a memory barrier so that the write of
 * current->state is correctly serialised wrt the caller's subsequent
 * test of whether to actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use
 * __set_current_state().
 */
#define __set_current_state(state_value)		\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)			\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3

struct sched_param {
	int sched_priority;
};
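
/*
 * Example (a sketch): switching a task to the round-robin real-time
 * class from kernel code, assuming a valid task pointer p:
 *
 *	struct sched_param param = { .sched_priority = 50 };
 *	sched_setscheduler(p, SCHED_RR, &param);
 *
 * sched_priority must lie in 1..MAX_USER_RT_PRIO-1 for SCHED_FIFO and
 * SCHED_RR, and must be 0 for SCHED_NORMAL/SCHED_BATCH.
 */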

#ifdef __KERNEL__

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

typedef struct task_struct task_t;

extern void sched_init(void);
extern void sched_init_smp(void);
extern void init_idle(task_t *idle, int cpu);

extern cpumask_t nohz_cpu_mask;

extern void show_state(void);
extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL
 * for the current task), SP is the stack pointer of the first frame that
 * should be shown in the back trace (or NULL if the entire call-chain of
 * the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void spawn_softlockup_task(void);
extern void touch_softlockup_watchdog(void);
#else
static inline void softlockup_tick(void)
{
}
static inline void spawn_softlockup_task(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);

struct namespace;

/* Maximum number of active map areas.. This is a random (large) number */
#define DEFAULT_MAX_MAP_COUNT	65536

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * The mm counters are not protected by its page_table_lock,
 * so must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
typedef atomic_long_t mm_counter_t;

#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
 * The mm counters are protected by its page_table_lock,
 * so can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--
typedef unsigned long mm_counter_t;

#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)
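
/*
 * Example (a sketch): the RSS high-watermark is folded in before the
 * counters can shrink, e.g. ahead of unmapping pages, so that peak
 * usage is not lost (mm and nr_unmapped are hypothetical here):
 *
 *	update_hiwater_rss(mm);
 *	add_mm_counter(mm, file_rss, -nr_unmapped);
 */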

struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects page tables and some counters */

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	/* Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	mm_counter_t _file_rss;
	mm_counter_t _anon_rss;

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE];	/* for /proc/PID/auxv */

	unsigned dumpable:2;
	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Token based thrashing protection. */
	unsigned long swap_token_time;
	char recent_pagein;

	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;

	/* aio bits */
	rwlock_t ioctx_list_lock;
	struct kioctx *ioctx_list;
};

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	task_t			*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	struct task_struct	*group_exit_task;
	int			notify_count;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags;	/* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct task_struct *tsk;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/* job control IDs */
	pid_t pgrp;
	pid_t tty_old_pgrp;
	pid_t session;
	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty;	/* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sched_time;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read without locking.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

	struct list_head cpu_timers[3];

	/* keep the process-shared keyrings here so that they do the right
	 * thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
	struct key *session_keyring;	/* keyring inherited over fork */
	struct key *process_keyring;	/* keyring private to this process */
#endif
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */

/*
 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
 * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
 * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
 * values are inverted: lower p->prio value means higher priority.
 *
 * The MAX_USER_RT_PRIO value allows the actual maximum
 * RT priority to be separate from the value exported to
 * user-space.  This allows kernel threads to set their
 * priority to a value higher than any user task. Note:
 * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
 */

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO

#define MAX_PRIO		(MAX_RT_PRIO + 40)

#define rt_task(p)		(unlikely((p)->prio < MAX_RT_PRIO))
#define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct list_head uidhash_list;
	uid_t uid;
};

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)

typedef struct prio_array prio_array_t;
struct backing_dev_info;
struct reclaim_state;
#ifdef CONFIG_SCHEDSTATS
struct sched_info {
	/* cumulative counters */
	unsigned long	cpu_time,	/* time spent on the cpu */
			run_delay,	/* time spent waiting on a runqueue */
			pcnt;		/* # of timeslices run on this cpu */

	/* timestamps */
	unsigned long	last_arrival,	/* when we last ran on a cpu */
			last_queued;	/* when we were last queued to run */
};

extern struct file_operations proc_schedstat_operations;
#endif

enum idle_type
{
	SCHED_IDLE,
	NOT_IDLE,
	NEWLY_IDLE,
	MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SCHED_LOAD_SCALE	128UL	/* increase resolution of load */

#define SD_LOAD_BALANCE		1	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	2	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		4	/* Balance on exec */
#define SD_BALANCE_FORK		8	/* Balance on fork, clone */
#define SD_WAKE_IDLE		16	/* Wake to idle CPU on task wakeup */
#define SD_WAKE_AFFINE		32	/* Wake task to waking CPU */
#define SD_WAKE_BALANCE		64	/* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER	128	/* Domain members share cpu power */

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	cpumask_t cpumask;

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU. This is read only (except for setup, hotplug CPU).
	 */
	unsigned long cpu_power;
};

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	cpumask_t span;			/* span of all CPUs in this domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int per_cpu_gain;	/* CPU % gained by adding domain cpus */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	int flags;			/* See SD_* */

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed;	/* initialise to 0 */

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned long lb_cnt[MAX_IDLE_TYPES];
	unsigned long lb_failed[MAX_IDLE_TYPES];
	unsigned long lb_balanced[MAX_IDLE_TYPES];
	unsigned long lb_imbalance[MAX_IDLE_TYPES];
	unsigned long lb_gained[MAX_IDLE_TYPES];
	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
	unsigned long lb_nobusyq[MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned long alb_cnt;
	unsigned long alb_failed;
	unsigned long alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned long sbe_cnt;
	unsigned long sbe_balanced;
	unsigned long sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned long sbf_cnt;
	unsigned long sbf_balanced;
	unsigned long sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned long ttwu_wake_remote;
	unsigned long ttwu_move_affine;
	unsigned long ttwu_move_balance;
#endif
};

extern void partition_sched_domains(cpumask_t *partition1,
				    cpumask_t *partition2);

/*
 * Maximum cache size the migration-costs auto-tuning code will
 * search from:
 */
extern unsigned int max_cache_size;

#endif	/* CONFIG_SMP */

struct io_context;			/* See blkdev.h */
void exit_io_context(void);
struct cpuset;

#define NGROUPS_SMALL		32
#define NGROUPS_PER_BLOCK	((int)(PAGE_SIZE / sizeof(gid_t)))
struct group_info {
	int ngroups;
	atomic_t usage;
	gid_t small_block[NGROUPS_SMALL];
	int nblocks;
	gid_t *blocks[0];
};

/*
 * get_group_info() must be called with the owning task locked (via
 * task_lock()) when task != current.  The reason being that the vast
 * majority of callers are looking at current->group_info, which can not
 * be changed except by the current task.  Changing current->group_info
 * requires the task lock, too.
 */
#define get_group_info(group_info) do { \
	atomic_inc(&(group_info)->usage); \
} while (0)

#define put_group_info(group_info) do { \
	if (atomic_dec_and_test(&(group_info)->usage)) \
		groups_free(group_info); \
} while (0)

extern struct group_info *groups_alloc(int gidsetsize);
extern void groups_free(struct group_info *group_info);
extern int set_current_groups(struct group_info *group_info);
extern int groups_search(struct group_info *group_info, gid_t grp);

/* access the groups "array" with this macro */
#define GROUP_AT(gi, i) \
	((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
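
/*
 * GROUP_AT() hides the two-level layout: small sets live in
 * small_block[] (with blocks[0] pointing at it), larger sets use
 * page-sized blocks.  For example, with 4K pages and a 32-bit gid_t,
 * NGROUPS_PER_BLOCK is 1024, so GROUP_AT(gi, 1500) reads
 * gi->blocks[1][476].
 */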

#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;
struct mempolicy;
struct pipe_inode_info;

enum sleep_type {
	SLEEP_NORMAL,
	SLEEP_NONINTERACTIVE,
	SLEEP_INTERACTIVE,
	SLEEP_INTERRUPTED,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	struct thread_info *thread_info;
	atomic_t usage;
	unsigned long flags;	/* per process flags, defined below */
	unsigned long ptrace;

	int lock_depth;		/* BKL lock depth */

#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	int oncpu;
#endif
	int prio, static_prio;
	struct list_head run_list;
	prio_array_t *array;

	unsigned short ioprio;
	unsigned int btrace_seq;

	unsigned long sleep_avg;
	unsigned long long timestamp, last_ran;
	unsigned long long sched_time;	/* sched_clock time spent running */
	enum sleep_type sleep_type;

	unsigned long policy;
	cpumask_t cpus_allowed;
	unsigned int time_slice, first_time_slice;

#ifdef CONFIG_SCHEDSTATS
	struct sched_info sched_info;
#endif

	struct list_head tasks;
	/*
	 * ptrace_list/ptrace_children forms the list of my children
	 * that were stolen by a ptracer.
	 */
	struct list_head ptrace_children;
	struct list_head ptrace_list;

	struct mm_struct *mm, *active_mm;

/* task state */
	struct linux_binfmt *binfmt;
	long exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;	/* The signal sent when the parent dies */

	unsigned long personality;
	unsigned did_exec:1;
	pid_t pid;
	pid_t tgid;
	/*
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with
	 * p->parent->pid)
	 */
	struct task_struct *real_parent;	/* real parent process (when being debugged) */
	struct task_struct *parent;		/* parent process */
	/*
	 * children/sibling forms the list of my children plus the
	 * tasks I'm ptracing.
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];
	struct list_head thread_group;

	struct completion *vfork_done;		/* for vfork() */
	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */

	unsigned long rt_priority;
	cputime_t utime, stime;
	unsigned long nvcsw, nivcsw;	/* context switch counts */
	struct timespec start_time;
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
	unsigned long min_flt, maj_flt;

	cputime_t it_prof_expires, it_virt_expires;
	unsigned long long it_sched_expires;
	struct list_head cpu_timers[3];

/* process credentials */
	uid_t uid,euid,suid,fsuid;
	gid_t gid,egid,sgid,fsgid;
	struct group_info *group_info;
	kernel_cap_t cap_effective, cap_inheritable, cap_permitted;
	unsigned keep_capabilities:1;
	struct user_struct *user;
#ifdef CONFIG_KEYS
	struct key *request_key_auth;	/* assumed request_key authority */
	struct key *thread_keyring;	/* keyring private to this thread */
	unsigned char jit_keyring;	/* default keyring to attach requested keys to */
#endif
	int oomkilladj;			/* OOM kill score adjustment (bit shift) */
	char comm[TASK_COMM_LEN];	/* executable name excluding path
					   - access with [gs]et_task_comm (which lock
					     it with task_lock())
					   - initialized normally by flush_old_exec */
/* file system info */
	int link_count, total_link_count;
/* ipc stuff */
	struct sysv_sem sysvsem;
/* CPU-specific state of this task */
	struct thread_struct thread;
/* filesystem information */
	struct fs_struct *fs;
/* open file information */
	struct files_struct *files;
/* namespace */
	struct namespace *namespace;
/* signal handlers */
	struct signal_struct *signal;
	struct sighand_struct *sighand;

	sigset_t blocked, real_blocked;
	sigset_t saved_sigmask;		/* To be restored with TIF_RESTORE_SIGMASK */
	struct sigpending pending;

	unsigned long sas_ss_sp;
	size_t sas_ss_size;
	int (*notifier)(void *priv);
	void *notifier_data;
	sigset_t *notifier_mask;

	void *security;
	struct audit_context *audit_context;
	seccomp_t seccomp;

/* Thread group tracking */
	u32 parent_exec_id;
	u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
	spinlock_t alloc_lock;
/* Protection of proc_dentry: nesting proc_lock, dcache_lock, write_lock_irq(&tasklist_lock); */
	spinlock_t proc_lock;

#ifdef CONFIG_DEBUG_MUTEXES
	/* mutex deadlock detection */
	struct mutex_waiter *blocked_on;
#endif

/* journalling filesystem info */
	void *journal_info;

/* VM state */
	struct reclaim_state *reclaim_state;

	struct dentry *proc_dentry;
	struct backing_dev_info *backing_dev_info;

	struct io_context *io_context;

	unsigned long ptrace_message;
	siginfo_t *last_siginfo;	/* For ptrace use. */
	/*
	 * current io wait handle: wait queue entry to use for io waits
	 * If this thread is processing aio, this points at the waitqueue
	 * inside the currently handled kiocb. It may be NULL (i.e. default
	 * to a stack based synchronous wait) if its doing sync IO.
	 */
	wait_queue_t *io_wait;
/* i/o counters (bytes read/written, #syscalls) */
	u64 rchar, wchar, syscr, syscw;
#if defined(CONFIG_BSD_PROCESS_ACCT)
	u64 acct_rss_mem1;	/* accumulated rss usage */
	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
	clock_t acct_stimexpd;	/* clock_t-converted stime since last update */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *mempolicy;
	short il_next;
#endif
#ifdef CONFIG_CPUSETS
	struct cpuset *cpuset;
	nodemask_t mems_allowed;
	int cpuset_mems_generation;
	int cpuset_mem_spread_rotor;
#endif
	struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
	struct compat_robust_list_head __user *compat_robust_list;
#endif

	atomic_t fs_excl;	/* holding fs exclusive resources */
	struct rcu_head rcu;

	/*
	 * cache last used pipe for splice
	 */
	struct pipe_inode_info *splice_pipe;
};

static inline pid_t process_group(struct task_struct *tsk)
{
	return tsk->signal->pgrp;
}

/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state).
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 */
static inline int pid_alive(struct task_struct *p)
{
	return p->pids[PIDTYPE_PID].pid != NULL;
}

extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)

extern void __put_task_struct(struct task_struct *t);

static inline void put_task_struct(struct task_struct *t)
{
	if (atomic_dec_and_test(&t->usage))
		__put_task_struct(t);
}
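
/*
 * Usage sketch: a reference taken with get_task_struct() pins the
 * task_struct itself (not the pid), so a stored pointer stays valid
 * even across the task's exit:
 *
 *	get_task_struct(tsk);
 *	...use tsk, possibly after it has exited...
 *	put_task_struct(tsk);
 */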

/*
 * Per process flags
 */
#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
					/* Not implemented yet, only for 486 */
#define PF_STARTING	0x00000002	/* being created */
#define PF_EXITING	0x00000004	/* getting shut down */
#define PF_DEAD		0x00000008	/* Dead */
#define PF_FORKNOEXEC	0x00000040	/* forked but didn't exec */
#define PF_SUPERPRIV	0x00000100	/* used super-user privileges */
#define PF_DUMPCORE	0x00000200	/* dumped core */
#define PF_SIGNALED	0x00000400	/* killed by a signal */
#define PF_MEMALLOC	0x00000800	/* Allocating memory */
#define PF_FLUSHER	0x00001000	/* responsible for disk writeback */
#define PF_USED_MATH	0x00002000	/* if unset the fpu must be initialized before use */
#define PF_FREEZE	0x00004000	/* this task is being frozen for suspend now */
#define PF_NOFREEZE	0x00008000	/* this thread should not be frozen */
#define PF_FROZEN	0x00010000	/* frozen for system suspend */
#define PF_FSTRANS	0x00020000	/* inside a filesystem transaction */
#define PF_KSWAPD	0x00040000	/* I am kswapd */
#define PF_SWAPOFF	0x00080000	/* I am in swapoff */
#define PF_LESS_THROTTLE 0x00100000	/* Throttle me less: I clean memory */
#define PF_SYNCWRITE	0x00200000	/* I am doing a sync write */
#define PF_BORROWED_MM	0x00400000	/* I am a kthread doing use_mm */
#define PF_RANDOMIZE	0x00800000	/* randomize virtual address space */
#define PF_SWAPWRITE	0x01000000	/* Allowed to write to swap */
#define PF_SPREAD_PAGE	0x04000000	/* Spread page cache over cpuset */
#define PF_SPREAD_SLAB	0x08000000	/* Spread some slab caches over cpuset */
#define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */

/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because the child is either stopped
 * or freshly forked and not yet running.  The *_stopped_child_*
 * variants below make that case explicit.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
	conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)

/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)

#ifdef CONFIG_SMP
extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
#else
static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)
{
	if (!cpu_isset(0, new_mask))
		return -EINVAL;
	return 0;
}
#endif
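
/*
 * Example (a sketch): pinning a task to one CPU, assuming a valid task
 * pointer p.  On UP the stub only accepts masks that include CPU 0:
 *
 *	cpumask_t mask = cpumask_of_cpu(0);
 *
 *	if (set_cpus_allowed(p, mask) < 0)
 *		...the requested CPUs are not available...
 */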

extern unsigned long long sched_clock(void);
extern unsigned long long current_sched_time(const task_t *current_task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec()   {}
#endif

#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif

extern void sched_idle_next(void);
extern void set_user_nice(task_t *p, long nice);
extern int task_prio(const task_t *p);
extern int task_nice(const task_t *p);
extern int can_nice(const task_t *p, const int nice);
extern int task_curr(const task_t *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
extern task_t *idle_task(int cpu);
extern task_t *curr_task(int cpu);
extern void set_curr_task(int cpu, task_t *p);

void yield(void);

/*
 * The default (Linux) execution domain.
 */
extern struct exec_domain	default_exec_domain;

union thread_union {
	struct thread_info thread_info;
	unsigned long stack[THREAD_SIZE/sizeof(long)];
};

#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
	/* Reliable end of stack detection:
	 * Some APM bios versions misalign the stack
	 */
	return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif

extern union thread_union init_thread_union;
extern struct task_struct init_task;

extern struct mm_struct init_mm;

#define find_task_by_pid(nr)	find_task_by_pid_type(PIDTYPE_PID, nr)
extern struct task_struct *find_task_by_pid_type(int type, int pid);
extern void set_special_pids(pid_t session, pid_t pgrp);
extern void __set_special_pids(pid_t session, pid_t pgrp);

/* per-UID process charging. */
extern struct user_struct * alloc_uid(uid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
	atomic_inc(&u->__count);
	return u;
}
extern void free_uid(struct user_struct *);
extern void switch_uid(struct user_struct *);

#include <asm/current.h>

extern void do_timer(struct pt_regs *);

extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
extern int FASTCALL(wake_up_process(struct task_struct * tsk));
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
						unsigned long clone_flags));
#ifdef CONFIG_SMP
 extern void kick_process(struct task_struct *tsk);
#else
 static inline void kick_process(struct task_struct *tsk) { }
#endif
extern void FASTCALL(sched_fork(task_t * p, int clone_flags));
extern void FASTCALL(sched_exit(task_t * p));

extern int in_group_p(gid_t);
extern int in_egroup_p(gid_t);

extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	ret = dequeue_signal(tsk, mask, info);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);

	return ret;
}

extern void block_all_signals(int (*notifier)(void *priv), void *priv,
			      sigset_t *mask);
extern void unblock_all_signals(void);
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp);
extern int kill_pg_info(int, struct siginfo *, pid_t);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern int kill_proc_info_as_uid(int, struct siginfo *, pid_t, uid_t, uid_t);
extern void do_notify_parent(struct task_struct *, int);
extern void force_sig(int, struct task_struct *);
extern void force_sig_specific(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern void zap_other_threads(struct task_struct *p);
extern int kill_pg(pid_t, int, int);
extern int kill_proc(pid_t, int, int);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int send_group_sigqueue(int, struct sigqueue *, struct task_struct *);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

/* True if we are on the alternate signal stack.  */

static inline int on_sig_stack(unsigned long sp)
{
	return (sp - current->sas_ss_sp < current->sas_ss_size);
}

static inline int sas_ss_flags(unsigned long sp)
{
	return (current->sas_ss_size == 0 ? SS_DISABLE
		: on_sig_stack(sp) ? SS_ONSTACK : 0);
}
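
/*
 * Note the unsigned arithmetic in on_sig_stack(): when sp is below
 * sas_ss_sp the subtraction wraps around, so a single compare covers
 * both "below the stack" and "above the stack".  E.g. with
 * sas_ss_sp = 0x1000 and sas_ss_size = 0x2000, sp = 0x0800 yields
 * 0x0800 - 0x1000 = a huge value >= 0x2000, i.e. not on the stack.
 */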

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);

/* mmdrop drops the mm and the page tables */
extern void FASTCALL(__mmdrop(struct mm_struct *));
static inline void mmdrop(struct mm_struct * mm)
{
	if (atomic_dec_and_test(&mm->mm_count))
		__mmdrop(mm);
}

/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
extern void flush_thread(void);
extern void exit_thread(void);

extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);

extern NORET_TYPE void do_group_exit(int);

extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern int disallow_signal(int);
extern task_t *child_reaper;

extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
task_t *fork_idle(int);

extern void set_task_comm(struct task_struct *tsk, char *from);
extern void get_task_comm(char *to, struct task_struct *tsk);

#ifdef CONFIG_SMP
extern void wait_task_inactive(task_t * p);
#else
#define wait_task_inactive(p)	do { } while (0)
#endif

#define remove_parent(p)	list_del_init(&(p)->sibling)
#define add_parent(p)		list_add_tail(&(p)->sibling,&(p)->parent->children)

#define next_task(p)	list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

/* de_thread depends on thread_group_leader not being a pid based check */
#define thread_group_leader(p)	(p == p->group_leader)

static inline task_t *next_thread(const task_t *p)
{
	return list_entry(rcu_dereference(p->thread_group.next),
			  task_t, thread_group);
}

static inline int thread_group_empty(task_t *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))
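
/*
 * Example (a sketch): walking every thread of every process while
 * holding the tasklist lock:
 *
 *	read_lock(&tasklist_lock);
 *	do_each_thread(g, t) {
 *		...inspect t, but never 'break' out of the loop...
 *	} while_each_thread(g, t);
 *	read_unlock(&tasklist_lock);
 */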

/*
 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
 * pins the final release of task.io_context.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
	spin_lock(&p->alloc_lock);
}

static inline void task_unlock(struct task_struct *p)
{
	spin_unlock(&p->alloc_lock);
}

extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
							unsigned long *flags);

static inline void unlock_task_sighand(struct task_struct *tsk,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
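
/*
 * Typical pairing (a sketch): lock_task_sighand() returns NULL if the
 * task has already released its sighand, so the result must be checked:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(tsk, &flags)) {
 *		...tsk->sighand is stable here...
 *		unlock_task_sighand(tsk, &flags);
 *	}
 */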

#ifndef __HAVE_THREAD_FUNCTIONS

#define task_thread_info(task)	(task)->thread_info
#define task_stack_page(task)	((void*)((task)->thread_info))

static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
	*task_thread_info(p) = *task_thread_info(org);
	task_thread_info(p)->task = p;
}

static inline unsigned long *end_of_stack(struct task_struct *p)
{
	return (unsigned long *)(p->thread_info + 1);
}

#endif

/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
	return test_ti_thread_flag(task_thread_info(tsk), flag);
}

static inline void set_tsk_need_resched(struct task_struct *tsk)
{
	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int need_resched(void)
{
	return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling
 * and reacquire it afterwards.
 */
extern int cond_resched(void);
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);

/*
 * Does a critical section need to be broken due to another
 * task waiting?:
 */
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
# define need_lockbreak(lock) ((lock)->break_lock)
#else
# define need_lockbreak(lock) 0
#endif

/*
 * Does a critical section need to be broken due to another
 * task waiting or preemption being signalled:
 */
static inline int lock_need_resched(spinlock_t *lock)
{
	if (need_lockbreak(lock) || need_resched())
		return 1;
	return 0;
}
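
/*
 * Example (a sketch): voluntarily yielding inside a long loop that
 * holds a spinlock; cond_resched_lock() drops the lock, schedules if
 * needed and reacquires it (mylock, more_work() and do_a_little() are
 * hypothetical):
 *
 *	spin_lock(&mylock);
 *	while (more_work()) {
 *		do_a_little();
 *		cond_resched_lock(&mylock);
 *	}
 *	spin_unlock(&mylock);
 */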

/*
 * Reevaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern FASTCALL(void recalc_sigpending_tsk(struct task_struct *t));
extern void recalc_sigpending(void);

extern void signal_wake_up(struct task_struct *t, int resume_stopped);

/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return task_thread_info(p)->cpu;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
	task_thread_info(p)->cpu = cpu;
}

#else

static inline unsigned int task_cpu(const struct task_struct *p)
{
	return 0;
}

static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}

#endif /* CONFIG_SMP */

#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
extern void arch_pick_mmap_layout(struct mm_struct *mm);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}
#endif

extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);

extern void normalize_rt_tasks(void);

#ifdef CONFIG_PM
/*
 * Check if a process has been frozen
 */
static inline int frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

/*
 * Check if there is a request to freeze a process
 */
static inline int freezing(struct task_struct *p)
{
	return p->flags & PF_FREEZE;
}

/*
 * Request that a process be frozen
 * FIXME: SMP problem. We may not modify other process' flags!
 */
static inline void freeze(struct task_struct *p)
{
	p->flags |= PF_FREEZE;
}

/*
 * Wake up a frozen process
 */
static inline int thaw_process(struct task_struct *p)
{
	if (frozen(p)) {
		p->flags &= ~PF_FROZEN;
		wake_up_process(p);
		return 1;
	}
	return 0;
}

/*
 * freezing is complete, mark process as frozen
 */
static inline void frozen_process(struct task_struct *p)
{
	p->flags = (p->flags & ~PF_FREEZE) | PF_FROZEN;
}

extern void refrigerator(void);
extern int freeze_processes(void);
extern void thaw_processes(void);

static inline int try_to_freeze(void)
{
	if (freezing(current)) {
		refrigerator();
		return 1;
	} else
		return 0;
}
#else
static inline int frozen(struct task_struct *p) { return 0; }
static inline int freezing(struct task_struct *p) { return 0; }
static inline void freeze(struct task_struct *p) { BUG(); }
static inline int thaw_process(struct task_struct *p) { return 1; }
static inline void frozen_process(struct task_struct *p) { BUG(); }

static inline void refrigerator(void) {}
static inline int freeze_processes(void) { BUG(); return 0; }
static inline void thaw_processes(void) {}

static inline int try_to_freeze(void) { return 0; }

#endif /* CONFIG_PM */
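
/*
 * Typical freezer use in a kernel thread's main loop (a sketch):
 *
 *	while (!kthread_should_stop()) {
 *		try_to_freeze();
 *		...do work or sleep...
 *	}
 *
 * try_to_freeze() enters the refrigerator when a freeze is requested
 * and returns 1 once the thread has been thawed.
 */
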
#endif /* __KERNEL__ */

#endif