#ifndef _ASM_IA64_SWITCH_TO_H
#define _ASM_IA64_SWITCH_TO_H

#include <linux/percpu.h>

struct task_struct;

/*
 * Low-level ia64 context-switch helpers.
 *
 * ia64_switch_to() is the assembly routine that performs the actual
 * register-state switch to @next_task.  It returns the task that was
 * running before the switch ("last"), which on SMP may differ from the
 * macro caller's notion of @prev.
 */
extern struct task_struct *ia64_switch_to (void *next_task);

/*
 * Save/load the "extra" per-task state (debug registers and/or
 * performance-monitoring state) that the ordinary switch path does not
 * handle.
 */
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);

/*
 * Does task @t carry extra state (valid debug registers or PM state)
 * that must be explicitly saved/restored across a context switch?
 */
#define IA64_HAS_EXTRA_STATE(t) \
	((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID))
36
/*
 * Core context-switch step: save @prev's extra state (debug/PMU) if it
 * has any, load @next's, then perform the actual register switch via
 * ia64_switch_to().  @last receives the task that was running before
 * the switch.
 *
 * psr.dfh ("disabled floating-point high" partition) is armed for @next
 * unless @next is already the local owner of the high FP registers, so
 * that @next's first fph access faults and the lazy-fph machinery can
 * restore its register contents on demand.
 */
#define __switch_to(prev,next,last) do {							 \
	if (IA64_HAS_EXTRA_STATE(prev))								 \
		ia64_save_extra(prev);								 \
	if (IA64_HAS_EXTRA_STATE(next))								 \
		ia64_load_extra(next);								 \
	/* dfh set => next's fph accesses fault; cleared only for the current fpu owner */	 \
	ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);			 \
	(last) = ia64_switch_to((next));							 \
} while (0)
45
#ifdef CONFIG_SMP
/*
 * On SMP we eagerly save the fph (high floating-point partition) state
 * when switching away from a thread that modified it (psr.mfh set):
 * lazy fph save/restore only works while the task stays on the CPU
 * that currently holds its fph contents, so save now in case the task
 * is migrated to another CPU.  IA64_THREAD_FPH_VALID marks the saved
 * copy in (prev)->thread.fph as current.
 */
# define switch_to(prev,next,last) do { \
	if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
		ia64_psr(task_pt_regs(prev))->mfh = 0; \
		(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
		__ia64_save_fpu((prev)->thread.fph); \
	} \
	__switch_to(prev, next, last); \
	/* "next" in the old context is "current" in the new context */ \
	if (unlikely((current->thread.flags & IA64_THREAD_MIGRATION) && \
		     (task_cpu(current) != \
		      task_thread_info(current)->last_cpu))) { \
		/* NOTE(review): only records the new CPU here; any other \
		 * migration side effects are presumably handled elsewhere. */ \
		task_thread_info(current)->last_cpu = task_cpu(current); \
	} \
} while (0)
#else
/* UP: no cross-CPU migration, so no eager fph save is needed. */
# define switch_to(prev,next,last) __switch_to(prev, next, last)
#endif
70
71#endif
72