#ifndef _ASM_X86_SYSTEM_H
#define _ASM_X86_SYSTEM_H

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

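/* entries in ARCH_DLINFO: */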
#if defined(CONFIG_IA32_EMULATION) || !defined(CONFIG_X86_64)
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif

struct task_struct;
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
extern void show_regs_common(void);

#ifdef CONFIG_X86_32

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						\
	"movl %P[task_canary](%[next]), %%ebx\n\t"		\
	"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam					\
	, [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam					\
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */
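/*
 * Saving eflags is important. It switches not only IRQ state, but also
 * the NT flag itself.
 */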
#define switch_to(prev, next, last)					\
do {									\
	/*								\
	 * Context-switching clobbers all registers, so we clobber	\
	 * them explicitly, via unused output variables.		\
	 * (EAX and EBP is not listed because EBP is saved/restored	\
	 * explicitly for wchan access and EAX is the return value of	\
	 * __switch_to())						\
	 */								\
	unsigned long ebx, ecx, edx, esi, edi;				\
									\
	asm volatile("pushfl\n\t"		/* save    flags */	\
		     "pushl %%ebp\n\t"		/* save    EBP   */	\
		     "movl %%esp,%[prev_sp]\n\t"	/* save    ESP   */ \
		     "movl %[next_sp],%%esp\n\t"	/* restore ESP   */ \
		     "movl $1f,%[prev_ip]\n\t"	/* save    EIP   */	\
		     "pushl %[next_ip]\n\t"	/* restore EIP   */	\
		     __switch_canary					\
		     "jmp __switch_to\n"	/* regparm call  */	\
		     "1:\t"						\
		     "popl %%ebp\n\t"		/* restore EBP   */	\
		     "popfl\n"			/* restore flags */	\
									\
		     /* output parameters */				\
		     : [prev_sp] "=m" (prev->thread.sp),		\
		       [prev_ip] "=m" (prev->thread.ip),		\
		       "=a" (last),					\
									\
		       /* clobbered output registers: */		\
		       "=b" (ebx), "=c" (ecx), "=d" (edx),		\
		       "=S" (esi), "=D" (edi)				\
									\
		       __switch_canary_oparam				\
									\
		       /* input parameters: */				\
		     : [next_sp]  "m" (next->thread.sp),		\
		       [next_ip]  "m" (next->thread.ip),		\
									\
		       /* regparm parameters for __switch_to(): */	\
		       [prev]     "a" (prev),				\
		       [next]     "d" (next)				\
									\
		       __switch_canary_iparam				\
									\
		     : /* reloaded segment registers */			\
			"memory");					\
} while (0)
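/*
 * disable hlt during certain critical i/o operations
 */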
#define HAVE_DISABLE_HLT
#else

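/* frame pointer must be last for get_wchan */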
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"

#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary						  \
	"movq %P[task_canary](%%rsi),%%r8\n\t"			  \
	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam					  \
	, [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam					  \
	, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else	/* CC_STACKPROTECTOR */
#define __switch_canary
#define __switch_canary_oparam
#define __switch_canary_iparam
#endif	/* CC_STACKPROTECTOR */

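/* Save and restore flags so a leaking NT flag is not carried across the switch */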
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     "movq "__percpu_arg([current_task])",%%rsi\n\t"		  \
	     __switch_canary						  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t"		  \
	     "jnz ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	       __switch_canary_oparam					  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [_tif_fork] "i" (_TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),  \
	       [current_task] "m" (current_task)			  \
	       __switch_canary_iparam					  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif /* CONFIG_X86_32 */

#ifdef __KERNEL__

extern void native_load_gs_index(unsigned);

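/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */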
#define loadsegment(seg, value)						\
do {									\
	unsigned short __val = (value);					\
									\
	asm volatile("						\n"	\
		     "1:	movl %k0,%%" #seg "		\n"	\
									\
		     ".section .fixup,\"ax\"			\n"	\
		     "2:	xorl %k0,%k0			\n"	\
		     "		jmp 1b				\n"	\
		     ".previous					\n"	\
									\
		     _ASM_EXTABLE(1b, 2b)				\
									\
		     : "+r" (__val) : : "memory");			\
} while (0)

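/*
 * Save a segment register away
 */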
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

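/*
 * x86_32 user gs accessors.
 */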
#ifdef CONFIG_X86_32
#ifdef CONFIG_X86_32_LAZY_GS
#define get_user_gs(regs)	(u16)({unsigned long v; savesegment(gs, v); v;})
#define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#define task_user_gs(tsk)	((tsk)->thread.gs)
#define lazy_save_gs(v)		savesegment(gs, (v))
#define lazy_load_gs(v)		loadsegment(gs, (v))
#else	/* X86_32_LAZY_GS */
#define get_user_gs(regs)	(u16)((regs)->gs)
#define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#define lazy_save_gs(v)		do { } while (0)
#define lazy_load_gs(v)		do { } while (0)
#endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile("clts");
}
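/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */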
static unsigned long __force_order;

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": : "r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
	return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
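	/* This could fault if %cr4 does not exist. On x86_64, a cr4 always
	 * exists, so it will never fail. */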
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": : "r" (val), "m" (__force_order));
}

#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else

static inline unsigned long read_cr0(void)
{
	return native_read_cr0();
}

static inline void write_cr0(unsigned long x)
{
	native_write_cr0(x);
}

static inline unsigned long read_cr2(void)
{
	return native_read_cr2();
}

static inline void write_cr2(unsigned long x)
{
	native_write_cr2(x);
}

static inline unsigned long read_cr3(void)
{
	return native_read_cr3();
}

static inline void write_cr3(unsigned long x)
{
	native_write_cr3(x);
}

static inline unsigned long read_cr4(void)
{
	return native_read_cr4();
}

static inline unsigned long read_cr4_safe(void)
{
	return native_read_cr4_safe();
}

static inline void write_cr4(unsigned long x)
{
	native_write_cr4(x);
}

static inline void wbinvd(void)
{
	native_wbinvd();
}

#ifdef CONFIG_X86_64

static inline unsigned long read_cr8(void)
{
	return native_read_cr8();
}

static inline void write_cr8(unsigned long x)
{
	native_write_cr8(x);
}

static inline void load_gs_index(unsigned selector)
{
	native_load_gs_index(selector);
}

#endif

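/* Clear the 'TS' bit */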
static inline void clts(void)
{
	native_clts();
}

#endif /* CONFIG_PARAVIRT */

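/* Set CR0.TS so the next FPU/SSE instruction traps with device-not-available */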
#define stts() write_cr0(read_cr0() | X86_CR0_TS)

#endif /* __KERNEL__ */

static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}

#define nop() asm volatile ("nop")

void disable_hlt(void);
void enable_hlt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
bool set_pm_idle_to_default(void);

void stop_this_cpu(void *dummy);

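/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */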
#ifdef CONFIG_X86_32
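/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */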
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
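/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU and the
 * compiler.  Ordering is not guaranteed by anything other than these
 * primitives, not even by data dependencies.
 *
 * For example, if another CPU writes "b" and then publishes it with
 * "p = &b" after a memory barrier, then "q = p; read_barrier_depends();
 * d = *q;" is guaranteed to observe the published value, because the read
 * of "*q" depends on the read of "p".  A subsequent read of an unrelated
 * variable is *not* ordered by this primitive; that case requires rmb().
 */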
#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
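/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use a three-way alternative() for this if there was one.)
 */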
static __always_inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
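/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be quite expensive on some Nehalem processors.
 *
 * Based on this we disable the IP header alignment in network drivers.
 */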
#define NET_IP_ALIGN	0
#endif /* _ASM_X86_SYSTEM_H */