1
2#ifndef _ASM_POWERPC_PAGE_64_H
3#define _ASM_POWERPC_PAGE_64_H
4
5
6
7
8
9#include <asm/asm-const.h>
10
11
12
13
14
15
/*
 * The hardware base page is always 4 KB, independent of the Linux
 * PAGE_SIZE this kernel was configured with.
 */
#define HW_PAGE_SHIFT 12
#define HW_PAGE_SIZE (ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK (~(HW_PAGE_SIZE-1))

/*
 * PAGE_FACTOR is the number of bits by which a Linux page is bigger
 * than a hardware page: log2(PAGE_SIZE / HW_PAGE_SIZE). It is 0 when
 * PAGE_SHIFT == HW_PAGE_SHIFT (i.e. 4 KB Linux pages).
 */
#define PAGE_FACTOR (PAGE_SHIFT - HW_PAGE_SHIFT)
25
26
/*
 * 256 MB segments: the effective segment ID (ESID) is the virtual
 * address shifted right by 28. SID_MASK is 36 bits wide (64 - 28).
 */
#define SID_SHIFT 28
#define SID_MASK ASM_CONST(0xfffffffff)
#define ESID_MASK 0xfffffffff0000000UL
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)

/*
 * 1 TB segments: same scheme with a 40-bit shift, leaving a 24-bit
 * (64 - 40) segment ID.
 */
#define SID_SHIFT_1T 40
#define SID_MASK_1T 0xffffffUL
#define ESID_MASK_1T 0xffffff0000000000UL
#define GET_ESID_1T(x) (((x) >> SID_SHIFT_1T) & SID_MASK_1T)
37
38#ifndef __ASSEMBLY__
39#include <asm/cache.h>
40
41typedef unsigned long pte_basic_t;
42
/*
 * Zero one Linux page using "dcbz" (Data Cache Block Zero), which
 * clears a whole L1 data-cache block per instruction. The loop body
 * issues eight dcbz's, so it runs blocks_per_page / 8 iterations,
 * counted down in the CTR register via mtctr/bdnz.
 */
static inline void clear_page(void *addr)
{
	unsigned long iterations;
	unsigned long onex, twox, fourx, eightx;

	iterations = ppc64_caches.l1d.blocks_per_page / 8;

	/*
	 * Precompute the 1x..8x block-size offsets as separate locals:
	 * some versions of gcc otherwise use multiply instructions to
	 * calculate the offsets, so give it a hand to do better.
	 */
	onex = ppc64_caches.l1d.block_size;
	twox = onex << 1;
	fourx = onex << 2;
	eightx = onex << 3;

	/*
	 * The "b" constraint forces the offset operands into a non-r0
	 * GPR: in "dcbz RA,RB", RA=0 means a literal zero base, not r0,
	 * so r0 must never hold an offset here. addr is both input
	 * ("0") and earlyclobber output ("=&r") since the asm advances
	 * it by eightx each iteration.
	 */
	asm volatile(
	"mtctr %1 # clear_page\n\
	.balign 16\n\
1: dcbz 0,%0\n\
	dcbz %3,%0\n\
	dcbz %4,%0\n\
	dcbz %5,%0\n\
	dcbz %6,%0\n\
	dcbz %7,%0\n\
	dcbz %8,%0\n\
	dcbz %9,%0\n\
	add %0,%0,%10\n\
	bdnz+ 1b"
	: "=&r" (addr)
	: "r" (iterations), "0" (addr), "b" (onex), "b" (twox),
	  "b" (twox+onex), "b" (fourx), "b" (fourx+onex),
	  "b" (twox+fourx), "b" (eightx-onex), "r" (eightx)
	: "ctr", "memory");
}
79
/* Copy one full page from 'from' to 'to'; implemented out of line (in asm). */
extern void copy_page(void *to, void *from);

/* Hash page table size exponent set up at boot — presumably log2 of the
 * HPT size in bytes; confirm against the definition site. */
extern u64 ppc64_pft_size;
84
85#endif
86
/* Default VMA protection for data mappings depends on the task's ABI. */
#define VM_DATA_DEFAULT_FLAGS \
	(is_32bit_task() ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * Default stack permissions: executable for 32-bit tasks, non-executable
 * for 64-bit tasks. NOTE(review): this is presumably the fallback used
 * when the binary doesn't specify stack permissions itself (e.g. via a
 * PT_GNU_STACK program header) — confirm against the ELF loader.
 */
#define VM_STACK_DEFAULT_FLAGS32 VM_DATA_FLAGS_EXEC
#define VM_STACK_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC

#define VM_STACK_DEFAULT_FLAGS \
	(is_32bit_task() ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
103
104#include <asm-generic/getorder.h>
105
106#endif
107