#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
#define _ASM_POWERPC_PGTABLE_PPC32_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The page tables are two-level on ppc32: a one-page pgdir whose
 * entries each point to a one-page table of PTEs.  PTEs may be 32 or
 * 64 bits wide depending on the platform (CONFIG_PTE_64BIT), which is
 * why the table sizes below are derived from PTE_SHIFT rather than
 * hard-coded.
 */

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level,
 * so we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0
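/*
 * Worked example (illustrative, assuming the common 4K-page case with
 * PTE_SHIFT == 10): PGDIR_SHIFT = 12 + 10 = 22, so PGDIR_SIZE = 4MB,
 * PTRS_PER_PTE = 1 << 10 = 1024 and PTRS_PER_PGD = 1 << (32 - 22) = 1024;
 * i.e. a one-page pgdir of 1024 entries, each mapping a one-page PTE
 * table that covers 4MB of virtual space.
 */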

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The top of the region available for ioremap and vmalloc mappings:
 * the bottom of the PKMAP area when HIGHMEM is configured, otherwise
 * an arbitrary value near the top of the kernel address space.
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)
#endif

/*
 * ioremap_bot starts at IOREMAP_TOP and early ioremaps grow down from
 * there.  With a non-coherent cache, CONFIG_CONSISTENT_SIZE bytes just
 * below KVIRT_TOP are reserved for the consistent (uncached) DMA area.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * An arbitrary gap of VMALLOC_OFFSET (16MB) is left between the top of
 * physical memory and the start of the vmalloc area, so that stray
 * out-of-bounds accesses beyond lowmem have a chance of faulting.
 * Platforms that pin kernel mappings in PPC_PIN_SIZE chunks must first
 * round high_memory up to a pinned-mapping boundary.  The vmalloc area
 * then runs up to ioremap_bot, below which early ioremaps grow down.
 */
#define VMALLOC_OFFSET	(0x1000000)	/* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
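/*
 * Worked example (hypothetical values): with 256MB of lowmem mapped at
 * the usual PAGE_OFFSET of 0xc0000000 and no PPC_PIN_SIZE, high_memory
 * is 0xd0000000, so VMALLOC_START = (0xd0000000 + 0x1000000) &
 * ~0xffffff = 0xd1000000, leaving the intended 16MB guard hole between
 * lowmem and the vmalloc area.
 */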

/*
 * Pick up the platform-specific PTE bit definitions for the MMU family
 * we are built for.
 */
#if defined(CONFIG_40x)
#include <asm/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/pte-8xx.h>
#else
#include <asm/pte-hash32.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

/*
 * pte_clear keeps _PAGE_HASHPTE, so that any hash table entry for the
 * page can still be found and invalidated later.
 */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * When flushing the tlb entry for a page, we also need to flush the
 * hash table entry.  flush_hash_pages is assembler (for speed) in
 * hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_HWEXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
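
/*
 * Usage sketch (illustrative only, not a kernel API): modifications of
 * a valid PTE always go through pte_update() so that the atomicity and
 * the 44x icache bookkeeping above are never bypassed.  For example,
 * write-protecting an entry, as ptep_set_wrprotect() below does, while
 * additionally checking whether it was writable before:
 *
 *	unsigned long old = pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0);
 *	if (old & _PAGE_RW)
 *		... the mapping was writable before this call ...
 */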

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

/*
 * Set the dirty, accessed, writable and executable bits in an existing
 * (valid) PTE.  This only ever adds bits, never removes them.
 */
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
		 _PAGE_HWEXEC | _PAGE_EXEC);
	pte_update(ptep, 0, bits);
}

/*
 * _PAGE_HASHPTE can be set asynchronously by the hash code, so it is
 * ignored when comparing PTEs.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * On Book E processors the pmd contains the kernel virtual (lowmem)
 * address of the pte page, since everything runs with translation
 * enabled (even the TLB miss handler) and the physical address is
 * less useful there.  On everything else the pmd contains the
 * physical address of the pte page.
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
#define pte_offset_map_nested(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr))

#define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
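
/*
 * Example (a minimal sketch; the helper name is hypothetical): the
 * accessors above combine into the usual walk from an mm and a virtual
 * address down to a kernel-mapped PTE.  The pud and pmd levels are
 * folded on ppc32 (see asm-generic/pgtable-nopmd.h), so those steps
 * just cast the pgd entry:
 *
 *	static pte_t *pte_walk_example(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud = pud_offset(pgd, addr);
 *		pmd_t *pmd = pmd_offset(pud, addr);
 *
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */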

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit
 * (if used).
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
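/*
 * Worked example (hypothetical values): __swp_entry(2, 0x100) yields
 * val = 2 | (0x100 << 5) = 0x2002, and __swp_entry_to_pte() stores it
 * as 0x2002 << 3 = 0x10010.  The shift by 3 keeps the low PTE bits
 * clear, so a swap entry can never be mistaken for a present or
 * hashed PTE.
 */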

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */