1
2
3
4
5
6
7
8#ifndef _ASM_MICROBLAZE_PGTABLE_H
9#define _ASM_MICROBLAZE_PGTABLE_H
10
11#include <asm/setup.h>
12
13#ifndef __ASSEMBLY__
14extern int mem_init_done;
15#endif
16
17#include <asm-generic/pgtable-nopmd.h>
18
19#ifdef __KERNEL__
20#ifndef __ASSEMBLY__
21
22#include <linux/sched.h>
23#include <linux/threads.h>
24#include <asm/processor.h>
25#include <asm/mmu.h>
26#include <asm/page.h>
27
28extern unsigned long va_to_phys(unsigned long address);
29extern pte_t *va_to_pte(unsigned long address);
30
31
32
33
34
35
36
37
38
/*
 * The vmalloc region begins immediately above the kernel's lowmem
 * mapping and runs up to ioremap_bot (which ioremap allocations lower
 * as they carve out space at the top of the address space).
 */
#define VMALLOC_START (CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END ioremap_bot

#endif /* __ASSEMBLY__ */

/* The set of PTE bits that together select a mapping's cache behaviour. */
#define _PAGE_CACHE_CTL (_PAGE_GUARDED | _PAGE_NO_CACHE | \
 _PAGE_WRITETHRU)

/* Uncached and guarded: strongly ordered, for device registers. */
#define pgprot_noncached(prot) \
 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 _PAGE_NO_CACHE | _PAGE_GUARDED))

/* Uncached but not guarded (write-combining-style, weaker ordering). */
#define pgprot_noncached_wc(prot) \
 (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
 _PAGE_NO_CACHE))
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
/*
 * Two-level page-table geometry (the PMD level is folded away via
 * asm-generic/pgtable-nopmd.h).  One PGD entry spans PTRS_PER_PTE
 * pages, so PGDIR_SHIFT is the number of address bits resolved below
 * a single PGD slot.
 */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* Entries per table at each level; the folded PMD has exactly one. */
#define PTRS_PER_PTE (1 << PTE_SHIFT)
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))

/* PGD slots covering user space (virtual addresses below TASK_SIZE). */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR 0

/* Split of the PGD between user (below PAGE_OFFSET) and kernel slots. */
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

/* Diagnostics emitted when a corrupt table entry is encountered. */
#define pte_ERROR(e) \
 printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
 __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
 printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
 __FILE__, __LINE__, pgd_val(e))
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
/*
 * PTE bit assignments.  The layout follows the PowerPC-style scheme
 * this port derives from.  NOTE(review): which bits the MMU interprets
 * directly versus which are software-only cannot be confirmed from
 * this header alone -- verify against the TLB-miss handler.
 */
#define _PAGE_GUARDED 0x001   /* grouped into _PAGE_CACHE_CTL above */
#define _PAGE_PRESENT 0x002   /* tested by pte_present() */
#define _PAGE_NO_CACHE 0x004  /* grouped into _PAGE_CACHE_CTL above */
#define _PAGE_WRITETHRU 0x008 /* grouped into _PAGE_CACHE_CTL above */
#define _PAGE_USER 0x010      /* tested by pte_read() */
#define _PAGE_RW 0x040        /* tested by pte_write() */
#define _PAGE_DIRTY 0x080     /* tested by pte_dirty() */
#define _PAGE_HWWRITE 0x100   /* cleared together with RW/DIRTY below */
#define _PAGE_HWEXEC 0x200    /* part of _PAGE_KERNEL */
#define _PAGE_ACCESSED 0x400  /* tested by pte_young() */
/* A PMD entry is "present" whenever any of its address bits are set. */
#define _PMD_PRESENT PAGE_MASK

/*
 * Optional bits some configurations define elsewhere; provide inert
 * defaults so the expressions below always compile.
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE 0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK 0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED 0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC 0
#endif

/* Bits preserved by pte_modify() when protections are changed. */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
175
176
177
178
179
180
181
/* Building blocks for the protection definitions below. */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

/* Kernel mappings: writable, shared, executable. */
#define _PAGE_KERNEL \
 (_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

/* Kernel device mappings: as _PAGE_KERNEL but uncached and guarded. */
#define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
 __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI __pgprot(_PAGE_IO)

/*
 * mmap() protection tables: __Pxwr for MAP_PRIVATE, __Sxwr for
 * MAP_SHARED, indexed by the exec/write/read permission bits.
 * NOTE(review): several entries grant _X variants for plain "read"
 * indices (e.g. __P001 -> PAGE_READONLY_X) -- this mirrors the
 * PowerPC heritage where read access implied execute; confirm before
 * changing.
 */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY_X
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY_X
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY_X
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY_X

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY_X
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED_X
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY_X
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED_X
224
#ifndef __ASSEMBLY__

/*
 * The zero page: a page of zeroes mapped wherever a zero-filled
 * anonymous page is needed; every caller gets the same page.
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

/* A PTE is empty when no bits outside _PTE_NONE_MASK are set. */
#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
 do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

/* PMD predicates: an entry is present iff any address bits are set. */
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd) ((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0)

/*
 * PTE <-> struct page / PFN conversions.  pte_page() offsets the
 * physical address in the PTE by memory_start before indexing mem_map.
 */
#define pte_page(x) (mem_map + (unsigned long) \
 ((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET (PAGE_SHIFT)

#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
 __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
253
254#ifndef __ASSEMBLY__
255
256
257
258
/*
 * PTE flag accessors.  Each tests a single bit of the (by-value) PTE;
 * note that pte_read() actually tests _PAGE_USER, i.e. "accessible
 * from user space" stands in for "readable" on this port.
 */
static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

/*
 * NOTE(review): these take the PTE by value and return void, so they
 * only modify a local copy -- the stored entry is never touched and
 * the change is unobservable.  Effectively no-ops as written; a real
 * fix would take pte_t * or return the modified pte_t.
 */
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
267
268static inline pte_t pte_rdprotect(pte_t pte) \
269 { pte_val(pte) &= ~_PAGE_USER; return pte; }
270static inline pte_t pte_wrprotect(pte_t pte) \
271 { pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
272static inline pte_t pte_exprotect(pte_t pte) \
273 { pte_val(pte) &= ~_PAGE_EXEC; return pte; }
274static inline pte_t pte_mkclean(pte_t pte) \
275 { pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
276static inline pte_t pte_mkold(pte_t pte) \
277 { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
278
279static inline pte_t pte_mkread(pte_t pte) \
280 { pte_val(pte) |= _PAGE_USER; return pte; }
281static inline pte_t pte_mkexec(pte_t pte) \
282 { pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
283static inline pte_t pte_mkwrite(pte_t pte) \
284 { pte_val(pte) |= _PAGE_RW; return pte; }
285static inline pte_t pte_mkdirty(pte_t pte) \
286 { pte_val(pte) |= _PAGE_DIRTY; return pte; }
287static inline pte_t pte_mkyoung(pte_t pte) \
288 { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
289
290
291
292
293
294
295static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
296{
297 pte_t pte;
298 pte_val(pte) = physpage | pgprot_val(pgprot);
299 return pte;
300}
301
/*
 * Build a PTE for a struct page (GNU statement expression).  The
 * physical address is reconstructed from the page's mem_map index
 * plus memory_start -- the inverse of pte_page() above.
 */
#define mk_pte(page, pgprot) \
({ \
 pte_t pte; \
 pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
 pgprot_val(pgprot); \
 pte; \
})
309
310static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
311{
312 pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
313 return pte;
314}
315
316
317
318
319
320
321
322
323
/*
 * Atomically read-modify-write a PTE word: clear the bits in @clr,
 * set the bits in @set, and return the previous value.  Interrupts
 * are disabled around the load/modify/store so the sequence cannot
 * be interleaved with a fault handler's update.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	/*
	 * (p + 1) - 4 points at the last 4 bytes of the pte_t, i.e. the
	 * word holding the flag bits when sizeof(pte_t) > 4; for a
	 * 4-byte pte_t it is simply p.
	 */
	__asm__ __volatile__( "lw %0, %2, r0 \n"
			"andn %1, %0, %3 \n"
			"or %1, %1, %4 \n"
			"sw %1, %2, r0 \n"
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}
343
344
345
346
/*
 * Store a PTE into the page table with a plain assignment.  The mm
 * and addr arguments are unused; they exist to match the interface
 * expected by callers.
 *
 * NOTE(review): this 4-argument set_pte(mm, addr, ptep, pte) differs
 * from the generic 2-argument set_pte(ptep, pte) -- confirm callers.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}
358
359#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
360static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
361 unsigned long address, pte_t *ptep)
362{
363 return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
364}
365
366static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
367 unsigned long addr, pte_t *ptep)
368{
369 return (pte_update(ptep, \
370 (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
371}
372
373#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
374static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
375 unsigned long addr, pte_t *ptep)
376{
377 return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
378}
379
380
381
382
383
384
385
/* Atomically set the dirty bit on a live PTE (old value discarded). */
static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}
391
392
393
394
395
396
397static inline unsigned long pmd_page_vaddr(pmd_t pmd)
398{
399 return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
400}
401
402
/* struct page backing the page table this PMD entry points to. */
#define pmd_page(pmd) (pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* The kernel's master page directory (standard swapper_pg_dir symbol). */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
408
409
410
411
412
413
414
/*
 * Swap entry encoding inside a non-present PTE:
 *   bits 0-5:  swap type (6 bits)
 *   bits 6-..: swap offset
 * Converting to/from a PTE shifts by 2, keeping the two lowest PTE
 * bits (including _PAGE_PRESENT, 0x002) clear for swapped-out entries.
 */
#define __swp_type(entry) ((entry).val & 0x3f)
#define __swp_offset(entry) ((entry).val >> 6)
#define __swp_entry(type, offset) \
 ((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 2 })

/* Translate an I/O-region kernel virtual address to physical. */
extern unsigned long iopa(unsigned long addr);

/* Cache-mode selectors for I/O mappings (arch-local convention). */
#define IOMAP_FULL_CACHING 0
#define IOMAP_NOCACHE_SER 1
#define IOMAP_NOCACHE_NONSER 2
#define IOMAP_NO_COPYBACK 3

/* No restrictions: every kernel address is reported as valid. */
#define kern_addr_valid(addr) (1)
435
/* MMU fault entry point, called from the exception path. */
void do_page_fault(struct pt_regs *regs, unsigned long address,
 unsigned long error_code);

/* Boot-time page-table setup helpers (defined in the MM code). */
void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

/*
 * NOTE(review): duplicate declaration -- mem_init_done is already
 * declared near the top of this header.  Harmless, but one copy
 * could be dropped.
 */
extern int mem_init_done;

asmlinkage void __init mmu_init(void);

/* Early page allocator, presumably for use before mem_init_done. */
void __init *early_get_page(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__
/* Bounds of the ioremap region; VMALLOC_END aliases ioremap_bot. */
extern unsigned long ioremap_bot, ioremap_base;

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */
458