#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

#ifndef CONFIG_MMU

#include "pgtable-nommu.h"

#else

#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>
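
/*
 * Unless the platform's <mach/vmalloc.h> supplies VMALLOC_START, place
 * the vmalloc area VMALLOC_OFFSET (8MB) above high_memory, aligned down
 * to a VMALLOC_OFFSET boundary.  The resulting gap above the directly
 * mapped RAM makes it more likely that accesses running off the end of
 * the linear mapping will fault.
 */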
#ifndef VMALLOC_START
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#endif
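
/*
 * The classic ARM MMU uses a two-level page table: a first level of
 * 4096 32-bit entries each mapping 1MB, and second-level ("coarse")
 * tables of 256 32-bit entries each mapping 4KB.  The hardware PTEs
 * carry no "young" or "dirty" bits, so Linux keeps software PTEs of
 * its own next to the hardware ones.
 *
 * The definitions below therefore present Linux with a 2048-entry pgd
 * of 8-byte entries (each covering two consecutive hardware first-level
 * descriptors) and 512-entry PTE tables.  Each PTE table page holds the
 * 512 Linux (software) PTEs first, followed at PTE_HWTABLE_OFF by the
 * two 256-entry hardware tables they shadow.
 */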
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
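
/*
 * PMD_SHIFT and PGDIR_SHIFT are both 21: with the pmd folded into the
 * pgd, each top-level entry maps 2MB, i.e. one pair of 1MB hardware
 * first-level descriptors.
 */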
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
#endif

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
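
/*
 * This is the lowest virtual address user space may map.  Keeping the
 * first page unmapped ensures that NULL pointer dereferences fault.
 * USER_PTRS_PER_PGD is the number of pgd entries covering user space.
 */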
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
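
/*
 * Hardware section (1MB) mapping definitions.
 */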
#define SECTION_SHIFT		20
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~(SECTION_SIZE-1))
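
/*
 * Hardware supersection (16MB) mapping definitions.
 */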
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))
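
/*
 * "Linux" PTE definitions.
 *
 * These bits live in the software PTE seen by the generic mm code; the
 * hardware PTEs are generated from it.  Keeping a separate software PTE
 * provides bits the hardware lacks, such as "young" and "dirty".
 */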
#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)
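
/*
 * Memory type field: bits 2..5 of the Linux PTE select the memory type
 * (cacheability/device attributes) of the mapping.  The low two bits of
 * the field correspond to the pre-ARMv6 C (cacheable) and B (bufferable)
 * bits.
 */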
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)

#ifndef __ASSEMBLY__
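
/*
 * pgprot_user and pgprot_kernel are filled in at boot to match the
 * CPU's memory policy; the PAGE_* protections below are built by OR-ing
 * extra permission bits on top of them.
 */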
#define _L_PTE_DEFAULT	(L_PTE_PRESENT | L_PTE_YOUNG)

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
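
/*
 * The __PAGE_* variants are compile-time constants (no runtime pgprot
 * component); they are what the __Pxxx/__Sxxx protection tables below
 * are built from.
 */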
#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif	/* !__ASSEMBLY__ */
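
/*
 * The __Pxxx entries give the protections used for private (possibly
 * copy-on-write) mappings and the __Sxxx entries those for shared
 * mappings, indexed by the xwr permission combination requested.
 */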
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC

#ifndef __ASSEMBLY__
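
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */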
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
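
/*
 * With a two-level table the pgd (and pud) level is folded: a pgd entry
 * is never "none" or "bad", and the operations below are no-ops.  The
 * real work happens at the pmd/pte level.
 */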
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)
#define set_pud(pud,pudp)	do { } while (0)

/* the pmd is folded into the pgd, so a "pmd pointer" is just the pgd entry */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)
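
/*
 * A Linux pmd entry covers two consecutive hardware first-level
 * descriptors, so copy_pmd() and pmd_clear() update both 32-bit words
 * and then clean/flush them so the MMU table walker sees the change.
 */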
#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))

/* only one pmd per pgd, so there is no intermediate pmd boundary to stop at */
#define pmd_addr_end(addr,end)	(end)

/* pte tables may live in highmem; CONFIG_HIGHPTE maps them via kmap_atomic() */
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		((pte_t *)kmap_atomic(pmd_page(*(pmd))))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)
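
/*
 * set_pte_ext() hands the Linux PTE to the processor-specific
 * cpu_set_pte_ext(), which stores it and generates the matching
 * hardware PTE; 'ext' supplies extra hardware PTE bits such as
 * PTE_EXT_NG.
 */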
#define set_pte_ext(ptep,pte,ext)	cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep)		set_pte_ext(ptep, __pte(0), 0)
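
/*
 * set_pte_at() marks user mappings (addr < TASK_SIZE) not-global
 * (PTE_EXT_NG) so they are scoped to the current ASID, and syncs the
 * I/D caches for them; __sync_icache_dcache() is a no-op before ARMv6.
 */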
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
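
/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * PTE with the swap type in bits 3..8 and the swap offset in bits 9..31;
 * the low bits stay clear so the entry is not seen as present.
 */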
#define __SWP_TYPE_SHIFT	3
#define __SWP_TYPE_BITS		6
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * changes.
 */
#define MAX_SWAPFILES_CHECK()	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
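
/*
 * Encode and decode a file entry.  The page offset is stored in bits
 * 3..31 and L_PTE_FILE (bit 2) marks the entry as a non-linear file
 * mapping, giving PTE_FILE_MAX_BITS of 29.
 */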
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 3)
#define pgoff_to_pte(x)		__pte(((x) << 3) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	29

/* all kernel virtual addresses are assumed valid */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
	remap_pfn_range(vma, from, pfn, size, prot)

#define pgtable_cache_init() do { } while (0)

/* set up / tear down identity (virtual == physical) mappings in a pgd */
void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */