1
2#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
3#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
4
5#include <asm-generic/pgtable-nopmd.h>
6
7#include <asm/book3s/32/hash.h>
8
9
10
/*
 * Kernel access permission combinations, built from the hash-32 PTE bits
 * defined in asm/book3s/32/hash.h. _PAGE_KERNEL_RO is 0 because plain read
 * access needs no extra permission bits here; writable kernel mappings set
 * _PAGE_DIRTY up front (presumably to avoid taking a later dirty-bit
 * fault — confirm against the hash fault path).
 */
#define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

/* PTE bits managed by the hash-table code: just the "is hashed" marker. */
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
17
18#ifndef __ASSEMBLY__
19
20static inline bool pte_user(pte_t pte)
21{
22 return pte_val(pte) & _PAGE_USER;
23}
24#endif
25
26
27
28
29
30
/* The physical page number is stored in the PTE above PTE_RPN_SHIFT. */
#define PTE_RPN_SHIFT (PAGE_SHIFT)

/*
 * The mask covering the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs, where the extended physical address lives above bit 31.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
44
45
46
47
48
/*
 * _PAGE_CHG_MASK masks of bits that are to be preserved across
 * pgprot changes (see pte_modify()).
 */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			_PAGE_ACCESSED | _PAGE_SPECIAL)
51
52
53
54
55
56
57
/*
 * Two sets of base protection bits: _PAGE_BASE_NC for non-cacheable
 * pages, and _PAGE_BASE for normal pages, which additionally carries
 * _PAGE_COHERENT (presumably for SMP/DMA coherency — confirm).
 */
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
60
61
62
63
64
65
66
67
/*
 * User protection combinations. Note that PAGE_COPY equals PAGE_READONLY:
 * there is no hardware write-only mode, so copy-on-write is implemented
 * by mapping the page read-only. Write permission implies read permission.
 */
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings. */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
84
85
86
87
88
89
/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * when any of the in-kernel debugging/patching facilities are enabled.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
#endif

/* Make modules code happy. We don't set RO yet. */
#define PAGE_KERNEL_EXEC PAGE_KERNEL_X

/* Advertise special mapping type for AGP. */
#define PAGE_AGP (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP
103
/*
 * Page-table geometry: a two-level table. The PMD and PUD levels are
 * folded (see asm-generic/pgtable-nopmd.h included above), hence their
 * index sizes of 0.
 */
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page. */
#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
124
125
126
127
128
129
130
131
132
133
134
135
/* Each PGD entry maps PGDIR_SIZE bytes of virtual address space. */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* Number of PGD entries that cover user space (addresses below TASK_SIZE). */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
141
142#ifndef __ASSEMBLY__
143
/* Establish a kernel page-table mapping of @pa at @va with protection @prot. */
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
145
146#endif
147
148
149
150
151
152
153#include <asm/fixmap.h>
154
155
156
157
158
159
/*
 * Top of the ioremap area: below the HIGHMEM pkmap region when HIGHMEM
 * is configured, otherwise directly below the fixmap.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP PKMAP_BASE
#else
#define IOREMAP_TOP FIXADDR_START
#endif

/* ioremap shares the vmalloc address range on book3s/32. */
#define IOREMAP_START VMALLOC_START
#define IOREMAP_END VMALLOC_END
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/*
 * Offset to the start of the vmalloc VM area: the 16MB value leaves a
 * gap above the end of physical memory before kernel virtual mappings
 * start, so that stray out-of-bounds accesses will hopefully fault.
 */
#define VMALLOC_OFFSET (0x1000000)

/* high_memory plus the offset, rounded to a VMALLOC_OFFSET boundary. */
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
/* Keep the end aligned so the KASAN shadow of the region is page-granular. */
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif

/* Modules occupy the 256MB slice immediately below the kernel linear map. */
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR (MODULES_END - SZ_256M)
199
200#ifndef __ASSEMBLY__
201#include <linux/sched.h>
202#include <linux/threads.h>
203
204
/* Bits to mask out of a PGD entry to reach the page-table pointer: none. */
#define PGD_MASKED_BITS 0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Clearing a PTE preserves _PAGE_HASHPTE: the corresponding hash-table
 * entry may still need to be invalidated later, so we must remember
 * that one exists.
 */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

/* PMD entry predicates; the _PMD_* validity bits come from hash.h. */
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
227
228
229
230
231
232
/*
 * Hash-table maintenance, implemented outside this header.
 * flush_hash_pages(): invalidate @count consecutive hash entries starting
 * at @va in @context (return semantics not visible here — see definition).
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Insert a hash-table entry for @va in @context. */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);
239
240
/*
 * Invalidate the hash-table entry (if any) caching the translation for
 * @addr. A no-op on MMUs that don't use a hash table (MMU_FTR_HPTE_TABLE
 * unset).
 */
static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		/* flush_hash_pages() wants the page-aligned physical address
		 * of the PTE page this PTE lives in. */
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;

		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
249
250
251
252
253
254
255
256
257
258
259
/*
 * Atomically clear the @clr bits and set the @set bits in *p, returning
 * the previous PTE value. Called whenever an existing valid PTE is
 * updated (set_pte_at() handles installing fresh PTEs).
 *
 * The lwarx/stwcx. loop makes the read-modify-write atomic against
 * concurrent PTE updates (notably _PAGE_HASHPTE being set/cleared by the
 * hash code — confirm against the hash fault path). With 64-bit PTEs
 * only the low word (the flags, at the higher address on this big-endian
 * layout) is updated atomically; the high word is read with a plain lwz
 * since this function never modifies it. @huge is ignored on book3s/32.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old;
	unsigned long tmp;

	__asm__ __volatile__(
#ifndef CONFIG_PTE_64BIT
"1:	lwarx	%0, 0, %3\n"
"	andc	%1, %0, %4\n"
#else
"1:	lwarx	%L0, 0, %3\n"
"	lwz	%0, -4(%3)\n"
"	andc	%1, %L0, %4\n"
#endif
"	or	%1, %1, %5\n"
"	stwcx.	%1, 0, %3\n"
"	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
	: "r" (p),
#else
	: "b" ((unsigned long)(p) + 4),
#endif
	  "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
289
290
291
292
293
294#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
295static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
296 unsigned long addr, pte_t *ptep)
297{
298 unsigned long old;
299 old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
300 if (old & _PAGE_HASHPTE)
301 flush_hash_entry(mm, ptep, addr);
302
303 return (old & _PAGE_ACCESSED) != 0;
304}
305#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
306 __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
307
308#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
309static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
310 pte_t *ptep)
311{
312 return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
313}
314
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/* Write-protect a PTE in place by atomically clearing _PAGE_RW. */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
321
322static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
323 pte_t *ptep, pte_t entry,
324 unsigned long address,
325 int psize)
326{
327 unsigned long set = pte_val(entry) &
328 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
329
330 pte_update(vma->vm_mm, address, ptep, 0, set, 0);
331
332 flush_tlb_page(vma, address);
333}
334
/* Two PTEs are "the same" if they differ only in _PAGE_HASHPTE. */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/* The PMD entry holds the physical address of the PTE page. */
#define pmd_page(pmd) \
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
340
341
342
343
344
345
346
/*
 * Swap entry encoding: 5 bits of type, the rest is the offset. The whole
 * entry sits shifted left by 3 in the PTE, keeping the low hardware bits
 * clear (so e.g. _PAGE_PRESENT stays 0 — confirm bit layout against hash.h).
 */
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
352
353
/* Generic PTE bit accessors. */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
/* Read permission cannot be revoked on book3s/32, so always true. */
static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
/* "none" means no bits set outside the software _PTE_NONE_MASK bits. */
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
361
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Software-present and hardware-valid are the same bit on hash32. */
static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* True if this PTE currently has a corresponding hash-table entry. */
static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

/* True for cache-inhibited mappings. */
static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}
381
382
383
384
385
386#define pte_access_permitted pte_access_permitted
387static inline bool pte_access_permitted(pte_t pte, bool write)
388{
389
390
391
392
393 if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
394 return false;
395
396 if (write && !pte_write(pte))
397 return false;
398
399 return true;
400}
401
402
403
404
405
406
407
/*
 * PFN <-> PTE conversion: the page frame number is stored above
 * PTE_RPN_SHIFT, the protection bits below it. The pfn is widened to
 * pte_basic_t before shifting so 36-bit physical addresses survive
 * when CONFIG_PTE_64BIT is set.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
418
419
/* PTE modifiers: each returns a modified copy of @pte. */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

/* No PTE format conversion is needed on this platform. */
static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

/* No PTE-level huge-page bit here, so this is a no-op. */
static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

/* Swap in new protection bits, keeping the _PAGE_CHG_MASK bits (RPN,
 * hash/dirty/accessed/special state) of the original PTE. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
489
490
491
492
493
494
495
496
/*
 * Install a PTE. Three strategies depending on PTE size and SMP; in all
 * of them the pre-existing _PAGE_HASHPTE bit is preserved so a stale
 * hash-table entry can still be invalidated later.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/*
	 * First case: 32-bit PTEs on SMP. Use the atomic pte_update() so we
	 * don't race with a concurrent update of _PAGE_HASHPTE. For a
	 * strictly per-CPU PTE (percpu != 0) a plain store that keeps the
	 * hash bit is enough, same as the non-SMP case below.
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);

#elif defined(CONFIG_PTE_64BIT)
	/*
	 * Second case: 64-bit PTEs on a 32-bit CPU. Any previously hashed
	 * translation is flushed first, then the two words are stored in
	 * order with an eieio barrier between them so an observer never
	 * sees a half-written entry paired with a live hash entry.
	 * Per-CPU PTEs again take the simple hash-bit-preserving store.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%X0 %2,%0\n\
		eieio\n\
		stw%X1 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/*
	 * Third case: 32-bit PTEs on UP. A plain store is fine, but
	 * _PAGE_HASHPTE must be kept because the old translation may not
	 * have been evicted from the hash table yet (that happens in a
	 * subsequent flush_tlb_xxx()).
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}
546
547
548
549
550
/* Mask of all the PTE bits that control cacheability. */
#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)
553
554#define pgprot_noncached pgprot_noncached
555static inline pgprot_t pgprot_noncached(pgprot_t prot)
556{
557 return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
558 _PAGE_NO_CACHE | _PAGE_GUARDED);
559}
560
/* Cache-inhibited but not guarded (unlike pgprot_noncached()). */
#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

/* Fully cached, coherent mapping. */
#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

/* Cached with write-through instead of write-back. */
#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

/* Cached, with no coherency enforcement bits at all. */
#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

/* Write-combine maps to cache-inhibited, non-guarded on this platform. */
#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
593
594#endif
595
596#endif
597