1
2#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
3#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
4
5#include <asm-generic/pgtable-nopmd.h>
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
/*
 * PTE bits for the classic 32-bit Book3S (hash) MMU.  Bits below
 * PAGE_SHIFT hold software and hardware status; the pfn is stored
 * above PTE_RPN_SHIFT (see PTE_RPN_MASK below).
 */
#define _PAGE_PRESENT 0x001	/* software: pte contains a translation */
#define _PAGE_HASHPTE 0x002	/* hash fault code has made an HPTE for this pte */
#define _PAGE_USER 0x004	/* usermode access allowed */
#define _PAGE_GUARDED 0x008	/* G: prohibit speculative access */
#define _PAGE_COHERENT 0x010	/* M: enforce memory coherence */
#define _PAGE_NO_CACHE 0x020	/* I: cache inhibit */
#define _PAGE_WRITETHRU 0x040	/* W: cache write-through */
#define _PAGE_DIRTY 0x080	/* C: page changed */
#define _PAGE_ACCESSED 0x100	/* R: page referenced */
#define _PAGE_EXEC 0x200	/* software: exec allowed */
#define _PAGE_RW 0x400	/* software: write access allowed */
#define _PAGE_SPECIAL 0x800	/* software: special page */

#ifdef CONFIG_PTE_64BIT
/* The high word of the pte is never cleared (see pte_update()) */
#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
#else
#define _PTE_NONE_MASK _PAGE_HASHPTE
#endif

/* PMD entries point at a PTE page; anything outside PAGE_MASK is bad */
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)

/* Permission flag combinations used for kernel mappings */
#define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

/* PTE flags owned/updated by the hash-fault code */
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
53
54#ifndef __ASSEMBLY__
55
56static inline bool pte_user(pte_t pte)
57{
58 return pte_val(pte) & _PAGE_USER;
59}
60#endif
61
62
63
64
65
66
/* Location of the pfn within the PTE */
#define PTE_RPN_SHIFT (PAGE_SHIFT)

/*
 * Mask covering the physical-page-number portion of the PTE.  With
 * 64-bit PTEs the extra high word allows 36 physical address bits;
 * with 32-bit PTEs the physical address space is 32 bits.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif

/*
 * _PAGE_CHG_MASK: bits preserved across a protection change —
 * see pte_modify() below.
 */
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			_PAGE_ACCESSED | _PAGE_SPECIAL)

/* Base flags for all valid mappings; _PAGE_BASE adds cache coherency */
#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
96
97
98
99
100
101
102
103
/*
 * Protections for user pages.  "COPY" variants omit _PAGE_RW so writes
 * fault; "_X" variants add _PAGE_EXEC.
 */
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Kernel text stays writable+executable when a facility that patches
 * it at runtime (KGDB, XMON, BDI switch, kprobes, dynamic ftrace) is
 * configured; otherwise it is mapped read-only executable.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
#endif

/* Make modules code happy; module text is not mapped RO here */
#define PAGE_KERNEL_EXEC PAGE_KERNEL_X

/* Advertise support for non-cached AGP mappings */
#define PAGE_AGP (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP
139
/* log2 of the number of entries per table level; PMD and PUD are folded */
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out of a PMD value to get at the PTE page */
#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif

#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)

/* PGDIR_SHIFT determines the address range a top-level entry can map */
#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

/* Number of PGD entries covering user space (up to TASK_SIZE) */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)

#ifndef __ASSEMBLY__

/*
 * Establish a kernel mapping of one page: @va -> @pa with protection
 * @prot.  NOTE(review): defined elsewhere; presumably returns 0 on
 * success / negative errno on failure — confirm against the definition.
 */
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif
183
184
185
186
187
188
189#include <asm/fixmap.h>
190
191
192
193
194
195
/*
 * The ioremap region ends below the persistent-kmap window when
 * HIGHMEM is configured, otherwise below the fixmap area.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP PKMAP_BASE
#else
#define IOREMAP_TOP FIXADDR_START
#endif

/* ioremap shares the vmalloc address range */
#define IOREMAP_START VMALLOC_START
#define IOREMAP_END VMALLOC_END

/*
 * The vmalloc area starts at the first VMALLOC_OFFSET (16MB) boundary
 * past high_memory, leaving a gap after the linear map so that stray
 * accesses off the end of physical memory fault instead of silently
 * hitting vmalloc mappings.
 */
#define VMALLOC_OFFSET (0x1000000)

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
/* Keep the end aligned so the KASAN shadow of the area is page aligned */
#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif

/* Modules occupy the 256MB naturally-aligned window below PAGE_OFFSET */
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR (MODULES_END - SZ_256M)
235
236#ifndef __ASSEMBLY__
237#include <linux/sched.h>
238#include <linux/threads.h>
239
240
/* Bits to mask out of a PGD entry to get at the page-table page */
#define PGD_MASKED_BITS 0

/* Diagnostics for corrupted page-table entries */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Clear a PTE but preserve _PAGE_HASHPTE, so the flush code can still
 * tell that a hash-table entry may exist for this page (see
 * flush_hash_entry()).
 */
#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
/* Empty a PMD entry entirely */
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}
263
264
265
266
267
268
/*
 * Flush @count hash-table entries starting at @va for MMU context
 * @context; @pmdval locates the PTE page (callers here pass
 * __pa(ptep) & PAGE_MASK — see flush_hash_entry()).  Defined elsewhere.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Pre-load a hash-table entry for @va in @context.  Defined elsewhere. */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);
275
276
277static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
278{
279 if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
280 unsigned long ptephys = __pa(ptep) & PAGE_MASK;
281
282 flush_hash_pages(mm->context.id, addr, ptephys, 1);
283 }
284}
285
286
287
288
289
290
291
292
293
294
295
/*
 * Atomically clear the bits in @clr and set the bits in @set on the
 * PTE at @p, returning the old value.  lwarx/stwcx. is used so a
 * concurrent hash fault setting _PAGE_HASHPTE is not lost.
 *
 * With 64-bit PTEs only the low word is updated atomically (it holds
 * all the flag bits in this layout); the high word is read with a
 * plain load and never written here.  @huge is unused in this
 * implementation and accepted for interface compatibility.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old;
	unsigned long tmp;

	__asm__ __volatile__(
#ifndef CONFIG_PTE_64BIT
"1: lwarx %0, 0, %3\n"		/* load-reserve the PTE word */
" andc %1, %0, %4\n"		/* clear the @clr bits */
#else
"1: lwarx %L0, 0, %3\n"		/* load-reserve the low (flags) word */
" lwz %0, -4(%3)\n"		/* plain load of the high word */
" andc %1, %L0, %4\n"		/* clear the @clr bits */
#endif
" or %1, %1, %5\n"		/* set the @set bits */
" stwcx. %1, 0, %3\n"		/* store-conditional; retry if reservation lost */
" bne- 1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
	: "r" (p),
#else
	: "b" ((unsigned long)(p) + 4),	/* address of the low word */
#endif
	  "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
325
326
327
328
329
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
/*
 * Atomically clear the accessed (R) bit and report whether it was set.
 * If an HPTE shadowed the PTE, flush it so the hash table does not
 * keep the stale accessed bit.
 */
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	if (old & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);

	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
/*
 * Clear the PTE (preserving _PAGE_HASHPTE, as pte_clear() does) and
 * return the old value.
 */
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
/* Atomically remove write permission from a PTE */
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

/*
 * Add the dirty/accessed/write/exec bits from @entry to the PTE.  Only
 * ever sets bits, never clears.  The TLB entry is then flushed —
 * NOTE(review): presumably so the translation is refetched with the
 * new permissions; confirm against the fault-handling callers.
 */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	pte_update(vma->vm_mm, address, ptep, 0, set, 0);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
/* PTEs match if they differ at most in the _PAGE_HASHPTE bookkeeping bit */
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/* struct page of the PTE page a PMD entry points to */
#define pmd_page(pmd) \
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
376
377
378
379
380
381
382
/*
 * Encoding of swap entries in a non-present PTE: the swap value is
 * stored shifted left by 3, keeping PTE bits 0-2 (_PAGE_PRESENT,
 * _PAGE_HASHPTE, _PAGE_USER) clear.  Within the swap value, the low
 * 5 bits are the type and the remainder the offset.
 */
#define __swp_type(entry) ((entry).val & 0x1f)
#define __swp_offset(entry) ((entry).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })

/* Generic PTE predicates */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte) { return 1; }	/* no separate read-permission bit */
static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Same test as pte_present() on this MMU */
static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* Does a hash-table entry shadow this PTE? */
static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

/* Is the mapping cache-inhibited? */
static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}
417
418
419
420
421
422#define pte_access_permitted pte_access_permitted
423static inline bool pte_access_permitted(pte_t pte, bool write)
424{
425
426
427
428
429 if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
430 return false;
431
432 if (write && !pte_write(pte))
433 return false;
434
435 return true;
436}
437
438
439
440
441
442
443
/*
 * Build a PTE from a page-frame number and protection bits: the pfn
 * goes above PTE_RPN_SHIFT, the pgprot bits below it.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

/* Extract the page-frame number from a PTE */
static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}

/* Generic modifiers: each returns a new PTE value, no in-place update */

/* Remove write permission */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

/* Remove execute permission */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

/* Clear the dirty (changed) bit */
static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

/* Clear the accessed (referenced) bit */
static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

/* Grant execute permission */
static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

/* No conversion needed: a PTE is already in PTE format here */
static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

/* Grant write permission */
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

/* Set the dirty (changed) bit */
static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

/* Set the accessed (referenced) bit */
static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

/* Mark as a special page */
static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

/* No huge-page bit in this PTE format: identity */
static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

/* Restrict to kernel mode by clearing _PAGE_USER */
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

/* Allow user-mode access by setting _PAGE_USER */
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

/* Replace the protection bits, keeping the pfn and _PAGE_CHG_MASK bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
525
526
527
528
529
530
531
532
/*
 * Store a PTE.  The complexity is in preserving the _PAGE_HASHPTE bit
 * (owned by the hash-fault code) and, for 64-bit PTEs, in making the
 * two 32-bit stores safe against a concurrent hash fault.  @percpu is
 * set for early/per-CPU mappings where no concurrent update is
 * possible, allowing a plain non-atomic store.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/*
	 * First case: 32-bit PTEs on SMP.  Use the atomic pte_update()
	 * so a hash fault setting _PAGE_HASHPTE on another CPU is not
	 * lost; the percpu path can use a plain store.
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);

#elif defined(CONFIG_PTE_64BIT)
	/*
	 * Second case: 64-bit PTE needs two 32-bit stores.  Flush any
	 * existing HPTE first, then store the high word, an eieio
	 * barrier, and finally the low word — the one carrying the flag
	 * bits including _PAGE_PRESENT (see pte_update()) — so the PTE
	 * never appears valid while half-written.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%X0 %2,%0\n\
		eieio\n\
		stw%X1 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/*
	 * Third case: 32-bit UP.  A single aligned store is sufficient;
	 * just preserve _PAGE_HASHPTE.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}
582
583
584
585
586
/* All cache-attribute (WIMG) bits in a PTE */
#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
/* Cache-inhibited and guarded (no speculative access) */
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
/* Cache-inhibited but not guarded */
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
/* Cached and memory-coherent */
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
/* Cached, coherent, write-through */
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
/* Cached with all attribute bits cleared (non-coherent) */
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
/* Write-combining is implemented as cache-inhibited, non-guarded */
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
629
630#endif
631
632#endif
633