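/*
 * Page table definitions and helpers for the s390 architecture.
 */
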
#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

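/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */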
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

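/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */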
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

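/* Reporting of corrupted page table entries */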
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

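/*
 * The vmalloc and module areas sit at the top of the kernel mapping.
 * 512GB are reserved for vmalloc by default; at the top of the vmalloc
 * area a 2GB range is reserved for modules, so that branches between
 * kernel and module code always stay within a 2GB frame.
 */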
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

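/*
 * A 64 bit page table entry on s390 combines hardware-defined bits
 * (invalid, protect, no-execute, large) with software-managed bits
 * (present, young, dirty, read, write, special, unused). Segment and
 * region table entries follow the same split, spelled out below.
 */

/* Hardware bits in the page table entry */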
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

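/* Software bits in the page table entry */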
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

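/* Set of bits not changed in pte_modify */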
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

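/* Bits in the segment/region table address-space-control element */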
#define _ASCE_ORIGIN		~0xfffUL
#define _ASCE_PRIVATE_SPACE	0x100
#define _ASCE_ALT_EVENT		0x80
#define _ASCE_SPACE_SWITCH	0x40
#define _ASCE_REAL_SPACE	0x20
#define _ASCE_TYPE_MASK		0x0c
#define _ASCE_TYPE_REGION1	0x0c
#define _ASCE_TYPE_REGION2	0x08
#define _ASCE_TYPE_REGION3	0x04
#define _ASCE_TYPE_SEGMENT	0x00
#define _ASCE_TABLE_LENGTH	0x03

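/* Bits in the region table entry */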
#define _REGION_ENTRY_ORIGIN	~0xfffUL
#define _REGION_ENTRY_PROTECT	0x200
#define _REGION_ENTRY_NOEXEC	0x100
#define _REGION_ENTRY_OFFSET	0xc0
#define _REGION_ENTRY_INVALID	0x20
#define _REGION_ENTRY_TYPE_MASK	0x0c
#define _REGION_ENTRY_TYPE_R1	0x0c
#define _REGION_ENTRY_TYPE_R2	0x08
#define _REGION_ENTRY_TYPE_R3	0x04
#define _REGION_ENTRY_LENGTH	0x03

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL
#define _REGION3_ENTRY_DIRTY	0x2000
#define _REGION3_ENTRY_YOUNG	0x1000
#define _REGION3_ENTRY_LARGE	0x0400
#define _REGION3_ENTRY_READ	0x0002
#define _REGION3_ENTRY_WRITE	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

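/* Bits in the segment table entry */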
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE	~0xfffffUL
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL
#define _SEGMENT_ENTRY_PROTECT	0x200
#define _SEGMENT_ENTRY_NOEXEC	0x100
#define _SEGMENT_ENTRY_INVALID	0x20
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000
#define _SEGMENT_ENTRY_YOUNG	0x1000
#define _SEGMENT_ENTRY_LARGE	0x0400
#define _SEGMENT_ENTRY_WRITE	0x0002
#define _SEGMENT_ENTRY_READ	0x0001

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000
#endif

#define _CRST_ENTRIES	2048
#define _PAGE_ENTRIES	256

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

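/* Page status table bits for virtualization */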
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL
#define PGSTE_IN_BIT	0x0000400000000000UL
#define PGSTE_VSIE_BIT	0x0000200000000000UL

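/* Guest Page State used for virtualization */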
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

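/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alternate-space-control
 * bit set. A kernel page table pointer doesn't need them.
 */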
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

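/*
 * Page protection definitions.
 */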
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

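/*
 * Protection maps for mmap(): __P* entries are used for private
 * (copy-on-write) mappings, __S* entries for shared mappings,
 * indexed by the read/write/execute permission bits.
 */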
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

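/*
 * Segment entry (large page) protection definitions.
 */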
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY)

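/*
 * Region3 entry (large page) protection definitions.
 */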
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.is_protected)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

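/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */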
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

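/*
 * csp() and cspg() wrap the COMPARE AND SWAP AND PURGE instruction,
 * crdte() wraps COMPARE AND REPLACE DAT TABLE ENTRY; they atomically
 * exchange a DAT table entry and purge the associated TLB entries.
 */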
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = table | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
}

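/*
 * pgd/p4d/pud/pmd/pte query functions
 */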
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

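/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Worst case they return wrong information.
 */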
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

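/*
 * Extract the pgprot value from the given pte while at the same time
 * making it usable for kernel address space mappings where fault driven
 * dirty and young/old accounting is not supported, i.e. _PAGE_PROTECT
 * and _PAGE_INVALID must not be set.
 */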
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

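/*
 * pgd/p4d/pud/pmd/pte modification functions
 */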
static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

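/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */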
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

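/*
 * ptep_xchg_direct() exchanges a pte and flushes the relevant TLB
 * entries immediately, ptep_xchg_lazy() may defer the flush when the
 * mm is not in use on other CPUs.
 */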
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

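/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. When "full" is set, tlb_gather_mmu guarantees that all TLBs of
 * the mm are flushed afterwards, so an explicit flush per pte can be
 * skipped here.
 */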
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		*ptep = __pte(_PAGE_INVALID);
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

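/*
 * Additional functions to handle KVM guest page tables
 */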
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

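/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */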
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

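/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */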
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage | pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

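/*
 * pgd_offset() derives the index width from the type of the top-level
 * table, so the same code works for 3, 4 and 5 level page tables.
 */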
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long) pmd_deref(pmd);
}

static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn, pgprot)	mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define pte_pfn(x)	(pte_val(x) >> PAGE_SHIFT)
#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#endif

static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif

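/*
 * 64 bit swap entry format:
 * A swap pte is indicated by the bit pattern (pte & 0x201) == 0x200,
 * i.e. _PAGE_INVALID and _PAGE_PROTECT set, _PAGE_PRESENT clear.
 * The swap type is stored in 5 bits at __SWP_TYPE_SHIFT and the swap
 * offset in 52 bits at __SWP_OFFSET_SHIFT.
 */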
#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

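/* s390 provides its own arch_get_unmapped_area implementations */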
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))

#endif /* _ASM_S390_PGTABLE_H */