1
2#ifndef _ASM_X86_PGTABLE_64_H
3#define _ASM_X86_PGTABLE_64_H
4
5#include <linux/const.h>
6#include <asm/pgtable_64_types.h>
7
8#ifndef __ASSEMBLY__
9
10
11
12
13
14#include <asm/processor.h>
15#include <linux/bitops.h>
16#include <linux/threads.h>
17#include <asm/fixmap.h>
18
/*
 * Page tables built by early boot code; one 512-entry table per level.
 * level1_fixmap_pgt covers FIXMAP_PMD_NUM PMDs' worth of PTEs.
 */
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];

/* On x86-64 the boot-time top-level table serves as swapper_pg_dir. */
#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
/* No-op on 64-bit; 32-bit needs to copy kernel entries into each pgd. */
static inline void sync_initial_page_table(void) { }
33
/*
 * Diagnostics printed by the generic mm code when a corrupt entry is
 * found at a given page-table level: file/line plus the entry's
 * address and raw 64-bit value.
 */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pud_val(e))

/* The p4d level only exists as a distinct table with 5-level paging. */
#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %p(%016lx)\n", \
	       __FILE__, __LINE__, &(e), pgd_val(e))
53
54struct mm_struct;
55
56#define mm_p4d_folded mm_p4d_folded
57static inline bool mm_p4d_folded(struct mm_struct *mm)
58{
59 return !pgtable_l5_enabled();
60}
61
/*
 * Install @new_pte for @vaddr underneath the given p4d/pud table page.
 * Implemented in the arch mm code; presumably allocates intermediate
 * levels as needed — confirm against the definition.
 */
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
64
/* Store @pte into *@ptep as a single tear-free write (WRITE_ONCE). */
static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}
69
/* Clear a pte; @mm and @addr are unused on x86-64 (generic-API shape). */
static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	native_set_pte(ptep, native_make_pte(0));
}
75
/*
 * On 64-bit a pte is a single machine word, so the plain set is
 * already atomic — no separate implementation needed.
 */
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}
80
/* Store @pmd into *@pmdp as a single tear-free write (WRITE_ONCE). */
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}
85
/* Clear a pmd entry (write the all-zero, i.e. not-present, value). */
static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}
90
/* Atomically fetch the old pte value and clear the entry. */
static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	/* xchg is locked on x86, so read-and-zero is one atomic step. */
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/*
	 * UP: no concurrent hardware/CPU writer to race with, so a plain
	 * read followed by a clear is sufficient.
	 */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}
103
/* Atomically fetch the old pmd value and clear the entry. */
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	/* xchg is locked on x86, so read-and-zero is one atomic step. */
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/*
	 * UP: no concurrent writer to race with, so a plain read followed
	 * by a clear is sufficient.
	 */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}
116
/* Store @pud into *@pudp as a single tear-free write (WRITE_ONCE). */
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}
121
/* Clear a pud entry (write the all-zero, i.e. not-present, value). */
static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}
126
/* Atomically fetch the old pud value and clear the entry. */
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	/* xchg is locked on x86, so read-and-zero is one atomic step. */
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/*
	 * UP: no concurrent writer to race with, so a plain read followed
	 * by a clear is sufficient.
	 */
	pud_t ret = *xp;

	native_pud_clear(xp);
	return ret;
#endif
}
141
/* Store a p4d entry, keeping the PTI user-space page tables in sync. */
static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	pgd_t pgd;

	/*
	 * With 5-level paging the p4d is a distinct table level below the
	 * pgd, so it can be written directly; likewise when page-table
	 * isolation is compiled out and no user copy exists.
	 */
	if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
		WRITE_ONCE(*p4dp, p4d);
		return;
	}

	/*
	 * 4-level paging + PTI: the p4d is folded into the pgd, so route
	 * the value through pti_set_user_pgtbl() (which takes/returns
	 * pgd_t) to update the user-space copy of the page tables too.
	 */
	pgd = native_make_pgd(native_p4d_val(p4d));
	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
	WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
}
155
/* Clear a p4d entry, via native_set_p4d() so PTI bookkeeping applies. */
static inline void native_p4d_clear(p4d_t *p4d)
{
	native_set_p4d(p4d, native_make_p4d(0));
}
160
/* Store a pgd entry, mirroring it into the PTI user page tables. */
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
}
165
/* Clear a pgd entry, via native_set_pgd() so PTI bookkeeping applies. */
static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
/*
 * Encode and de-code a swap entry in a non-present pte/pmd.
 *
 * Layout of a 64-bit entry:
 *   top SWP_TYPE_BITS bits                 : swap type
 *   bits SWP_OFFSET_FIRST_BIT and up       : swap offset, stored inverted
 *
 * The offset field starts just above _PAGE_BIT_PROTNONE so a swap
 * entry cannot be confused with a PROT_NONE mapping, and it is stored
 * bitwise-inverted (the '~' in __swp_offset()/__swp_entry()) so the
 * high "physical address" bits of a non-present entry read as set —
 * presumably the L1TF mitigation paired with <asm/pgtable-invert.h>,
 * included at the bottom of this header; confirm against that file.
 */
#define SWP_TYPE_BITS 5

#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)

/* Offset shift for a value packed below the type field. */
#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)

/* The type field must be wide enough for every possible swapfile. */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the type from the top SWP_TYPE_BITS bits. */
#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))

/* Invert back, shift out the type, then shift down to the offset. */
#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Build an entry: place the inverted offset into its field (the
 * left-then-right shift also masks it), then OR the type into the
 * top bits.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })

/* A swap entry is just the raw pte/pmd value and vice versa. */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
#define __swp_entry_to_pmd(x) ((pmd_t) { .pmd = (x).val })
240
/* Non-zero if @addr is a valid kernel virtual address (defined in mm). */
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

/* x86-64 supplies its own arch_get_unmapped_area{,_topdown}(). */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* AGP aperture pages are mapped uncached. */
#define PAGE_AGP PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/*
 * Map a canonical kernel address to a linear offset and back by
 * masking/restoring the sign-extension bits above __VIRTUAL_MASK.
 */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

/* This arch defines its own pte_same(). */
#define __HAVE_ARCH_PTE_SAME

/* The virtual struct-page array starts at VMEMMAP_START. */
#define vmemmap ((struct page *)VMEMMAP_START)

/* Create extra kernel mappings for [phys, phys+size), UC or WB cached. */
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
260
261#define gup_fast_permitted gup_fast_permitted
262static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
263{
264 if (end >> __VIRTUAL_MASK_SHIFT)
265 return false;
266 return true;
267}
268
269#include <asm/pgtable-invert.h>
270
271#endif
272#endif
273