// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common page table handling for 64-bit Book3S (hash and radix MMUs).
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/debugfs.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in
 * the page fault path when we don't hold anything except the mmap_lock
 * semaphore.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp, pmd_t entry, int dirty)
{
	int changed;
#ifdef CONFIG_DEBUG_VM
	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
	changed = !pmd_same(*(pmdp), entry);
	if (changed) {
		/*
		 * We can use MMU_PAGE_2M here, because only the radix
		 * path looks at the psize.
		 */
		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
					pmd_pte(entry), address, MMU_PAGE_2M);
	}
	return changed;
}

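/* Test and clear the referenced (accessed) bit of a huge PMD entry. */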
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long address, pmd_t *pmdp)
{
	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}

/*
 * set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * Make sure the hardware valid bit is not set. We don't do
	 * a tlb flush for this update.
	 */
	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
	WARN_ON(!(pmd_large(pmd)));
#endif
	trace_hugepage_set_pmd(addr, pmd_val(pmd));
	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_serialize(void *arg)
{
	/* We've taken the IPI, so try to trim the mask while here */
	if (radix_enabled()) {
		struct mm_struct *mm = arg;
		exit_lazy_flush_tlb(mm, false);
	}
}

/*
 * Serialize against find_current_mm_pte which does lock-less
 * lookup in page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t we want to prevent transit from pmd pointing to page table
 * to pmd pointing to huge page (and back) while interrupts are disabled.
 * We clear pmd to possibly replace it with a page table pointer in
 * different code paths. So make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
	smp_mb();
	smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		      pmd_t *pmdp)
{
	unsigned long old_pmd;

	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp,
				      _PAGE_PRESENT, _PAGE_INVALID);
	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return __pmd(old_pmd);
}

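/*
 * Clear a huge PMD entry; "full" is non-zero when the whole address space is
 * being torn down, in which case the eager TLB flush can be skipped.
 */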
pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmdp, int full)
{
	pmd_t pmd;

	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
	/*
	 * If it is not a fullmm flush, then we can possibly end up converting
	 * this huge pmd entry to a regular level 0 PTE by a parallel page
	 * fault. Make sure we flush the tlb in that case.
	 */
	if (!full)
		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
	return pmd;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_pmd() when we update all the
 * other archs to mark the pmd huge (zero pfn pmd)
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
	unsigned long pmdv;

	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
	return pfn_pmd(page_to_pfn(page), pgprot);
}

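/* Change the protection of a huge PMD, preserving the bits in _HPAGE_CHG_MASK. */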
pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	unsigned long pmdv;

	pmdv = pmd_val(pmd);
	pmdv &= _HPAGE_CHG_MASK;
	return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec, called with the MMU off */
void mmu_cleanup_all(void)
{
	if (radix_enabled())
		radix__mmu_cleanup_all();
	else if (mmu_hash_ops.hpte_clear_all)
		mmu_hash_ops.hpte_clear_all();

	reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
				     int nid, pgprot_t prot)
{
	if (radix_enabled())
		return radix__create_section_mapping(start, end, nid, prot);

	return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__remove_section_mapping(start, end);

	return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

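/* Allocate the partition table and point the hardware (and nest MMU) at it. */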
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");

	partition_tb = memblock_alloc(patb_size, patb_size);
	if (!partition_tb)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, patb_size, patb_size);

	/*
	 * Update the partition table control register. The low bits encode
	 * the table size (PATB_SIZE_SHIFT - 12, i.e. 64K here).
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	set_ptcr_when_no_uv(ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

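/*
 * Flush all translations cached for an LPID. The flush type (radix vs. hash)
 * must match how the partition ID was previously used.
 */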
static void flush_partition(unsigned int lpid, bool radix)
{
	if (radix) {
		radix__flush_all_lpid(lpid);
		radix__flush_all_lpid_guest(lpid);
	} else {
		asm volatile("ptesync" : : : "memory");
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		/* do we need fixup here ? */
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1, bool flush)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	/*
	 * When ultravisor is enabled, the partition table is stored in secure
	 * memory and can only be accessed doing an ultravisor call. However, we
	 * maintain a copy of the partition table in normal memory to allow Nest
	 * MMU translations to occur (for normal VMs).
	 *
	 * Therefore, here we always update partition_tb, regardless of whether
	 * we are running under an ultravisor or not.
	 */
	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * If ultravisor is enabled, the partition table entry (PATE) must be
	 * registered with an ultravisor call, which also flushes the TLBs and
	 * partition table caches for the lpid. Otherwise, just flush here. The
	 * type of flush (hash or radix) depends on what the previous use of
	 * the partition ID was, not the new use.
	 */
	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
		uv_register_pate(lpid, dw0, dw1);
		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
			dw0, dw1);
	} else if (flush) {
		/*
		 * Boot does not need to flush, because the MMU is off and each
		 * CPU does a tlbiel_all() before switching it on, which
		 * flushes everything.
		 */
		flush_partition(lpid, (old & PATB_HR));
	}
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

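/* Take the next PMD fragment from the per-mm cache, if one is available. */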
static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
	void *pmd_frag, *ret;

	if (PMD_FRAG_NR == 1)
		return NULL;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pmd_frag;
	if (ret) {
		pmd_frag = ret + PMD_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments, mark the cached
		 * page NULL so the next allocation starts a fresh page.
		 */
		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
			pmd_frag = NULL;
		mm->context.pmd_frag = pmd_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pmd_t *)ret;
}

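/*
 * Allocate a fresh page for PMD fragments, keep one fragment for the caller
 * and park the remainder in the per-mm cache.
 */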
static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
	void *ret = NULL;
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_page(gfp);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}

	atomic_set(&page->pt_frag_refcount, 1);

	ret = page_address(page);
	/*
	 * If we support only one fragment, just return the
	 * allocated page.
	 */
	if (PMD_FRAG_NR == 1)
		return ret;

	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread has already populated mm->context.pmd_frag,
	 * return the allocated page with a single-fragment refcount.
	 */
	if (likely(!mm->context.pmd_frag)) {
		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;

	pmd = get_pmd_from_cache(mm);
	if (pmd)
		return pmd;

	return __alloc_for_pmdcache(mm);
}

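/* Drop one fragment reference; free the backing page when the last one goes. */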
void pmd_fragment_free(unsigned long *pmd)
{
	struct page *page = virt_to_page(pmd);

	if (PageReserved(page))
		return free_reserved_page(page);

	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
		pgtable_pmd_page_dtor(page);
		__free_page(page);
	}
}

static inline void pgtable_free(void *table, int index)
{
	switch (index) {
	case PTE_INDEX:
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		__pud_free(table);
		break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
	/* 16M hugepd directory at pud level */
	case HTLB_16M_INDEX:
		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
		break;
	/* 16G hugepd directory at pgd level */
	case HTLB_16G_INDEX:
		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
		break;
#endif

	default:
		BUG();
	}
}

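/*
 * Queue a page table for freeing after the TLB flush. The table type is
 * encoded in the low bits of the pointer and decoded in __tlb_remove_table().
 */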
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
	pgf |= index;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
	/*
	 * Hash maps the memory with one size (mmu_linear_psize),
	 * so don't bother to print these on hash.
	 */
	if (!radix_enabled())
		return;
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
	seq_printf(m, "DirectMap64k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
	seq_printf(m, "DirectMap2M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
	seq_printf(m, "DirectMap1G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep)
{
	unsigned long pte_val;

	/*
	 * Clear _PAGE_PRESENT so that no hardware parallel update is
	 * possible. Also keep pte_present() true so that we don't take
	 * a wrong fault.
	 */
	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT,
			     _PAGE_INVALID, 0);

	return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
			     pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (radix_enabled())
		return radix__ptep_modify_prot_commit(vma, addr,
						      ptep, old_pte, pte);
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information and they are stored at PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we use the deposited table always, irrespective of anon or not.
 * With radix we use the deposited table only for anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
			   struct spinlock *old_pmd_ptl,
			   struct vm_area_struct *vma)
{
	if (radix_enabled())
		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

	return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

static int __init setup_disable_tlbie(char *str)
{
	if (!radix_enabled()) {
		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
		return 1;
	}

	tlbie_capable = false;
	tlbie_enabled = false;

	return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
	if (!tlbie_capable)
		return 0;

	/*
	 * There is no locking vs. TLB flushing when changing this value.
	 * The TLB flushers will see one value or another, and use either
	 * tlbie or tlbiel with IS=0; both are correct at any point, so no
	 * synchronisation is required.
	 */
	debugfs_create_bool("tlbie_enabled", 0600,
			    powerpc_debugfs_root,
			    &tlbie_enabled);

	return 0;
}
arch_initcall(pgtable_debugfs_setup);