linux/arch/powerpc/mm/book3s64/pgtable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
#include <misc/cxl-base.h>

#include <asm/debugfs.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
#include <asm/kexec.h>

#include <mm/mmu_decl.h>
#include <trace/events/thp.h>

#include "internal.h"

unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
EXPORT_SYMBOL(__pmd_frag_size_shift);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is called when relaxing access to a hugepage. It's also called in the
 * page fault path when we don't hit any of the major fault cases, i.e. a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will have
 * handled those two for us; here we additionally deal with missing execute
 * permission on some processors.
 */
int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp, pmd_t entry, int dirty)
{
        int changed;
#ifdef CONFIG_DEBUG_VM
        WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
#endif
        changed = !pmd_same(*(pmdp), entry);
        if (changed) {
                /*
                 * We can use MMU_PAGE_2M here, because only the radix
                 * path looks at the psize.
                 */
                __ptep_set_access_flags(vma, pmdp_ptep(pmdp),
                                        pmd_pte(entry), address, MMU_PAGE_2M);
        }
        return changed;
}

int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long address, pmd_t *pmdp)
{
        return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
}
/*
 * Set a new huge pmd. We should not be called for updating
 * an existing pmd entry. That should go via pmd_hugepage_update.
 */
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
        /*
         * Make sure the hardware valid bit is not set. We don't do
         * a TLB flush for this update.
         */

        WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
        WARN_ON(!(pmd_large(pmd)));
#endif
        trace_hugepage_set_pmd(addr, pmd_val(pmd));
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}

static void do_serialize(void *arg)
{
        /* We've taken the IPI, so try to trim the mask while here */
        if (radix_enabled()) {
                struct mm_struct *mm = arg;
                exit_lazy_flush_tlb(mm, false);
        }
}

/*
 * Serialize against find_current_mm_pte, which does a lock-less
 * lookup in the page tables with local interrupts disabled. For huge pages
 * it casts pmd_t to pte_t. Since the format of pte_t is different from
 * pmd_t, we want to prevent a transition from a pmd pointing to a page table
 * to a pmd pointing to a huge page (and back) while interrupts are disabled.
 * We clear the pmd to possibly replace it with a page table pointer in
 * different code paths, so make sure we wait for the parallel
 * find_current_mm_pte to finish.
 */
void serialize_against_pte_lookup(struct mm_struct *mm)
{
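        /*
         * The barrier makes the caller's pmd update visible before the IPIs
         * go out; the final argument (wait == 1) makes
         * smp_call_function_many() return only after every CPU in
         * mm_cpumask(mm) has executed do_serialize().
         */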
        smp_mb();
        smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}

/*
 * We use this to invalidate a pmdp entry before switching from a
 * hugepte to a regular pmd entry.
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        unsigned long old_pmd;

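        /*
         * Clear _PAGE_PRESENT and set _PAGE_INVALID: the entry becomes
         * invalid to the hardware but is still treated as present by
         * pmd_present(), so a parallel lookup does not mistake it for
         * pmd_none().
         */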
        old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return __pmd(old_pmd);
}

pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmdp, int full)
{
        pmd_t pmd;
        VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
        VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
                   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
        /*
         * If this is not a fullmm flush, a parallel page fault could convert
         * this PMD entry into a regular level 0 PTE. Make sure we flush the
         * TLB in that case.
         */
        if (!full)
                flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
        return pmd;
}

static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
{
        return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}

/*
 * At some point we should be able to get rid of
 * pmd_mkhuge() and mk_huge_pmd() when we update all the
 * other archs to mark the pmd huge in pfn_pmd()
 */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
        unsigned long pmdv;

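        /* Keep only the real-page-number bits; protection bits come from pgprot below. */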
        pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;

        return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}

pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
{
        return pfn_pmd(page_to_pfn(page), pgprot);
}

pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        unsigned long pmdv;

        pmdv = pmd_val(pmd);
        pmdv &= _HPAGE_CHG_MASK;
        return pmd_set_protbits(__pmd(pmdv), newprot);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/* For use by kexec */
void mmu_cleanup_all(void)
{
        if (radix_enabled())
                radix__mmu_cleanup_all();
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();

        reset_sprs();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int __meminit create_section_mapping(unsigned long start, unsigned long end,
                                     int nid, pgprot_t prot)
{
        if (radix_enabled())
                return radix__create_section_mapping(start, end, nid, prot);

        return hash__create_section_mapping(start, end, nid, prot);
}

int __meminit remove_section_mapping(unsigned long start, unsigned long end)
{
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);

        return hash__remove_section_mapping(start, end);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

void __init mmu_partition_table_init(void)
{
        unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
        unsigned long ptcr;

        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
        /* Initialize the Partition Table with no entries */
        partition_tb = memblock_alloc(patb_size, patb_size);
        if (!partition_tb)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, patb_size, patb_size);

        /*
         * Update the partition table control register; the
         * partition table is 64 KB in size.
         */
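        /*
         * The low bits of the PTCR encode the table size as
         * log2(size) - 12, i.e. the table occupies 2^(12 + PATS) bytes.
         */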
        ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
        set_ptcr_when_no_uv(ptcr);
        powernv_set_nmmu_ptcr(ptcr);
}

static void flush_partition(unsigned int lpid, bool radix)
{
        if (radix) {
                radix__flush_all_lpid(lpid);
                radix__flush_all_lpid_guest(lpid);
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
                             "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
                /* Do we need a fixup here? */
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
        }
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
                                  unsigned long dw1, bool flush)
{
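        /*
         * Note the previous first doubleword: its PATB_HR bit tells us
         * whether the old user of this LPID was radix, which decides the
         * flavour of flush done below.
         */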
        unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

        /*
         * When the ultravisor is enabled, the partition table is stored in
         * secure memory and can only be accessed via an ultravisor call.
         * However, we maintain a copy of the partition table in normal memory
         * to allow Nest MMU translations to occur (for normal VMs).
         *
         * Therefore, here we always update partition_tb, regardless of whether
         * we are running under an ultravisor or not.
         */
        partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        partition_tb[lpid].patb1 = cpu_to_be64(dw1);

        /*
         * If the ultravisor is enabled, we do an ultravisor call to register
         * the partition table entry (PATE), which also does a global flush of
         * TLBs and partition table caches for the lpid. Otherwise, just do the
         * flush. The type of flush (hash or radix) depends on what the
         * previous use of the partition ID was, not the new use.
         */
        if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
                uv_register_pate(lpid, dw0, dw1);
                pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
                        dw0, dw1);
        } else if (flush) {
                /*
                 * Boot does not need to flush, because the MMU is off and
                 * each CPU does a tlbiel_all() before its MMU is switched on,
                 * which flushes everything.
                 */
                flush_partition(lpid, (old & PATB_HR));
        }
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);

static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
        void *pmd_frag, *ret;

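        /* A single fragment per page means there is nothing to cache. */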
        if (PMD_FRAG_NR == 1)
                return NULL;

        spin_lock(&mm->page_table_lock);
        ret = mm->context.pmd_frag;
        if (ret) {
                pmd_frag = ret + PMD_FRAG_SIZE;
                /*
                 * If we have taken up all the fragments, mark the pmd_frag
                 * pointer NULL.
                 */
                if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
                        pmd_frag = NULL;
                mm->context.pmd_frag = pmd_frag;
        }
        spin_unlock(&mm->page_table_lock);
        return (pmd_t *)ret;
}

static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
{
        void *ret = NULL;
        struct page *page;
        gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

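        /* Kernel (init_mm) page tables are never charged to a memory cgroup. */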
        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;
        page = alloc_page(gfp);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
                __free_pages(page, 0);
                return NULL;
        }

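        /*
         * Start with a single reference for the fragment handed out below;
         * if the rest of the page is published via mm->context.pmd_frag,
         * the count is raised to PMD_FRAG_NR.
         */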
        atomic_set(&page->pt_frag_refcount, 1);

        ret = page_address(page);
        /*
         * If we support only one fragment, just return the
         * allocated page.
         */
        if (PMD_FRAG_NR == 1)
                return ret;

        spin_lock(&mm->page_table_lock);
        /*
         * If we find mm->context.pmd_frag already set (a parallel allocation
         * won the race), we return the allocated page with a single fragment
         * count.
         */
        if (likely(!mm->context.pmd_frag)) {
                atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
                mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
        }
        spin_unlock(&mm->page_table_lock);

        return (pmd_t *)ret;
}

pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;

        pmd = get_pmd_from_cache(mm);
        if (pmd)
                return pmd;

        return __alloc_for_pmdcache(mm);
}

void pmd_fragment_free(unsigned long *pmd)
{
        struct page *page = virt_to_page(pmd);

        if (PageReserved(page))
                return free_reserved_page(page);

        BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
        if (atomic_dec_and_test(&page->pt_frag_refcount)) {
                pgtable_pmd_page_dtor(page);
                __free_page(page);
        }
}

static inline void pgtable_free(void *table, int index)
{
        switch (index) {
        case PTE_INDEX:
                pte_fragment_free(table, 0);
                break;
        case PMD_INDEX:
                pmd_fragment_free(table);
                break;
        case PUD_INDEX:
                __pud_free(table);
                break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
                /* 16M hugepd directory at pud level */
        case HTLB_16M_INDEX:
                BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
                break;
                /* 16G hugepd directory at the pgd level */
        case HTLB_16G_INDEX:
                BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
                kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
                break;
#endif
                /* We don't free pgd table via RCU callback */
        default:
                BUG();
        }
}

void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
{
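        /*
         * The page-table level is encoded in the low bits of the table
         * pointer: page tables are aligned beyond MAX_PGTABLE_INDEX_SIZE,
         * so those bits are otherwise zero, and __tlb_remove_table()
         * separates them out again.
         */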
        unsigned long pgf = (unsigned long)table;

        BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
        pgf |= index;
        tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
        unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

        return pgtable_free(table, index);
}

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

void arch_report_meminfo(struct seq_file *m)
{
        /*
         * Hash maps the memory with one size, mmu_linear_psize,
         * so don't bother printing these on hash.
         */
        if (!radix_enabled())
                return;
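        /*
         * Each counter is a number of mappings of that page size; the shifts
         * convert to kB (4K: <<2, 64K: <<6, 2M: <<11, 1G: <<20).
         */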
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
        seq_printf(m, "DirectMap64k:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
        seq_printf(m, "DirectMap2M:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
        seq_printf(m, "DirectMap1G:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep)
{
        unsigned long pte_val;

        /*
         * Clear _PAGE_PRESENT so that no parallel hardware update is
         * possible. Also keep pte_present() true so that we don't take
         * a wrong fault.
         */
        pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);

        return __pte(pte_val);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep, pte_t old_pte, pte_t pte)
{
        if (radix_enabled())
                return radix__ptep_modify_prot_commit(vma, addr,
                                                      ptep, old_pte, pte);
        set_pte_at(vma->vm_mm, addr, ptep, pte);
}

/*
 * For hash translation mode, we use the deposited table to store hash slot
 * information, and it is stored at a PTRS_PER_PMD offset from the related pmd
 * location. Hence a pmd move requires a deposit and withdraw.
 *
 * For radix translation with split pmd ptl, we store the deposited table in
 * the pmd page. Hence if we have a different pmd page we need to withdraw
 * during a pmd move.
 *
 * With hash we always use the deposited table, irrespective of whether the
 * mapping is anonymous. With radix we use the deposited table only for
 * anonymous mappings.
 */
int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
                           struct spinlock *old_pmd_ptl,
                           struct vm_area_struct *vma)
{
        if (radix_enabled())
                return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);

        return true;
}

/*
 * Does the CPU support tlbie?
 */
bool tlbie_capable __read_mostly = true;
EXPORT_SYMBOL(tlbie_capable);

/*
 * Should tlbie be used for management of CPU TLBs, for kernel and process
 * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
 * guest address spaces.
 */
bool tlbie_enabled __read_mostly = true;

static int __init setup_disable_tlbie(char *str)
{
        if (!radix_enabled()) {
                pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
                return 1;
        }

        tlbie_capable = false;
        tlbie_enabled = false;

        return 1;
}
__setup("disable_tlbie", setup_disable_tlbie);

static int __init pgtable_debugfs_setup(void)
{
        if (!tlbie_capable)
                return 0;

        /*
         * There is no locking vs tlb flushing when changing this value.
         * The tlb flushers will see one value or another, and use either
         * tlbie or tlbiel with IPIs. In both cases the TLBs will be
         * invalidated as expected.
         */
        debugfs_create_bool("tlbie_enabled", 0600,
                            powerpc_debugfs_root,
                            &tlbie_enabled);

        return 0;
}
arch_initcall(pgtable_debugfs_setup);