linux/arch/arm64/mm/mmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define NO_BLOCK_MAPPINGS       BIT(0)
#define NO_CONT_MAPPINGS        BIT(1)
#define NO_EXEC_MAPPINGS        BIT(2)  /* assumes FEAT_HPDS is not used */

u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

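/*
 * Update an entry in swapper_pg_dir. The swapper page tables may be
 * mapped with restricted permissions, so the store is performed through
 * a transient fixmap alias of the pgd page, serialised by
 * swapper_pgdir_lock.
 */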
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgd_t *fixmap_pgdp;

        spin_lock(&swapper_pgdir_lock);
        fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
        WRITE_ONCE(*fixmap_pgdp, pgd);
        /*
         * We need dsb(ishst) here to ensure the page-table-walker sees
         * our new entry before set_p?d() returns. The fixmap's
         * flush_tlb_kernel_range() via clear_fixmap() does this for us.
         */
        pgd_clear_fixmap();
        spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_is_map_memory(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

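/*
 * Allocate a zeroed page-table page from memblock, for use before the
 * buddy allocator is up. The page is cleared through the FIX_PTE fixmap
 * slot (see below) and its physical address is returned.
 */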
static phys_addr_t __init early_pgtable_alloc(int shift)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!phys)
                panic("Failed to allocate page table page\n");

        /*
         * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
         * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
         * any level of table.
         */
        ptr = pte_set_fixmap(phys);

        memset(ptr, 0, PAGE_SIZE);

        /*
         * Implicit barriers also ensure the zeroed page is visible to the page
         * table walker
         */
        pte_clear_fixmap();

        return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
        /*
         * The following mapping attributes may be updated in live
         * kernel mappings without the need for break-before-make.
         */
        pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

        /* creating or taking down mappings is always safe */
        if (old == 0 || new == 0)
                return true;

        /* live contiguous mappings may not be manipulated at all */
        if ((old | new) & PTE_CONT)
                return false;

        /* Transitioning from Non-Global to Global is unsafe */
        if (old & ~new & PTE_NG)
                return false;

        /*
         * Changing the memory type between Normal and Normal-Tagged is safe
         * since Tagged is considered a permission attribute from the
         * mismatched attribute aliases perspective.
         */
        if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
             (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
            ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
             (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
                mask |= PTE_ATTRINDX_MASK;

        return ((old ^ new) & ~mask) == 0;
}

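/*
 * Populate the PTEs covering [addr, end) with successive pages of the
 * physical range starting at @phys. Once populated, an entry may only
 * be rewritten with attribute changes that pgattr_change_is_safe()
 * permits.
 */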
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
                     phys_addr_t phys, pgprot_t prot)
{
        pte_t *ptep;

        ptep = pte_set_fixmap_offset(pmdp, addr);
        do {
                pte_t old_pte = READ_ONCE(*ptep);

                set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

                /*
                 * After the PTE entry has been populated once, we
                 * only allow updates to the permission attributes.
                 */
                BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
                                              READ_ONCE(pte_val(*ptep))));

                phys += PAGE_SIZE;
        } while (ptep++, addr += PAGE_SIZE, addr != end);

        pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
                                unsigned long end, phys_addr_t phys,
                                pgprot_t prot,
                                phys_addr_t (*pgtable_alloc)(int),
                                int flags)
{
        unsigned long next;
        pmd_t pmd = READ_ONCE(*pmdp);

        BUG_ON(pmd_sect(pmd));
        if (pmd_none(pmd)) {
                pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
                phys_addr_t pte_phys;

                if (flags & NO_EXEC_MAPPINGS)
                        pmdval |= PMD_TABLE_PXN;
                BUG_ON(!pgtable_alloc);
                pte_phys = pgtable_alloc(PAGE_SHIFT);
                __pmd_populate(pmdp, pte_phys, pmdval);
                pmd = READ_ONCE(*pmdp);
        }
        BUG_ON(pmd_bad(pmd));

        do {
                pgprot_t __prot = prot;

                next = pte_cont_addr_end(addr, end);

                /* use a contiguous mapping if the range is suitably aligned */
                if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
                    (flags & NO_CONT_MAPPINGS) == 0)
                        __prot = __pgprot(pgprot_val(prot) | PTE_CONT);

                init_pte(pmdp, addr, next, phys, __prot);

                phys += next - addr;
        } while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
                     phys_addr_t phys, pgprot_t prot,
                     phys_addr_t (*pgtable_alloc)(int), int flags)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_set_fixmap_offset(pudp, addr);
        do {
                pmd_t old_pmd = READ_ONCE(*pmdp);

                next = pmd_addr_end(addr, end);

                /* try section mapping first */
                if (((addr | next | phys) & ~PMD_MASK) == 0 &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
                        pmd_set_huge(pmdp, phys, prot);

                        /*
                         * After the PMD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
                                                      READ_ONCE(pmd_val(*pmdp))));
                } else {
                        alloc_init_cont_pte(pmdp, addr, next, phys, prot,
                                            pgtable_alloc, flags);

                        BUG_ON(pmd_val(old_pmd) != 0 &&
                               pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
                }
                phys += next - addr;
        } while (pmdp++, addr = next, addr != end);

        pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
                                unsigned long end, phys_addr_t phys,
                                pgprot_t prot,
                                phys_addr_t (*pgtable_alloc)(int), int flags)
{
        unsigned long next;
        pud_t pud = READ_ONCE(*pudp);

        /*
         * Check for initial section mappings in the pgd/pud.
         */
        BUG_ON(pud_sect(pud));
        if (pud_none(pud)) {
                pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
                phys_addr_t pmd_phys;

                if (flags & NO_EXEC_MAPPINGS)
                        pudval |= PUD_TABLE_PXN;
                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc(PMD_SHIFT);
                __pud_populate(pudp, pmd_phys, pudval);
                pud = READ_ONCE(*pudp);
        }
        BUG_ON(pud_bad(pud));

        do {
                pgprot_t __prot = prot;

                next = pmd_cont_addr_end(addr, end);

                /* use a contiguous mapping if the range is suitably aligned */
                if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
                    (flags & NO_CONT_MAPPINGS) == 0)
                        __prot = __pgprot(pgprot_val(prot) | PTE_CONT);

                init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

                phys += next - addr;
        } while (addr = next, addr != end);
}

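/*
 * A 1GiB block at the PUD level is only attempted with the 4K granule
 * (PAGE_SHIFT == 12); for other granules the range is mapped at lower
 * levels instead.
 */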
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                        unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(int),
                           int flags)
{
        unsigned long next;
        pud_t *pudp;
        p4d_t *p4dp = p4d_offset(pgdp, addr);
        p4d_t p4d = READ_ONCE(*p4dp);

        if (p4d_none(p4d)) {
                p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
                phys_addr_t pud_phys;

                if (flags & NO_EXEC_MAPPINGS)
                        p4dval |= P4D_TABLE_PXN;
                BUG_ON(!pgtable_alloc);
                pud_phys = pgtable_alloc(PUD_SHIFT);
                __p4d_populate(p4dp, pud_phys, p4dval);
                p4d = READ_ONCE(*p4dp);
        }
        BUG_ON(p4d_bad(p4d));

        pudp = pud_set_fixmap_offset(p4dp, addr);
        do {
                pud_t old_pud = READ_ONCE(*pudp);

                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys) &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
                        pud_set_huge(pudp, phys, prot);

                        /*
                         * After the PUD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
                                                      READ_ONCE(pud_val(*pudp))));
                } else {
                        alloc_init_cont_pmd(pudp, addr, next, phys, prot,
                                            pgtable_alloc, flags);

                        BUG_ON(pud_val(old_pud) != 0 &&
                               pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
                }
                phys += next - addr;
        } while (pudp++, addr = next, addr != end);

        pud_clear_fixmap();
}

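/*
 * Create or update a mapping of [virt, virt + size) to @phys in @pgdir,
 * walking and, if needed, allocating intermediate table levels.
 * @pgtable_alloc supplies new table pages (NULL means "never allocate",
 * see create_mapping_noalloc()), and @flags is a mask of the
 * NO_{BLOCK,CONT,EXEC}_MAPPINGS controls defined above. For example
 * (hypothetical call, mirroring update_mapping_prot() below), remapping
 * one page of kernel memory read-only without allocating tables:
 *
 *      __create_pgd_mapping(init_mm.pgd, phys, virt, PAGE_SIZE,
 *                           PAGE_KERNEL_RO, NULL, NO_CONT_MAPPINGS);
 */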
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 unsigned long virt, phys_addr_t size,
                                 pgprot_t prot,
                                 phys_addr_t (*pgtable_alloc)(int),
                                 int flags)
{
        unsigned long addr, end, next;
        pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

        /*
         * If the virtual and physical address don't have the same offset
         * within a page, we cannot map the region as the caller expects.
         */
        if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
                return;

        phys &= PAGE_MASK;
        addr = virt & PAGE_MASK;
        end = PAGE_ALIGN(virt + size);

        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
                               flags);
                phys += next - addr;
        } while (pgdp++, addr = next, addr != end);
}

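/*
 * Runtime page-table allocators: __pgd_pgtable_alloc() hands back a bare
 * zeroed page, while pgd_pgtable_alloc() additionally runs the pgtable
 * constructor so that the page can later be used with core mm helpers.
 */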
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
        void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
        BUG_ON(!ptr);

        /* Ensure the zeroed page is visible to the page table walker */
        dsb(ishst);
        return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
        phys_addr_t pa = __pgd_pgtable_alloc(shift);

        /*
         * Call proper page table ctor in case later we need to
         * call core mm functions like apply_to_page_range() on
         * this pre-allocated page table.
         *
         * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
         * folded, and if so pgtable_pmd_page_ctor() becomes nop.
         */
        if (shift == PAGE_SHIFT)
                BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
        else if (shift == PMD_SHIFT)
                BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

        return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
                             NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot, bool page_mappings_only)
{
        int flags = 0;

        BUG_ON(mm == &init_mm);

        if (page_mappings_only)
                flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
                             pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
                                phys_addr_t size, pgprot_t prot)
{
        if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
                pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
                             NO_CONT_MAPPINGS);

        /* flush the TLBs after updating live kernel mappings */
        flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
                                  phys_addr_t end, pgprot_t prot, int flags)
{
        __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
                             prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
        /*
         * Remove the write permissions from the linear alias of .text/.rodata
         */
        update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
                            (unsigned long)__init_begin - (unsigned long)_stext,
                            PAGE_KERNEL_RO);
}

static bool crash_mem_map __initdata;

static int __init enable_crash_mem_map(char *arg)
{
        /*
         * Proper parameter parsing is done by reserve_crashkernel(). We only
         * need to know if the linear map has to avoid block mappings so that
         * the crashkernel reservations can be unmapped later.
         */
        crash_mem_map = true;

        return 0;
}
early_param("crashkernel", enable_crash_mem_map);

static void __init map_mem(pgd_t *pgdp)
{
        static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
        phys_addr_t kernel_start = __pa_symbol(_stext);
        phys_addr_t kernel_end = __pa_symbol(__init_begin);
        phys_addr_t start, end;
        int flags = NO_EXEC_MAPPINGS;
        u64 i;

        /*
         * Setting hierarchical PXNTable attributes on table entries covering
         * the linear region is only possible if it is guaranteed that no table
         * entries at any level are being shared between the linear region and
         * the vmalloc region. Check whether this is true for the PGD level, in
         * which case it is guaranteed to be true for all other levels as well.
         */
        BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));

        if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE))
                flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        /*
         * Take care not to create a writable alias for the
         * read-only text and rodata sections of the kernel image.
         * So temporarily mark them as NOMAP to skip mappings in
         * the following for loop.
         */
        memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

        /* map all the memory banks */
        for_each_mem_range(i, &start, &end) {
                if (start >= end)
                        break;
                /*
                 * The linear map must allow allocation tags reading/writing
                 * if MTE is present. Otherwise, it has the same attributes as
                 * PAGE_KERNEL.
                 */
                __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
                               flags);
        }

        /*
         * Map the linear alias of the [_stext, __init_begin) interval
         * as non-executable now, and remove the write permission in
         * mark_linear_text_alias_ro() (which will be called later, once
         * alternative patching has completed). This makes the contents
         * of the region accessible to subsystems such as hibernate,
         * but protects it from inadvertent modification or execution.
         * Note that contiguous mappings cannot be remapped in this way,
         * so we should avoid them here.
         */
        __map_memblock(pgdp, kernel_start, kernel_end,
                       PAGE_KERNEL, NO_CONT_MAPPINGS);
        memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
}

void mark_rodata_ro(void)
{
        unsigned long section_size;

        /*
         * mark .rodata as read only. Use __init_begin rather than __end_rodata
         * to cover NOTES and EXCEPTION_TABLE.
         */
        section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
        update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
                            section_size, PAGE_KERNEL_RO);

        debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
                                      pgprot_t prot, struct vm_struct *vma,
                                      int flags, unsigned long vm_flags)
{
        phys_addr_t pa_start = __pa_symbol(va_start);
        unsigned long size = va_end - va_start;

        BUG_ON(!PAGE_ALIGNED(pa_start));
        BUG_ON(!PAGE_ALIGNED(size));

        __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
                             early_pgtable_alloc, flags);

        if (!(vm_flags & VM_NO_GUARD))
                size += PAGE_SIZE;

        vma->addr       = va_start;
        vma->phys_addr  = pa_start;
        vma->size       = size;
        vma->flags      = VM_MAP | vm_flags;
        vma->caller     = __builtin_return_address(0);

        vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
        int ret = strtobool(arg, &rodata_enabled);
        if (!ret) {
                rodata_full = false;
                return 0;
        }

        /* permit 'full' in addition to boolean options */
        if (strcmp(arg, "full"))
                return -EINVAL;

        rodata_enabled = true;
        rodata_full = true;
        return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
        pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
        phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

        /* The trampoline is always mapped and can therefore be global */
        pgprot_val(prot) &= ~PTE_NG;

        /* Map only the text into the trampoline page table */
        memset(tramp_pg_dir, 0, PGD_SIZE);
        __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
                             prot, __pgd_pgtable_alloc, 0);

        /* Map both the text and data into the kernel page table */
        __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern char __entry_tramp_data_start[];

                __set_fixmap(FIX_ENTRY_TRAMP_DATA,
                             __pa_symbol(__entry_tramp_data_start),
                             PAGE_KERNEL_RO);
        }

        return 0;
}
core_initcall(map_entry_trampoline);
#endif


/*
 * Open-coded check for BTI, only used to determine the configuration of
 * early mappings before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
        u64 pfr1;

        if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
                return false;

        pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
        return cpuid_feature_extract_unsigned_field(pfr1,
                                                    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
        static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
                                vmlinux_initdata, vmlinux_data;

        /*
         * External debuggers may need to write directly to the text
         * mapping to install SW breakpoints. Allow this (only) when
         * explicitly requested with rodata=off.
         */
        pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

        /*
         * If we have a CPU that supports BTI and a kernel built for
         * BTI then mark the kernel executable text as guarded pages
         * now so we don't have to rewrite the page tables later.
         */
        if (arm64_early_this_cpu_has_bti())
                text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

        /*
         * Only rodata will be remapped with different permissions later on,
         * all other segments are allowed to use contiguous mappings.
         */
        map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
                           VM_NO_GUARD);
        map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
                           &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
        map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
                           &vmlinux_inittext, 0, VM_NO_GUARD);
        map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
                           &vmlinux_initdata, 0, VM_NO_GUARD);
        map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

        if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
                /*
                 * The fixmap falls in a separate pgd to the kernel, and doesn't
                 * live in the carveout for the swapper_pg_dir. We can simply
                 * re-use the existing dir for the fixmap.
                 */
                set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
                        READ_ONCE(*pgd_offset_k(FIXADDR_START)));
        } else if (CONFIG_PGTABLE_LEVELS > 3) {
                pgd_t *bm_pgdp;
                p4d_t *bm_p4dp;
                pud_t *bm_pudp;
                /*
                 * The fixmap shares its top level pgd entry with the kernel
                 * mapping. This can really only occur when we are running
                 * with 16k/4 levels, so we can simply reuse the pud level
                 * entry instead.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
                bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
                bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
                pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
                pud_clear_fixmap();
        } else {
                BUG();
        }

        kasan_copy_shadow(pgdp);
}

void __init paging_init(void)
{
        pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

        map_kernel(pgdp);
        map_mem(pgdp);

        pgd_clear_fixmap();

        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
        init_mm.pgd = swapper_pg_dir;

        memblock_free(__pa_symbol(init_pg_dir),
                      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

        memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;
        pte_t *ptep, pte;

        addr = arch_kasan_reset_tag(addr);
        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgdp = pgd_offset_k(addr);
        if (pgd_none(READ_ONCE(*pgdp)))
                return 0;

        p4dp = p4d_offset(pgdp, addr);
        if (p4d_none(READ_ONCE(*p4dp)))
                return 0;

        pudp = pud_offset(p4dp, addr);
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return 0;

        if (pud_sect(pud))
                return pfn_valid(pud_pfn(pud));

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return 0;

        if (pmd_sect(pmd))
                return pfn_valid(pmd_pfn(pmd));

        ptep = pte_offset_kernel(pmdp, addr);
        pte = READ_ONCE(*ptep);
        if (pte_none(pte))
                return 0;

        return pfn_valid(pte_pfn(pte));
}

#ifdef CONFIG_MEMORY_HOTPLUG
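/*
 * Return a no-longer-mapped page to its provider: the vmem_altmap it was
 * carved out of, or the buddy allocator for regular pages.
 */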
static void free_hotplug_page_range(struct page *page, size_t size,
                                    struct vmem_altmap *altmap)
{
        if (altmap) {
                vmem_altmap_free(altmap, size >> PAGE_SHIFT);
        } else {
                WARN_ON(PageReserved(page));
                free_pages((unsigned long)page_address(page), get_order(size));
        }
}

static void free_hotplug_pgtable_page(struct page *page)
{
        free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

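/*
 * Check that [start, end), once widened to @mask granularity, still lies
 * within the caller-supplied [floor, ceiling] limits, i.e. that freeing
 * a table page covering this range cannot touch a neighbouring region.
 */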
static bool pgtable_range_aligned(unsigned long start, unsigned long end,
                                  unsigned long floor, unsigned long ceiling,
                                  unsigned long mask)
{
        start &= mask;
        if (start < floor)
                return false;

        if (ceiling) {
                ceiling &= mask;
                if (!ceiling)
                        return false;
        }

        if (end - 1 > ceiling - 1)
                return false;
        return true;
}

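/*
 * The unmap_hotplug_*_range() helpers below tear down a hotplugged
 * range level by level: each level clears its entries, invalidates the
 * TLB and, when @free_mapped is set, frees the backing pages. Block
 * entries are handled in place; table entries recurse one level down.
 */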
static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
                                    unsigned long end, bool free_mapped,
                                    struct vmem_altmap *altmap)
{
        pte_t *ptep, pte;

        do {
                ptep = pte_offset_kernel(pmdp, addr);
                pte = READ_ONCE(*ptep);
                if (pte_none(pte))
                        continue;

                WARN_ON(!pte_present(pte));
                pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
                if (free_mapped)
                        free_hotplug_page_range(pte_page(pte),
                                                PAGE_SIZE, altmap);
        } while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
                                    unsigned long end, bool free_mapped,
                                    struct vmem_altmap *altmap)
{
        unsigned long next;
        pmd_t *pmdp, pmd;

        do {
                next = pmd_addr_end(addr, end);
                pmdp = pmd_offset(pudp, addr);
                pmd = READ_ONCE(*pmdp);
                if (pmd_none(pmd))
                        continue;

                WARN_ON(!pmd_present(pmd));
                if (pmd_sect(pmd)) {
                        pmd_clear(pmdp);

                        /*
                         * One TLBI should be sufficient here as the PMD_SIZE
                         * range is mapped with a single block entry.
                         */
                        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
                        if (free_mapped)
                                free_hotplug_page_range(pmd_page(pmd),
                                                        PMD_SIZE, altmap);
                        continue;
                }
                WARN_ON(!pmd_table(pmd));
                unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
                                    unsigned long end, bool free_mapped,
                                    struct vmem_altmap *altmap)
{
        unsigned long next;
        pud_t *pudp, pud;

        do {
                next = pud_addr_end(addr, end);
                pudp = pud_offset(p4dp, addr);
                pud = READ_ONCE(*pudp);
                if (pud_none(pud))
                        continue;

                WARN_ON(!pud_present(pud));
                if (pud_sect(pud)) {
                        pud_clear(pudp);

                        /*
                         * One TLBI should be sufficient here as the PUD_SIZE
                         * range is mapped with a single block entry.
                         */
                        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
                        if (free_mapped)
                                free_hotplug_page_range(pud_page(pud),
                                                        PUD_SIZE, altmap);
                        continue;
                }
                WARN_ON(!pud_table(pud));
                unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
                                    unsigned long end, bool free_mapped,
                                    struct vmem_altmap *altmap)
{
        unsigned long next;
        p4d_t *p4dp, p4d;

        do {
                next = p4d_addr_end(addr, end);
                p4dp = p4d_offset(pgdp, addr);
                p4d = READ_ONCE(*p4dp);
                if (p4d_none(p4d))
                        continue;

                WARN_ON(!p4d_present(p4d));
                unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
                                bool free_mapped, struct vmem_altmap *altmap)
{
        unsigned long next;
        pgd_t *pgdp, pgd;

        /*
         * An altmap can only be used as backing memory for the vmemmap
         * mapping. If the backing memory itself is not being freed, the
         * altmap is irrelevant; warn when this inconsistency is
         * encountered.
         */
        WARN_ON(!free_mapped && altmap);

        do {
                next = pgd_addr_end(addr, end);
                pgdp = pgd_offset_k(addr);
                pgd = READ_ONCE(*pgdp);
                if (pgd_none(pgd))
                        continue;

                WARN_ON(!pgd_present(pgd));
                unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}

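/*
 * The free_empty_*_table() helpers below free the table pages left
 * empty by the unmap loops above, subject to the floor/ceiling bounds
 * enforced via pgtable_range_aligned().
 */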
static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        pte_t *ptep, pte;
        unsigned long i, start = addr;

        do {
                ptep = pte_offset_kernel(pmdp, addr);
                pte = READ_ONCE(*ptep);

                /*
                 * This is just a sanity check here which verifies that
                 * pte clearing has been done by earlier unmap loops.
                 */
                WARN_ON(!pte_none(pte));
        } while (addr += PAGE_SIZE, addr < end);

        if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
                return;

        /*
         * Check whether we can free the pte page if the rest of the
         * entries are empty. Overlaps with other regions have been
         * handled by the floor/ceiling check.
         */
        ptep = pte_offset_kernel(pmdp, 0UL);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                if (!pte_none(READ_ONCE(ptep[i])))
                        return;
        }

        pmd_clear(pmdp);
        __flush_tlb_kernel_pgtable(start);
        free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        pmd_t *pmdp, pmd;
        unsigned long i, next, start = addr;

        do {
                next = pmd_addr_end(addr, end);
                pmdp = pmd_offset(pudp, addr);
                pmd = READ_ONCE(*pmdp);
                if (pmd_none(pmd))
                        continue;

                WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
                free_empty_pte_table(pmdp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);

        if (CONFIG_PGTABLE_LEVELS <= 2)
                return;

        if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
                return;

        /*
         * Check whether we can free the pmd page if the rest of the
         * entries are empty. Overlaps with other regions have been
         * handled by the floor/ceiling check.
         */
        pmdp = pmd_offset(pudp, 0UL);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(READ_ONCE(pmdp[i])))
                        return;
        }

        pud_clear(pudp);
        __flush_tlb_kernel_pgtable(start);
        free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        pud_t *pudp, pud;
        unsigned long i, next, start = addr;

        do {
                next = pud_addr_end(addr, end);
                pudp = pud_offset(p4dp, addr);
                pud = READ_ONCE(*pudp);
                if (pud_none(pud))
                        continue;

                WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
                free_empty_pmd_table(pudp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);

        if (CONFIG_PGTABLE_LEVELS <= 3)
                return;

        if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
                return;

        /*
         * Check whether we can free the pud page if the rest of the
         * entries are empty. Overlaps with other regions have been
         * handled by the floor/ceiling check.
         */
        pudp = pud_offset(p4dp, 0UL);
        for (i = 0; i < PTRS_PER_PUD; i++) {
                if (!pud_none(READ_ONCE(pudp[i])))
                        return;
        }

        p4d_clear(p4dp);
        __flush_tlb_kernel_pgtable(start);
        free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        unsigned long next;
        p4d_t *p4dp, p4d;

        do {
                next = p4d_addr_end(addr, end);
                p4dp = p4d_offset(pgdp, addr);
                p4d = READ_ONCE(*p4dp);
                if (p4d_none(p4d))
                        continue;

                WARN_ON(!p4d_present(p4d));
                free_empty_pud_table(p4dp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        unsigned long next;
        pgd_t *pgdp, pgd;

        do {
                next = pgd_addr_end(addr, end);
                pgdp = pgd_offset_k(addr);
                pgd = READ_ONCE(*pgdp);
                if (pgd_none(pgd))
                        continue;

                WARN_ON(!pgd_present(pgd));
                free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);
}
#endif

#if !ARM64_KERNEL_USES_PMD_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
        return vmemmap_populate_basepages(start, end, node, altmap);
}
#else   /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
        do {
                next = pmd_addr_end(addr, end);

                pgdp = vmemmap_pgd_populate(addr, node);
                if (!pgdp)
                        return -ENOMEM;

                p4dp = vmemmap_p4d_populate(pgdp, addr, node);
                if (!p4dp)
                        return -ENOMEM;

                pudp = vmemmap_pud_populate(p4dp, addr, node);
                if (!pudp)
                        return -ENOMEM;

                pmdp = pmd_offset(pudp, addr);
                if (pmd_none(READ_ONCE(*pmdp))) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
                        if (!p) {
                                if (vmemmap_populate_basepages(addr, next, node, altmap))
                                        return -ENOMEM;
                                continue;
                        }

                        pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmdp, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_KERNEL_USES_PMD_MAPS */

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

        unmap_hotplug_range(start, end, true, altmap);
        free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

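/*
 * Fixmap table walkers. The fixmap region is covered by the statically
 * allocated bm_pud/bm_pmd/bm_pte tables wired up in early_fixmap_init(),
 * so its entries can be located without allocating anything.
 */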
static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgdp = pgd_offset_k(addr);
        p4d_t *p4dp = p4d_offset(pgdp, addr);
        p4d_t p4d = READ_ONCE(*p4dp);

        BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

        return pud_offset_kimg(p4dp, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pudp = fixmap_pud(addr);
        pud_t pud = READ_ONCE(*pudp);

        BUG_ON(pud_none(pud) || pud_bad(pud));

        return pmd_offset_kimg(pudp, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
        pgd_t *pgdp;
        p4d_t *p4dp, p4d;
        pud_t *pudp;
        pmd_t *pmdp;
        unsigned long addr = FIXADDR_START;

        pgdp = pgd_offset_k(addr);
        p4dp = p4d_offset(pgdp, addr);
        p4d = READ_ONCE(*p4dp);
        if (CONFIG_PGTABLE_LEVELS > 3 &&
            !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16k/4 levels configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pudp = pud_offset_kimg(p4dp, addr);
        } else {
                if (p4d_none(p4d))
                        __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
                pudp = fixmap_pud(addr);
        }
        if (pud_none(READ_ONCE(*pudp)))
                __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
        pmdp = fixmap_pmd(addr);
        __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmdp %p != %p, %p\n",
                        pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        int offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the magic and size
         * fields of the FDT header after mapping the first chunk, double check
         * here if that is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                        dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;

        *size = fdt_totalsize(dt_virt);
        if (*size > MAX_FDT_SIZE)
                return NULL;

        if (offset + *size > SWAPPER_BLOCK_SIZE)
                create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

        return dt_virt;
}

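/*
 * Block mapping helpers for the generic huge vmap/ioremap code. A new
 * block entry is only installed when pgattr_change_is_safe() confirms
 * that no break-before-make sequence would be required.
 */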
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
                                   pud_val(new_pud)))
                return 0;

        VM_BUG_ON(phys & ~PUD_MASK);
        set_pud(pudp, new_pud);
        return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
                                   pmd_val(new_pmd)))
                return 0;

        VM_BUG_ON(phys & ~PMD_MASK);
        set_pmd(pmdp, new_pmd);
        return 1;
}

int pud_clear_huge(pud_t *pudp)
{
        if (!pud_sect(READ_ONCE(*pudp)))
                return 0;
        pud_clear(pudp);
        return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
        if (!pmd_sect(READ_ONCE(*pmdp)))
                return 0;
        pmd_clear(pmdp);
        return 1;
}

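/*
 * Tear down a table entry and free the next-level table it points at,
 * once the range has been unmapped; called when a table mapping is
 * being replaced, e.g. by the huge vmap/ioremap code.
 */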
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
        pte_t *table;
        pmd_t pmd;

        pmd = READ_ONCE(*pmdp);

        if (!pmd_table(pmd)) {
                VM_WARN_ON(1);
                return 1;
        }

        table = pte_offset_kernel(pmdp, addr);
        pmd_clear(pmdp);
        __flush_tlb_kernel_pgtable(addr);
        pte_free_kernel(NULL, table);
        return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
        pmd_t *table;
        pmd_t *pmdp;
        pud_t pud;
        unsigned long next, end;

        pud = READ_ONCE(*pudp);

        if (!pud_table(pud)) {
                VM_WARN_ON(1);
                return 1;
        }

        table = pmd_offset(pudp, addr);
        pmdp = table;
        next = addr;
        end = addr + PUD_SIZE;
        do {
                pmd_free_pte_page(pmdp, next);
        } while (pmdp++, next += PMD_SIZE, next != end);

        pud_clear(pudp);
        __flush_tlb_kernel_pgtable(addr);
        pmd_free(NULL, table);
        return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
        unsigned long end = start + size;

        WARN_ON(pgdir != init_mm.pgd);
        WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

        unmap_hotplug_range(start, end, false, NULL);
        free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

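/*
 * Report the largest physical address range that memory hotplug may
 * map: it must fit within the linear map, whose placement depends on
 * vabits_actual and, with KASLR, on linear region randomization.
 */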
struct range arch_get_mappable_range(void)
{
        struct range mhp_range;
        u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
        u64 end_linear_pa = __pa(PAGE_END - 1);

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                /*
                 * Check for a wrap: because of linear mapping
                 * randomization, the start physical address can end up
                 * bigger than the end physical address. In that case set
                 * start to zero, because the [0, end_linear_pa] range
                 * must still be able to cover all addressable physical
                 * addresses.
                 */
                if (start_linear_pa > end_linear_pa)
                        start_linear_pa = 0;
        }

        WARN_ON(start_linear_pa > end_linear_pa);

        /*
         * The linear mapping region is the range
         * [PAGE_OFFSET..(PAGE_END - 1)], accommodating both of its ends
         * but excluding PAGE_END. The maximum physical range which can
         * be mapped inside this linear mapping range must also be
         * derived from its end points.
         */
        mhp_range.start = start_linear_pa;
        mhp_range.end = end_linear_pa;

        return mhp_range;
}

int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        int ret, flags = NO_EXEC_MAPPINGS;

        VM_BUG_ON(!mhp_range_allowed(start, size, true));

        /*
         * KFENCE requires the linear map to be mapped at page granularity,
         * so that it is possible to protect/unprotect single pages in the
         * KFENCE pool.
         */
        if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
                flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
                             size, params->pgprot, __pgd_pgtable_alloc,
                             flags);

        memblock_clear_nomap(start, size);

        ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
                           params);
        if (ret)
                __remove_pgd_mapping(swapper_pg_dir,
                                     __phys_to_virt(start), size);
        return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
        __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed, by blocking the pfn range offlining process in
 * __offline_pages(). Hence it prevents both the offlining and the
 * removal of boot memory, which is always online initially. If and when
 * boot memory can be removed in the future, this notifier should be
 * dropped and free_hotplug_page_range() should handle any reserved
 * pages allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
                                           unsigned long action, void *data)
{
        struct mem_section *ms;
        struct memory_notify *arg = data;
        unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
        unsigned long pfn = arg->start_pfn;

        if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
                return NOTIFY_OK;

        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long start = PFN_PHYS(pfn);
                unsigned long end = start + (1UL << PA_SECTION_SHIFT);

                ms = __pfn_to_section(pfn);
                if (!early_section(ms))
                        continue;

                if (action == MEM_GOING_OFFLINE) {
                        /*
                         * Boot memory removal is not supported. Prevent
                         * it by blocking any attempted offline request
                         * for boot memory, and report the attempt.
                         */
                        pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
                        return NOTIFY_BAD;
                } else if (action == MEM_OFFLINE) {
                        /*
                         * This should never have happened. Boot memory
                         * offlining should have been prevented by this
                         * very notifier. Some memory removal procedure
                         * has probably changed, which would then require
                         * further debugging.
                         */
                        pr_err("Boot memory [%lx %lx] offlined\n", start, end);

                        /*
                         * Core memory hotplug does not process a return
                         * code from the notifier for MEM_OFFLINE events.
                         * The error condition has been reported. Return
                         * from here as if ignored.
                         */
                        return NOTIFY_DONE;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
        .notifier_call = prevent_bootmem_remove_notifier,
};

/*
 * This ensures that boot memory sections on the platform are online
 * from early boot. A memory section cannot be prevented from being
 * offlined if, for some reason, it is not online to begin with. This
 * helps validate the basic assumption on which the above memory event
 * notifier works to prevent boot memory section offlining and its
 * possible removal.
 */
static void validate_bootmem_online(void)
{
        phys_addr_t start, end, addr;
        struct mem_section *ms;
        u64 i;

        /*
         * Scanning all memblock regions might be expensive on systems
         * with a lot of memory, so enable this validation only when
         * DEBUG_VM is set.
         */
        if (!IS_ENABLED(CONFIG_DEBUG_VM))
                return;

        for_each_mem_range(i, &start, &end) {
                for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
                        ms = __pfn_to_section(PHYS_PFN(addr));

                        /*
                         * All memory ranges in the system at this point
                         * should have been marked as early sections.
                         */
                        WARN_ON(!early_section(ms));

                        /*
                         * The memory notifier mechanism here, which
                         * prevents boot memory offlining, depends on each
                         * early memory section in the system being
                         * initially online. Otherwise a given memory
                         * section which is already offline will be
                         * overlooked and can be removed completely. Call
                         * out such sections.
                         */
                        if (!online_section(ms))
                                pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
                                        addr, addr + (1UL << PA_SECTION_SHIFT));
                }
        }
}

static int __init prevent_bootmem_remove_init(void)
{
        int ret = 0;

        if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
                return ret;

        validate_bootmem_online();
        ret = register_memory_notifier(&prevent_bootmem_remove_nb);
        if (ret)
                pr_err("%s: Notifier registration failed %d\n", __func__, ret);

        return ret;
}
early_initcall(prevent_bootmem_remove_init);
#endif
