linux/arch/powerpc/mm/book3s64/radix_pgtable.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;

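/*
 * Boot-time page table allocator. Allocation failure is fatal here, hence
 * the panic(): without page table memory the MMU cannot be brought up, so
 * there is no sensible error path this early in boot.
 */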
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
                        unsigned long region_start, unsigned long region_end)
{
        phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
        phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
        void *ptr;

        if (region_start)
                min_addr = region_start;
        if (region_end)
                max_addr = region_end;

        ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

        if (!ptr)
                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
                      __func__, size, size, nid, &min_addr, &max_addr);

        return ptr;
}

/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be completely used as a page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size,
                          int nid,
                          unsigned long region_start, unsigned long region_end)
{
        unsigned long pfn = pa >> PAGE_SHIFT;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        pgdp = pgd_offset_k(ea);
        p4dp = p4d_offset(pgdp, ea);
        if (p4d_none(*p4dp)) {
                pudp = early_alloc_pgtable(PAGE_SIZE, nid,
                                           region_start, region_end);
                p4d_populate(&init_mm, p4dp, pudp);
        }
        pudp = pud_offset(p4dp, ea);
        if (map_page_size == PUD_SIZE) {
                ptep = (pte_t *)pudp;
                goto set_the_pte;
        }
        if (pud_none(*pudp)) {
                pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
                                           region_end);
                pud_populate(&init_mm, pudp, pmdp);
        }
        pmdp = pmd_offset(pudp, ea);
        if (map_page_size == PMD_SIZE) {
                ptep = pmdp_ptep(pmdp);
                goto set_the_pte;
        }
        if (!pmd_present(*pmdp)) {
                ptep = early_alloc_pgtable(PAGE_SIZE, nid,
                                                region_start, region_end);
                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
        ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
        asm volatile("ptesync": : :"memory");
        return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size,
                          int nid,
                          unsigned long region_start, unsigned long region_end)
{
        unsigned long pfn = pa >> PAGE_SHIFT;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        /*
         * Make sure task size is correct as per the max address
         */
        BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
        BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

        if (unlikely(!slab_is_available()))
                return early_map_kernel_page(ea, pa, flags, map_page_size,
                                                nid, region_start, region_end);

        /*
         * Should make page table allocation functions be able to take a
         * node, so we can place kernel page tables on the right nodes after
         * boot.
         */
        pgdp = pgd_offset_k(ea);
        p4dp = p4d_offset(pgdp, ea);
        pudp = pud_alloc(&init_mm, p4dp, ea);
        if (!pudp)
                return -ENOMEM;
        if (map_page_size == PUD_SIZE) {
                ptep = (pte_t *)pudp;
                goto set_the_pte;
        }
        pmdp = pmd_alloc(&init_mm, pudp, ea);
        if (!pmdp)
                return -ENOMEM;
        if (map_page_size == PMD_SIZE) {
                ptep = pmdp_ptep(pmdp);
                goto set_the_pte;
        }
        ptep = pte_alloc_kernel(pmdp, ea);
        if (!ptep)
                return -ENOMEM;

set_the_pte:
        set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
        asm volatile("ptesync": : :"memory");
        return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                          pgprot_t flags,
                          unsigned int map_page_size)
{
        return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}
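
/*
 * Usage sketch (hypothetical addresses): a single base-size kernel mapping
 * would be installed with something like
 *
 *         radix__map_kernel_page(vaddr, paddr, PAGE_KERNEL, PAGE_SIZE);
 *
 * where vaddr and paddr are page-aligned effective and real addresses.
 */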

#ifdef CONFIG_STRICT_KERNEL_RWX
static void radix__change_memory_range(unsigned long start, unsigned long end,
                                       unsigned long clear)
{
        unsigned long idx;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;

        start = ALIGN_DOWN(start, PAGE_SIZE);
        end = PAGE_ALIGN(end); // aligns up

        pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
                 start, end, clear);

        for (idx = start; idx < end; idx += PAGE_SIZE) {
                pgdp = pgd_offset_k(idx);
                p4dp = p4d_offset(pgdp, idx);
                pudp = pud_alloc(&init_mm, p4dp, idx);
                if (!pudp)
                        continue;
                if (pud_is_leaf(*pudp)) {
                        ptep = (pte_t *)pudp;
                        goto update_the_pte;
                }
                pmdp = pmd_alloc(&init_mm, pudp, idx);
                if (!pmdp)
                        continue;
                if (pmd_is_leaf(*pmdp)) {
                        ptep = pmdp_ptep(pmdp);
                        goto update_the_pte;
                }
                ptep = pte_alloc_kernel(pmdp, idx);
                if (!ptep)
                        continue;
update_the_pte:
                radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
        }

        radix__flush_tlb_kernel_range(start, end);
}

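/*
 * Make the kernel text and rodata (_stext up to __init_begin) read-only by
 * clearing _PAGE_WRITE on every page in the range.
 */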
void radix__mark_rodata_ro(void)
{
        unsigned long start, end;

        start = (unsigned long)_stext;
        end = (unsigned long)__init_begin;

        radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
        unsigned long start = (unsigned long)__init_begin;
        unsigned long end = (unsigned long)__init_end;

        radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
        char buf[10];

        if (end <= start)
                return;

        string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

        pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
                exec ? " (exec)" : "");
}

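/*
 * When STRICT_KERNEL_RWX is enabled, clamp a linear-map chunk at
 * __init_begin so that a single large page never spans the boundary
 * between the executable kernel text and ordinary memory.
 */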
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
        if (addr < __pa_symbol(__init_begin))
                return __pa_symbol(__init_begin);
#endif
        return end;
}

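/*
 * Map the physical range start..end into the kernel linear mapping, greedily
 * using the largest page size (1G, then 2M, then base pages) that alignment,
 * the remaining gap, and the supported MMU page sizes allow.
 */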
static int __meminit create_physical_mapping(unsigned long start,
                                             unsigned long end,
                                             unsigned long max_mapping_size,
                                             int nid, pgprot_t _prot)
{
        unsigned long vaddr, addr, mapping_size = 0;
        bool prev_exec, exec = false;
        pgprot_t prot;
        int psize;

        start = ALIGN(start, PAGE_SIZE);
        end   = ALIGN_DOWN(end, PAGE_SIZE);
        for (addr = start; addr < end; addr += mapping_size) {
                unsigned long gap, previous_size;
                int rc;

                gap = next_boundary(addr, end) - addr;
                if (gap > max_mapping_size)
                        gap = max_mapping_size;
                previous_size = mapping_size;
                prev_exec = exec;

                if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
                    mmu_psize_defs[MMU_PAGE_1G].shift) {
                        mapping_size = PUD_SIZE;
                        psize = MMU_PAGE_1G;
                } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
                           mmu_psize_defs[MMU_PAGE_2M].shift) {
                        mapping_size = PMD_SIZE;
                        psize = MMU_PAGE_2M;
                } else {
                        mapping_size = PAGE_SIZE;
                        psize = mmu_virtual_psize;
                }

                vaddr = (unsigned long)__va(addr);

                if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
                    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
                        prot = PAGE_KERNEL_X;
                        exec = true;
                } else {
                        prot = _prot;
                        exec = false;
                }

                if (mapping_size != previous_size || exec != prev_exec) {
                        print_mapping(start, addr, previous_size, prev_exec);
                        start = addr;
                }

                rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
                if (rc)
                        return rc;

                update_page_count(psize, 1);
        }

        print_mapping(start, addr, mapping_size, exec);
        return 0;
}

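/*
 * Boot-time radix setup: build the kernel linear mapping, size the PID
 * space, and allocate and fill the host process table.
 */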
static void __init radix_init_pgtable(void)
{
        unsigned long rts_field;
        phys_addr_t start, end;
        u64 i;

        /* We don't support slb for radix */
        mmu_slb_size = 0;

        /*
         * Create the linear mapping
         */
        for_each_mem_range(i, &start, &end) {
                /*
                 * The memblock allocator is up at this point, so the
                 * page tables will be allocated within the range. No
                 * need for a node (which we don't have yet).
                 */

                if (end >= RADIX_VMALLOC_START) {
                        pr_warn("Outside the supported range\n");
                        continue;
                }

                WARN_ON(create_physical_mapping(start, end,
                                                radix_mem_block_size,
                                                -1, PAGE_KERNEL));
        }

        /* Find out how many PID bits are supported */
        if (!cpu_has_feature(CPU_FTR_HVMODE) &&
                        cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
                /*
                 * Older versions of KVM on these machines prefer if the
                 * guest only uses the low 19 PID bits.
                 */
                if (!mmu_pid_bits)
                        mmu_pid_bits = 19;
        } else {
                if (!mmu_pid_bits)
                        mmu_pid_bits = 20;
        }
        mmu_base_pid = 1;

        /*
         * Allocate Partition table and process table for the
         * host.
         */
        BUG_ON(PRTB_SIZE_SHIFT > 36);
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
        /*
         * Fill in the process table.
         */
        rts_field = radix__get_tree_size();
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

        /*
         * The init_mm context is given the first available (non-zero) PID,
         * which is the "guard PID" and contains no page table. PIDR should
         * never be set to zero because that duplicates the kernel address
         * space at the 0x0... offset (quadrant 0)!
         *
         * An arbitrary PID that may later be allocated by the PID allocator
         * for userspace processes must not be used either, because that
         * would cause stale user mappings for that PID on CPUs outside of
         * the TLB invalidation scheme (because it won't be in mm_cpumask).
         *
         * So permanently carve out one PID for the purpose of a guard PID.
         */
        init_mm.context.id = mmu_base_pid;
        mmu_base_pid++;
}

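/*
 * Install partition-table entry 0 for the host: dw0 points the hardware at
 * the host radix tree (PATB_HR selects host-radix mode), and dw1 points at
 * the process table (PATB_GR marks the process tables as radix format).
 */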
static void __init radix_init_partition_table(void)
{
        unsigned long rts_field, dw0, dw1;

        mmu_partition_table_init();
        rts_field = radix__get_tree_size();
        dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
        dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
        mmu_partition_table_set_entry(0, dw0, dw1, false);

        pr_info("Initializing Radix MMU\n");
}

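/* Translate a page-size shift (12, 16, 21 or 30) to an MMU_PAGE_* index. */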
static int __init get_idx_from_shift(unsigned int shift)
{
        int idx = -1;

        switch (shift) {
        case 0xc:
                idx = MMU_PAGE_4K;
                break;
        case 0x10:
                idx = MMU_PAGE_64K;
                break;
        case 0x15:
                idx = MMU_PAGE_2M;
                break;
        case 0x1e:
                idx = MMU_PAGE_1G;
                break;
        }
        return idx;
}

static int __init radix_dt_scan_page_sizes(unsigned long node,
                                           const char *uname, int depth,
                                           void *data)
{
        int size = 0;
        int shift, idx;
        unsigned int ap;
        const __be32 *prop;
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

        /* We are scanning "cpu" nodes only */
        if (type == NULL || strcmp(type, "cpu") != 0)
                return 0;

        /* Find MMU PID size */
        prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
        if (prop && size == 4)
                mmu_pid_bits = be32_to_cpup(prop);

        /* Grab page size encodings */
        prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
        if (!prop)
                return 0;

        pr_info("Page sizes from device-tree:\n");
        for (; size >= 4; size -= 4, ++prop) {

                struct mmu_psize_def *def;

                /* top 3 bits are the AP encoding */
                shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
                ap = be32_to_cpu(prop[0]) >> 29;
                pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

                idx = get_idx_from_shift(shift);
                if (idx < 0)
                        continue;

                def = &mmu_psize_defs[idx];
                def->shift = shift;
                def->ap = ap;
                def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
        }

        /* needed ? */
        cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
        return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
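/*
 * On pseries, read the memory block size from the ibm,lmb-size property of
 * the ibm,dynamic-reconfiguration-memory node; fall back to
 * MIN_MEMORY_BLOCK_SIZE when the property is absent.
 */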
static int __init probe_memory_block_size(unsigned long node, const char *uname,
                                          int depth, void *data)
{
        unsigned long *mem_block_size = (unsigned long *)data;
        const __be32 *prop;
        int len;

        if (depth != 1)
                return 0;

        if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
                return 0;

        prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);

        if (!prop || len < dt_root_size_cells * sizeof(__be32))
                /*
                 * Nothing in the device tree
                 */
                *mem_block_size = MIN_MEMORY_BLOCK_SIZE;
        else
                *mem_block_size = of_read_number(prop, dt_root_size_cells);
        return 1;
}

static unsigned long radix_memory_block_size(void)
{
        unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

        /*
         * The OPAL firmware features have been probed by now, so it is
         * safe to test for OPAL here.
         */
        if (firmware_has_feature(FW_FEATURE_OPAL))
                mem_block_size = 1UL * 1024 * 1024 * 1024;
        else
                of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

        return mem_block_size;
}

#else   /* CONFIG_MEMORY_HOTPLUG */

static unsigned long radix_memory_block_size(void)
{
        return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init radix__early_init_devtree(void)
{
        int rc;

        /*
         * Try to find the available page sizes in the device-tree
         */
        rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
        if (!rc) {
                /*
                 * No page size details found in device tree.
                 * Let's assume we have 4K and 64K page support.
                 */
                mmu_psize_defs[MMU_PAGE_4K].shift = 12;
                mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
                mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
                        psize_to_rpti_pgsize(MMU_PAGE_4K);

                mmu_psize_defs[MMU_PAGE_64K].shift = 16;
                mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
                mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
                        psize_to_rpti_pgsize(MMU_PAGE_64K);
        }

        /*
         * Max mapping size used when mapping pages. We don't use
         * ppc_md.memory_block_size() here because this gets called
         * early, before the machine has been probed. Also, the
         * pseries implementation only checks for ibm,lmb-size.
         * All hypervisors supporting radix expose that device
         * tree node.
         */
        radix_mem_block_size = radix_memory_block_size();
}

static void radix_init_amor(void)
{
        /*
         * In HV mode, we init AMOR (Authority Mask Override Register) so that
         * the hypervisor and guest can set up the IAMR (Instruction Authority
         * Mask Register), enable key 0 and set it to 1.
         *
         * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
         */
        mtspr(SPRN_AMOR, (3ul << 62));
}

void __init radix__early_init_mmu(void)
{
        unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
        /* PAGE_SIZE mappings */
        mmu_virtual_psize = MMU_PAGE_64K;
#else
        mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        /* vmemmap mapping */
        if (mmu_psize_defs[MMU_PAGE_2M].shift) {
                /*
                 * map vmemmap using 2M if available
                 */
                mmu_vmemmap_psize = MMU_PAGE_2M;
        } else
                mmu_vmemmap_psize = mmu_virtual_psize;
#endif
        /*
         * initialize page table size
         */
        __pte_index_size = RADIX_PTE_INDEX_SIZE;
        __pmd_index_size = RADIX_PMD_INDEX_SIZE;
        __pud_index_size = RADIX_PUD_INDEX_SIZE;
        __pgd_index_size = RADIX_PGD_INDEX_SIZE;
        __pud_cache_index = RADIX_PUD_INDEX_SIZE;
        __pte_table_size = RADIX_PTE_TABLE_SIZE;
        __pmd_table_size = RADIX_PMD_TABLE_SIZE;
        __pud_table_size = RADIX_PUD_TABLE_SIZE;
        __pgd_table_size = RADIX_PGD_TABLE_SIZE;

        __pmd_val_bits = RADIX_PMD_VAL_BITS;
        __pud_val_bits = RADIX_PUD_VAL_BITS;
        __pgd_val_bits = RADIX_PGD_VAL_BITS;

        __kernel_virt_start = RADIX_KERN_VIRT_START;
        __vmalloc_start = RADIX_VMALLOC_START;
        __vmalloc_end = RADIX_VMALLOC_END;
        __kernel_io_start = RADIX_KERN_IO_START;
        __kernel_io_end = RADIX_KERN_IO_END;
        vmemmap = (struct page *)RADIX_VMEMMAP_START;
        ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
        pci_io_base = ISA_IO_BASE;
#endif
        __pte_frag_nr = RADIX_PTE_FRAG_NR;
        __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
        __pmd_frag_nr = RADIX_PMD_FRAG_NR;
        __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

        radix_init_pgtable();

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
                radix_init_partition_table();
                radix_init_amor();
        } else {
                radix_init_pseries();
        }

        memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

        /* Switch to the guard PID before turning on MMU */
        radix__switch_mmu_context(NULL, &init_mm);
        tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
        unsigned long lpcr;
        /*
         * update partition table control register and UPRT
         */
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

                set_ptcr_when_no_uv(__pa(partition_tb) |
                                    (PATB_SIZE_SHIFT - 12));

                radix_init_amor();
        }

        radix__switch_mmu_context(NULL, &init_mm);
        tlbiel_all();

        /* Make sure userspace can't change the AMR */
        mtspr(SPRN_UAMOR, 0);
}

void radix__mmu_cleanup_all(void)
{
        unsigned long lpcr;

        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
                lpcr = mfspr(SPRN_LPCR);
                mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
                set_ptcr_when_no_uv(0);
                powernv_set_nmmu_ptcr(0);
                radix__flush_tlb_all();
        }
}

#ifdef CONFIG_MEMORY_HOTPLUG
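/*
 * The free_*_table() helpers below free a page-table page only once every
 * entry in it is none; a single remaining entry keeps the table allocated.
 */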
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        pte_free_kernel(&init_mm, pte_start);
        pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (!pmd_none(*pmd))
                        return;
        }

        pmd_free(&init_mm, pmd_start);
        pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (!pud_none(*pud))
                        return;
        }

        pud_free(&init_mm, pud_start);
        p4d_clear(p4d);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
                             unsigned long end)
{
        unsigned long next;
        pte_t *pte;

        pte = pte_start + pte_index(addr);
        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
                        /*
                         * The vmemmap_free() and remove_section_mapping()
                         * codepaths call us with aligned addresses.
                         */
                        WARN_ONCE(1, "%s: unaligned range\n", __func__);
                        continue;
                }

                pte_clear(&init_mm, addr, pte);
        }
}

static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
                             unsigned long end)
{
        unsigned long next;
        pte_t *pte_base;
        pmd_t *pmd;

        pmd = pmd_start + pmd_index(addr);
        for (; addr < end; addr = next, pmd++) {
                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (pmd_is_leaf(*pmd)) {
                        if (!IS_ALIGNED(addr, PMD_SIZE) ||
                            !IS_ALIGNED(next, PMD_SIZE)) {
                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
                                continue;
                        }
                        pte_clear(&init_mm, addr, (pte_t *)pmd);
                        continue;
                }

                pte_base = (pte_t *)pmd_page_vaddr(*pmd);
                remove_pte_table(pte_base, addr, next);
                free_pte_table(pte_base, pmd);
        }
}

static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
                             unsigned long end)
{
        unsigned long next;
        pmd_t *pmd_base;
        pud_t *pud;

        pud = pud_start + pud_index(addr);
        for (; addr < end; addr = next, pud++) {
                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (pud_is_leaf(*pud)) {
                        if (!IS_ALIGNED(addr, PUD_SIZE) ||
                            !IS_ALIGNED(next, PUD_SIZE)) {
                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
                                continue;
                        }
                        pte_clear(&init_mm, addr, (pte_t *)pud);
                        continue;
                }

                pmd_base = pud_pgtable(*pud);
                remove_pmd_table(pmd_base, addr, next);
                free_pmd_table(pmd_base, pud);
        }
}

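/*
 * Tear down the kernel mapping for start..end, freeing any page-table pages
 * that become empty along the way, and flush the TLB once for the whole
 * range at the end.
 */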
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
        unsigned long addr, next;
        pud_t *pud_base;
        pgd_t *pgd;
        p4d_t *p4d;

        spin_lock(&init_mm.page_table_lock);

        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                p4d = p4d_offset(pgd, addr);
                if (!p4d_present(*p4d))
                        continue;

                if (p4d_is_leaf(*p4d)) {
                        if (!IS_ALIGNED(addr, P4D_SIZE) ||
                            !IS_ALIGNED(next, P4D_SIZE)) {
                                WARN_ONCE(1, "%s: unaligned range\n", __func__);
                                continue;
                        }

                        pte_clear(&init_mm, addr, (pte_t *)pgd);
                        continue;
                }

                pud_base = p4d_pgtable(*p4d);
                remove_pud_table(pud_base, addr, next);
                free_pud_table(pud_base, p4d);
        }

        spin_unlock(&init_mm.page_table_lock);
        radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start,
                                            unsigned long end, int nid,
                                            pgprot_t prot)
{
        if (end >= RADIX_VMALLOC_START) {
                pr_warn("Outside the supported range\n");
                return -1;
        }

        return create_physical_mapping(__pa(start), __pa(end),
                                       radix_mem_block_size, nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
        remove_pagetable(start, end);
        return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int map_page_size,
                                 int nid)
{
        return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
                                      unsigned long page_size,
                                      unsigned long phys)
{
        /* Create a PTE encoding */
        unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
        int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
        int ret;

        if ((start + page_size) >= RADIX_VMEMMAP_END) {
                pr_warn("Outside the supported range\n");
                return -1;
        }

        ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
        BUG_ON(ret);

        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
        remove_pagetable(start, start + page_size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
                                  pmd_t *pmdp, unsigned long clr,
                                  unsigned long set)
{
        unsigned long old;

#ifdef CONFIG_DEBUG_VM
        WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
        assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

        old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
        trace_hugepage_update(addr, old, clr, set);

        return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                        pmd_t *pmdp)
{
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
        VM_BUG_ON(pmd_devmap(*pmdp));
        /*
         * khugepaged calls this for a normal pmd
         */
        pmd = *pmdp;
        pmd_clear(pmdp);

        /*
         * pmdp_collapse_flush needs to ensure that there are no parallel GUP
         * walks after this call. This is needed so that we can have a stable
         * page ref count when collapsing a page. We don't allow collapsing a
         * page if GUP has taken a reference on it. We can ensure that by
         * sending an IPI, because GUP walks happen with IRQs disabled.
         */
        serialize_against_pte_lookup(vma->vm_mm);

        radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

        return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                 pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pte_t *ptep;
        pgtable_t pgtable;
        struct list_head *lh;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
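        /* Zero the two pte_t slots that were used as the list_head. */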
        ptep = (pte_t *) pgtable;
        *ptep = __pte(0);
        ptep++;
        *ptep = __pte(0);
        return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                     unsigned long addr, pmd_t *pmdp)
{
        pmd_t old_pmd;
        unsigned long old;

        old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
        old_pmd = __pmd(old);
        return old_pmd;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

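/*
 * Set dirty/accessed/rw/exec bits in a PTE. On Book3S, relaxing access
 * normally needs no TLB flush (the core MMU reloads the PTE after an access
 * fault), but an attached nest MMU requires the PTE to be invalidated and
 * flushed before the relaxed value is installed.
 */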
void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                                  pte_t entry, unsigned long address, int psize)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
                                              _PAGE_RW | _PAGE_EXEC);

        unsigned long change = pte_val(entry) ^ pte_val(*ptep);
        /*
         * To avoid an NMMU hang while relaxing access, we need to mark
         * the pte invalid in between.
         */
        if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
                unsigned long old_pte, new_pte;

                old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
                /*
                 * new value of pte
                 */
                new_pte = old_pte | set;
                radix__flush_tlb_page_psize(mm, address, psize);
                __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
        } else {
                __radix_pte_update(ptep, 0, set);
                /*
                 * Book3S does not require a TLB flush when relaxing access
                 * restrictions when the address space is not attached to a
                 * NMMU, because the core MMU will reload the pte after taking
                 * an access fault, which is defined by the architecture.
                 */
        }
        /* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep,
                                    pte_t old_pte, pte_t pte)
{
        struct mm_struct *mm = vma->vm_mm;

        /*
         * To avoid an NMMU hang while relaxing access, we need to flush the
         * TLB before we set the new value. We need to do this only for radix,
         * because hash translation does a flush when updating the linux pte.
         */
        if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
            (atomic_read(&mm->context.copros) > 0))
                radix__flush_tlb_page(vma, addr);

        set_pte_at(mm, addr, ptep, pte);
}

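/*
 * Huge-vmap support: install or clear a leaf PTE directly at the PUD/PMD
 * level. The generic ioremap/vmalloc code calls these when it can cover a
 * region with a single 1G or 2M mapping.
 */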
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        pte_t *ptep = (pte_t *)pud;
        pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);

        if (!radix_enabled())
                return 0;

        set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);

        return 1;
}

int pud_clear_huge(pud_t *pud)
{
        if (pud_huge(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}

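/*
 * Unmap a PUD-sized region and free the PMD page (and any PTE pages) that
 * used to map it; the TLB is flushed for the whole PUD range first so no
 * translation can still reference the freed tables.
 */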
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd;
        int i;

        pmd = pud_pgtable(*pud);
        pud_clear(pud);

        flush_tlb_kernel_range(addr, addr + PUD_SIZE);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd[i])) {
                        pte_t *pte;

                        pte = (pte_t *)pmd_page_vaddr(pmd[i]);

                        pte_free_kernel(&init_mm, pte);
                }
        }

        pmd_free(&init_mm, pmd);

        return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        pte_t *ptep = (pte_t *)pmd;
        pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);

        if (!radix_enabled())
                return 0;

        set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);

        return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_huge(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;

        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);

        flush_tlb_kernel_range(addr, addr + PMD_SIZE);

        pte_free_kernel(&init_mm, pte);

        return 1;
}