linux/arch/powerpc/mm/init_64.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

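/*
 * Constructors for the pagetable kmem caches created below: a newly
 * allocated table must start out zeroed, i.e. with every entry
 * invalid.
 */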
static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /* When batching pgtable pointers for RCU freeing, we store
         * the index size in the low bits.  Table alignment must be
         * big enough to fit it.
         *
         * Likewise, hugepage pagetable pointers contain a (different)
         * shift value in the low bits.  All tables must be aligned so
         * as to leave enough 0 bits in the address to contain it. */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /* It would be nice if this was a BUILD_BUG_ON(), but at the
         * moment, gcc doesn't seem to recognize is_power_of_2 as a
         * constant expression, so, so much for that. */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        if (PGT_CACHE(shift))
                return; /* Already have a cache of this size */

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        PGT_CACHE(shift) = new;

        pr_debug("Allocated pgtable cache for order %d\n", shift);
}
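
/*
 * Illustrative caller (a sketch, not code from this file): a hugepage
 * backend whose directory tables are indexed by "pdshift - shift"
 * bits might set up its cache along these lines:
 *
 *    pgtable_cache_add(pdshift - shift, NULL);
 *    if (!PGT_CACHE(pdshift - shift))
 *            panic("Couldn't create pgtable cache");
 *
 * A NULL ctor simply means the cache does not pre-zero its objects.
 */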
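
/*
 * Boot-time setup of the pagetable caches: the PGD and PMD levels get
 * their own caches here; a PUD level, when configured, shares a size
 * (and hence a cache) with one of them.
 */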
void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor);
        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE))
                panic("Couldn't allocate pgtable caches");

        /* In all current configs, when the PUD index exists it's the
         * same size as either the pgd or pmd index.  Verify that the
         * initialization above has also created a PUD cache.  This
         * will need re-examination if we add new possibilities for
         * the pagetable layout. */
        BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
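
/*
 * Worked example (illustrative values only): with 64K pages and
 * SECTION_SIZE_BITS == 24, PAGES_PER_SECTION is 1 << (24 - 16) == 256,
 * so PAGE_SECTION_MASK clears the low 8 pfn bits and any address
 * within a section's 256 struct pages resolves to that section's
 * first pfn.
 */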

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(vmemmap_section_start(start)))
                        return 1;

        return 0;
}

/* On hash-based CPUs, the vmemmap is bolted in the hash table.
 *
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different.
 */

#ifdef CONFIG_PPC_BOOK3E
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        /* Create a PTE encoding without page size */
        unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
                _PAGE_KERNEL_RW;

        /* PTEs only contain page size encodings up to 32M */
        BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

        /* Encode the size in the PTE */
        flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;

        /* For each PTE for that area, map things. Note that we don't
         * increment phys because all PTEs are of the large size and
         * thus must have the low bits clear
         */
        for (i = 0; i < page_size; i += PAGE_SIZE)
                BUG_ON(map_kernel_page(start + i, phys, flags));
}
#else /* CONFIG_PPC_BOOK3E */
static void __meminit vmemmap_create_mapping(unsigned long start,
                                             unsigned long page_size,
                                             unsigned long phys)
{
        int mapped = htab_bolt_mapping(start, start + page_size, phys,
                                       PAGE_KERNEL, mmu_vmemmap_psize,
                                       mmu_kernel_ssize);
        BUG_ON(mapped < 0);
}
#endif /* CONFIG_PPC_BOOK3E */

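/*
 * Each block allocated for the vmemmap is recorded on a simple singly
 * linked list, mapping the block's virtual address back to the
 * physical memory that backs it.
 */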
struct vmemmap_backing *vmemmap_list;

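/*
 * Carve vmemmap_backing entries out of whole pages: a fresh page is
 * allocated whenever the current one runs out, then handed out one
 * entry at a time.
 */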
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
        static struct vmemmap_backing *next;
        static int num_left;

        /* allocate a page when required and hand out chunks */
        if (!next || !num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;

        return next++;
}

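/* Record that the vmemmap block at @start is backed by @phys. */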
static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}

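/*
 * Populate the vmemmap for a range of struct pages.  Each
 * mapping-page-size chunk that is not already covered by an
 * initialised section gets a freshly allocated backing block, which
 * is recorded on vmemmap_list and wired into the kernel mapping.
 */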
int __meminit vmemmap_populate(struct page *start_page,
                               unsigned long nr_pages, int node)
{
        unsigned long start = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + nr_pages);
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
                 start_page, nr_pages, node);
        pr_debug(" -> map %lx..%lx\n", start, end);

        for (; start < end; start += page_size) {
                void *p;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                vmemmap_create_mapping(start, page_size, __pa(p));
        }

        return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */