linux/arch/powerpc/mm/mem.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#endif

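/*
 * Return 1 if the given page frame number lies in RAM.  On 32-bit we
 * simply compare against max_pfn; on 64-bit we walk the memblock memory
 * regions, since RAM may contain holes.
 */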
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64    /* XXX for now */
        return pfn < max_pfn;
#else
        unsigned long paddr = (pfn << PAGE_SHIFT);
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                if (paddr >= reg->base && paddr < (reg->base + reg->size))
                        return 1;
        return 0;
#endif
}

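/*
 * Choose the page protection used when user space maps a physical
 * address range (e.g. via /dev/mem).  The platform may override this;
 * otherwise anything that is not RAM is mapped non-cached.
 */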
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = pgprot_noncached(vma_prot);

        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

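/*
 * Hot-add a memory range: create the linear mapping for it and hand the
 * new page range to the generic memory hotplug code (__add_pages).
 */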
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        if (create_section_mapping(start, start + size))
                return -EINVAL;

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem;
 * instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for each contiguous
 * region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
{
        struct memblock_region *reg;
        unsigned long end_pfn = start_pfn + nr_pages;
        unsigned long tstart, tend;
        int ret = -1;

        for_each_memblock(memory, reg) {
                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
                if (tstart >= tend)
                        continue;
                ret = (*func)(tstart, tend - tstart, arg);
                if (ret)
                        break;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

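/*
 * Illustrative (hypothetical) use of walk_system_ram_range(): the caller
 * supplies a callback that is invoked once per contiguous chunk of RAM
 * inside the requested PFN range, e.g.
 *
 *	static int count_pages(unsigned long start_pfn, unsigned long nr_pages,
 *			       void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;	// non-zero return stops the walk
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_pages);
 *
 * count_pages() is not part of this file; it only shows the callback
 * signature expected by the walker.
 */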
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        struct memblock_region *reg;
        int boot_mapsize;

        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
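        /*
         * For example (illustrative numbers only): with 512MB of lowmem and
         * 4KB pages there are 131072 pages, so the bitmap needs 131072 bits
         * = 16KB, i.e. bootmem_bootmap_pages() returns 4 pages.
         */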
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for_each_memblock(memory, reg) {
                unsigned long start_pfn, end_pfn;
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);
                memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for_each_memblock(reserved, reg) {
                unsigned long top = reg->base + reg->size - 1;
                if (top < lowmem_end_addr)
                        reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
                else if (reg->base < lowmem_end_addr) {
                        unsigned long trunc_size = lowmem_end_addr - reg->base;
                        reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for_each_memblock(reserved, reg)
                reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/*
 * Mark the gaps between RAM regions (i.e. PFNs with no backing memory) as
 * nosave, so that hibernation does not try to save or restore them.
 */
static int __init mark_nonram_nosave(void)
{
        struct memblock_region *reg, *prev = NULL;

        for_each_memblock(memory, reg) {
                if (prev &&
                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
                        register_nosave_region(memblock_region_memory_end_pfn(prev),
                                               memblock_region_memory_base_pfn(reg));
                prev = reg;
        }
        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long long total_ram = memblock_phys_mem_size();
        phys_addr_t top_of_ram = memblock_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
               (unsigned long long)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

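/*
 * mem_init() releases all usable memory from the bootmem allocator to the
 * buddy allocator, counts reserved pages, frees highmem pages (if any) and
 * prints the memory and kernel-layout summary at boot.
 */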
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
        if (ppc_swiotlb_enable)
                swiotlb_init(1);
#endif

        num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                        struct page *page = pfn_to_page(pfn);
                        if (memblock_is_reserved(paddr))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
        /*
         * If SMP is enabled, next_tlbcam_idx is initialized in the CPU
         * bring-up functions; do it here for the non-SMP case.
         */
        per_cpu(next_tlbcam_idx, smp_processor_id()) =
                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
                initsize >> 10);

#ifdef CONFIG_PPC32
        pr_info("Kernel virtual memory layout:\n");
        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
        pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
                IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
                ioremap_bot, IOREMAP_TOP);
        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
                VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

        mem_init_done = 1;
}

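/*
 * Free the memory occupied by __init code and data once boot is complete.
 * Each page is poisoned first so that late references to freed init code
 * are easier to spot.
 */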
void free_initmem(void)
{
        unsigned long addr;

        ppc_md.progress = ppc_printk_progress;

        addr = (unsigned long)__init_begin;
        for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                free_page(addr);
                totalram_pages++;
        }
        pr_info("Freeing unused kernel memory: %luk freed\n",
                ((unsigned long)__init_end -
                (unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
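/*
 * Give the pages holding the initrd image back to the page allocator once
 * it is no longer needed.
 */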
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        start = _ALIGN_DOWN(start, PAGE_SIZE);
        end = _ALIGN_UP(end, PAGE_SIZE);
        pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

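/*
 * Make both the d-cache and i-cache view of a page consistent, using the
 * method appropriate to the CPU family (kmap on Book-E with highmem,
 * virtual address on 8xx/64-bit, physical address otherwise).
 */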
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
#ifdef CONFIG_BOOKE
        {
                void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
                __flush_dcache_icache(start);
                kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
        }
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* No need to kmap: neither 8xx nor 64-bit supports highmem */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

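/*
 * Flush the i-cache for a range of a user page that the kernel has just
 * written to (e.g. via copy_to_user_page()); the page is kmapped first,
 * so this also works for highmem pages.
 */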
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot
         *
         * We also avoid filling the hash if not coming from a fault
         */
        if (current->thread.regs == NULL)
                return;
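        /*
         * Trap 0x400 is an instruction storage interrupt and 0x300 a data
         * storage interrupt; anything else did not come from a page fault,
         * so don't preload.
         */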
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
        && defined(CONFIG_HUGETLB_PAGE)
        if (is_vm_hugetlb_page(vma))
                book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int add_system_ram_resources(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                struct resource *res;
                unsigned long base = reg->base;
                unsigned long size = reg->size;

                res = kzalloc(sizeof(struct resource), GFP_KERNEL);
                WARN_ON(!res);

                if (res) {
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
                        res->flags = IORESOURCE_MEM;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }

        return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        if (page_is_rtas_user_buf(pfn))
                return 1;
        return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */