linux/arch/ia64/mm/contig.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines used by ia64 machines with contiguous (or virtually contiguous)
 * memory.
 */
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/sizes.h>

#include <asm/efi.h>
#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>

/* physical address where the bootmem map is located */
unsigned long bootmap_start;

#ifdef CONFIG_SMP
static void *cpu_data;
/**
 * per_cpu_init - setup per-cpu variables
 *
 * Allocate and setup per-cpu data areas.
 */
void *per_cpu_init(void)
{
	static bool first_time = true;
	void *cpu0_data = __cpu0_per_cpu;
	unsigned int cpu;

	if (!first_time)
		goto skip;
	first_time = false;

	/*
	 * get_free_pages() cannot be used before cpu_init() is done.
	 * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
	 * up front so that the APs don't have to call get_zeroed_page().
	 */
	for_each_possible_cpu(cpu) {
		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;

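		/*
		 * Copy the initial percpu data into this CPU's
		 * PERCPU_PAGE_SIZE slot and record the offset that
		 * per_cpu() accesses will use for it.
		 */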
		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];

		/*
		 * The percpu area for cpu0 is moved from the __init area,
		 * which was set up by head.S and used until this point,
		 * so ar.k3 must be updated.  This move ensures that the
		 * percpu area for cpu0 is on the correct node and that
		 * its virtual address isn't insanely far from the other
		 * percpu areas, which is important for the congruent
		 * percpu allocator.
		 */
		if (cpu == 0)
			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
				    (unsigned long)__per_cpu_start);

		cpu_data += PERCPU_PAGE_SIZE;
	}
skip:
	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}

static inline void
alloc_per_cpu_data(void)
{
	size_t size = PERCPU_PAGE_SIZE * num_possible_cpus();

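	/*
	 * Reserve one PERCPU_PAGE_SIZE area per possible CPU from
	 * memblock, placed at or above MAX_DMA_ADDRESS.
	 */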
	cpu_data = memblock_alloc_from(size, PERCPU_PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
	if (!cpu_data)
		panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
		      __func__, size, PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}

/**
 * setup_per_cpu_areas - setup percpu areas
 *
 * Arch code has already allocated and initialized percpu areas.  All
 * this function has to do is to teach the determined layout to the
 * dynamic percpu allocator, which happens to be more complex than
 * creating whole new ones using helpers.
 */
void __init
setup_per_cpu_areas(void)
{
	struct pcpu_alloc_info *ai;
	struct pcpu_group_info *gi;
	unsigned int cpu;
	ssize_t static_size, reserved_size, dyn_size;

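	/*
	 * Memory is contiguous here, so a single percpu group covering
	 * all possible CPUs is sufficient.
	 */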
	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
	if (!ai)
		panic("failed to allocate pcpu_alloc_info");
	gi = &ai->groups[0];

	/* units are assigned consecutively to possible cpus */
	for_each_possible_cpu(cpu)
		gi->cpu_map[gi->nr_units++] = cpu;

	/* set parameters */
	static_size = __per_cpu_end - __per_cpu_start;
	reserved_size = PERCPU_MODULE_RESERVE;
	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
	if (dyn_size < 0)
		panic("percpu area overflow static=%zd reserved=%zd\n",
		      static_size, reserved_size);

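	/*
	 * Each unit is exactly one PERCPU_PAGE_SIZE page: the static
	 * area, the module reserve, and whatever is left for dynamic
	 * percpu allocations.
	 */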
	ai->static_size		= static_size;
	ai->reserved_size	= reserved_size;
	ai->dyn_size		= dyn_size;
	ai->unit_size		= PERCPU_PAGE_SIZE;
	ai->atom_size		= PAGE_SIZE;
	ai->alloc_size		= PERCPU_PAGE_SIZE;

	pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
	pcpu_free_alloc_info(ai);
}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */

/**
 * find_memory - setup memory map
 *
 * Walk the EFI memory map and find usable memory for the system, taking
 * into account reserved areas.
 */
void __init
find_memory (void)
{
	reserve_memory();

	/* first find highest page frame number */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;
	efi_memmap_walk(find_max_min_low_pfn, NULL);
	max_pfn = max_low_pfn;

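	/* register everything up to max_low_pfn with memblock as node 0 */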
	memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);

	find_initrd();

	alloc_per_cpu_data();
}

static int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

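	/*
	 * last_end is static so that it persists across the per-range
	 * callbacks made by efi_memmap_walk().
	 */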
	/* NOTE: this algorithm assumes the EFI memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

static void __init verify_gap_absence(void)
{
	unsigned long max_gap = 0;

	/* Forbid FLATMEM if any hole is larger than 1G */
	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
	if (max_gap >= SZ_1G)
		panic("Cannot use FLATMEM with %ldMB hole\n"
		      "Please switch over to SPARSEMEM\n",
		      (max_gap >> 20));
}

/*
 * Set up the page tables.
 */

void __init
paging_init (void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

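	/*
	 * Memory below MAX_DMA_ADDRESS goes into ZONE_DMA32, everything
	 * above it into ZONE_NORMAL.
	 */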
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_DMA32] = max_dma;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	verify_gap_absence();

	free_area_init(max_zone_pfns);
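	/* remember the struct page backing the empty zero page */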
	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}