linux/mm/nobootmem.c
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
					u64 goal, u64 limit)
{
	void *ptr;
	u64 addr;

	if (limit > memblock.current_limit)
		limit = memblock.current_limit;

	addr = find_memory_core_early(nid, size, align, goal, limit);

	if (addr == MEMBLOCK_ERROR)
		return NULL;

	ptr = phys_to_virt(addr);
	memset(ptr, 0, size);
	memblock_x86_reserve_range(addr, addr + size, "BOOTMEM");
	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks.
	 */
	kmemleak_alloc(ptr, size, 0, 0);
	return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(addr), size);

	cursor = PFN_UP(addr);
	end = PFN_DOWN(addr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
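
/*
 * Editor's sketch (not part of the original file): a late boot-time
 * caller returning a range it no longer needs, after the bootmem phase
 * is over.  The function and its parameters are hypothetical.
 */
#if 0
static void __init my_release_late_region(unsigned long my_phys,
					  unsigned long my_bytes)
{
	/* Pages go straight to the buddy allocator. */
	free_bootmem_late(my_phys, my_bytes);
}
#endif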

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int i;
	unsigned long start_aligned, end_aligned;
	int order = ilog2(BITS_PER_LONG);

	start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
	end_aligned = end & ~(BITS_PER_LONG - 1);

	/* Range too small to contain a BITS_PER_LONG-aligned block. */
	if (end_aligned <= start_aligned) {
		for (i = start; i < end; i++)
			__free_pages_bootmem(pfn_to_page(i), 0);

		return;
	}

	/* Unaligned head, freed page by page. */
	for (i = start; i < start_aligned; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);

	/* Aligned middle, freed in BITS_PER_LONG-page blocks. */
	for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
		__free_pages_bootmem(pfn_to_page(i), order);

	/* Unaligned tail, freed page by page. */
	for (i = end_aligned; i < end; i++)
		__free_pages_bootmem(pfn_to_page(i), 0);
}
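
/*
 * Editor's note: with BITS_PER_LONG == 64, order == ilog2(64) == 6, so
 * the middle loop above frees 64-page (order-6) blocks in one call each;
 * only the unaligned head and tail are freed page by page.  For example,
 * start = 100, end = 300 gives start_aligned = 128 and end_aligned = 256:
 * pages 100-127 and 256-299 are freed singly, 128-255 as two order-6
 * blocks.
 */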

unsigned long __init free_all_memory_core_early(int nodeid)
{
	int i;
	u64 start, end;
	unsigned long count = 0;
	struct range *range = NULL;
	int nr_range;

	nr_range = get_free_all_memory_range(&range, nodeid);

	for (i = 0; i < nr_range; i++) {
		start = range[i].start;
		end = range[i].end;
		count += end - start;
		__free_pages_memory(start, end);
	}

	return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);

	/* free_all_memory_core_early(MAX_NUMNODES) will be called later */
	return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	/*
	 * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
	 * because node 0 may have no RAM installed, in which case low
	 * memory ends up on node 1 instead.  Using MAX_NUMNODES makes
	 * sure that all ranges in early_node_map[] are used, not only
	 * those associated with node 0.
	 */
	return free_all_memory_core_early(MAX_NUMNODES);
}
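
/*
 * Editor's sketch (not part of the original file): the typical caller
 * is an architecture's mem_init(), which hands all boot memory to the
 * buddy allocator and accounts the freed pages.  my_arch_mem_init() is
 * hypothetical.
 */
#if 0
void __init my_arch_mem_init(void)
{
	totalram_pages += free_all_bootmem();
}
#endif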

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	kmemleak_free_part(__va(physaddr), size);
	memblock_x86_free_range(physaddr, physaddr + size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	kmemleak_free_part(__va(addr), size);
	memblock_x86_free_range(addr, addr + size);
}
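
/*
 * Editor's sketch (not part of the original file): giving back a region
 * that was reserved earlier in boot but turned out to be unnecessary.
 * The names are hypothetical.
 */
#if 0
static void __init my_release_spare_table(unsigned long my_table_phys,
					  unsigned long my_table_bytes)
{
	/* The range becomes ordinary free boot memory again. */
	free_bootmem(my_table_phys, my_table_bytes);
}
#endif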

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

restart:

	ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

	if (ptr)
		return ptr;

	if (goal != 0) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
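
/*
 * Editor's sketch (not part of the original file): the nopanic variant
 * lets a caller degrade gracefully when early memory is tight instead
 * of halting the boot.  my_setup_optional_buffer() is hypothetical.
 */
#if 0
static void __init my_setup_optional_buffer(void)
{
	void *buf = __alloc_bootmem_nopanic(64 << 10, PAGE_SIZE, 0);

	if (!buf)
		printk(KERN_WARNING "optional buffer disabled: no early memory\n");
}
#endif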

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = -1UL;

	return ___alloc_bootmem(size, align, goal, limit);
}
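
/*
 * Editor's note: most callers reach this through the alloc_bootmem()
 * macros in <linux/bootmem.h>; the sketch below shows roughly what the
 * plain alloc_bootmem(x) wrapper passes in (cache-line alignment, with
 * a goal above the DMA zone).
 */
#if 0
static void * __init my_alloc_default(unsigned long x)
{
	/* roughly the expansion of the alloc_bootmem(x) macro */
	return __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
}
#endif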

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					 goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
					 goal, -1ULL);
}
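
/*
 * Editor's sketch (not part of the original file): allocating a
 * per-node table with node-local placement preferred.  The helper is
 * hypothetical.
 */
#if 0
static void * __init my_alloc_node_table(int nid, unsigned long bytes)
{
	/* Prefers memory on @nid, falls back to any node. */
	return __alloc_bootmem_node(NODE_DATA(nid), bytes, PAGE_SIZE, 0);
}
#endif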

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	unsigned long pfn, goal, limit;

	pfn = section_nr_to_pfn(section_nr);
	goal = pfn << PAGE_SHIFT;
	limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT;

	return __alloc_memory_core_early(early_pfn_to_nid(pfn), size,
					 SMP_CACHE_BYTES, goal, limit);
}
#endif
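
/*
 * Editor's sketch (not part of the original file): sparse memory setup
 * can use alloc_bootmem_section() to keep per-section metadata within
 * the section it describes, falling back to an ordinary boot allocation
 * when the section has no free memory.  The helper and its fallback
 * policy are hypothetical.
 */
#if 0
static void * __init my_alloc_section_metadata(unsigned long section_nr,
					       unsigned long bytes)
{
	void *map = alloc_bootmem_section(bytes, section_nr);

	if (!map)
		map = __alloc_bootmem_nopanic(bytes, SMP_CACHE_BYTES, 0);
	return map;
}
#endif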

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
					goal, -1ULL);
	if (ptr)
		return ptr;

	return __alloc_bootmem_nopanic(size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
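
/*
 * Editor's sketch (not part of the original file): a buffer that must
 * sit below ARCH_LOW_ADDRESS_LIMIT, e.g. for a device that can only
 * address 32 bits.  The helper is hypothetical; note that failure
 * panics, so the return value needs no NULL check.
 */
#if 0
static void * __init my_alloc_low_buffer(void)
{
	return __alloc_bootmem_low(16 << 10, PAGE_SIZE, 0);
}
#endif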

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
	if (ptr)
		return ptr;

	return __alloc_memory_core_early(MAX_NUMNODES, size, align,
				goal, ARCH_LOW_ADDRESS_LIMIT);
}