linux/mm/nobootmem.c
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

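/*
 * Find a free range via memblock, reserve and zero it, and return its
 * virtual address.  Returns NULL when no suitable range exists below
 * @limit (which is clamped to memblock.current_limit).
 */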
static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;

        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

        addr = memblock_find_in_range_node(goal, limit, size, align, nid);
        if (!addr)
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        memblock_reserve(addr, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(addr), size);

        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}

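/*
 * Hand the pfn range [start, end) to the page allocator.  Interior
 * pages aligned to BITS_PER_LONG are batched into blocks of order
 * ilog2(BITS_PER_LONG); the unaligned head and tail are freed page by
 * page.  Ranges with no aligned interior are freed page by page.
 */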
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
        unsigned long i, start_aligned, end_aligned;
        int order = ilog2(BITS_PER_LONG);

        start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
        end_aligned = end & ~(BITS_PER_LONG - 1);

        if (end_aligned <= start_aligned) {
                for (i = start; i < end; i++)
                        __free_pages_bootmem(pfn_to_page(i), 0);

                return;
        }

        for (i = start; i < start_aligned; i++)
                __free_pages_bootmem(pfn_to_page(i), 0);

        for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
                __free_pages_bootmem(pfn_to_page(i), order);

        for (i = end_aligned; i < end; i++)
                __free_pages_bootmem(pfn_to_page(i), 0);
}

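/*
 * Clamp a physical range to whole pages below max_low_pfn and release
 * it; returns the number of pages handed to the page allocator.
 */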
static unsigned long __init __free_memory_core(phys_addr_t start,
                                 phys_addr_t end)
{
        unsigned long start_pfn = PFN_UP(start);
        unsigned long end_pfn = min_t(unsigned long,
                                      PFN_DOWN(end), max_low_pfn);

        if (start_pfn > end_pfn)
                return 0;

        __free_pages_memory(start_pfn, end_pfn);

        return end_pfn - start_pfn;
}

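/*
 * Walk every free memblock range (plus the range backing the memblock
 * reserved regions array, if one was allocated) and release it to the
 * page allocator.  Returns the total number of pages freed.
 */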
unsigned long __init free_low_memory_core_early(int nodeid)
{
        unsigned long count = 0;
        phys_addr_t start, end, size;
        u64 i;

        for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
                count += __free_memory_core(start, end);

        /* Free the range used for the reserved regions array, if we allocated it */
        size = get_allocated_memblock_reserved_regions_info(&start);
        if (size)
                count += __free_memory_core(start, start + size);

        return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);

        /* free_low_memory_core_early(MAX_NUMNODES) will be called later */
        return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        /*
         * We need to use MAX_NUMNODES instead of NODE_DATA(0)->node_id
         * because node 0 may have no RAM installed, in which case the
         * low memory lives on another node (e.g. node 1).  Passing
         * MAX_NUMNODES makes sure every range in early_node_map[] is
         * considered, not just those belonging to node 0.
         */
        return free_low_memory_core_early(MAX_NUMNODES);
}
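
/*
 * Illustrative only: free_all_bootmem() is typically invoked from an
 * architecture's mem_init() once the zones have been set up, roughly
 * along the lines of the hypothetical sketch below.  Details vary per
 * architecture.
 *
 *	void __init mem_init(void)
 *	{
 *		max_mapnr = max_pfn;
 *		totalram_pages += free_all_bootmem();
 *	}
 */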

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        kmemleak_free_part(__va(physaddr), size);
        memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        kmemleak_free_part(__va(addr), size);
        memblock_free(addr, size);
}

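/*
 * Core non-panicking allocator: try to satisfy the request below
 * @limit, preferring addresses at or above @goal; if that fails, retry
 * once with the goal dropped before giving up.  If slab is already up,
 * fall back to kzalloc() with a warning.
 */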
static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

restart:

        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

        if (ptr)
                return ptr;

        if (goal != 0) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem(size, align, goal, limit);
}

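/*
 * Node-aware non-panicking allocator: try the requested node first,
 * then any node, then retry both with the goal dropped.
 */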
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
                                                   unsigned long limit)
{
        void *ptr;

again:
        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}
 320
 321void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
 322                                    unsigned long align, unsigned long goal,
 323                                    unsigned long limit)
 324{
 325        void *ptr;
 326
 327        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
 328        if (ptr)
 329                return ptr;
 330
 331        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
 332        panic("Out of memory");
 333        return NULL;
 334}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

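/*
 * With memblock-backed nobootmem there is no separate highmem bootmem
 * pool, so the "high" variant simply forwards to __alloc_bootmem_node().
 */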
void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
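
/*
 * Illustrative only: a hypothetical caller that needs a buffer visible
 * to a 32-bit DMA engine could use the low variant to keep the
 * allocation below ARCH_LOW_ADDRESS_LIMIT (4 GiB by default):
 *
 *	buf = __alloc_bootmem_low(SZ_64K, PAGE_SIZE, 0);
 *
 * A goal of 0 lets the allocator place the buffer anywhere below the
 * limit; the memory comes back zeroed.
 */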

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal,
                                     ARCH_LOW_ADDRESS_LIMIT);
}