linux/mm/nobootmem.c
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                        u64 goal, u64 limit)
{
        void *ptr;
        u64 addr;

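        /*
         * Clamp to the architecture's current accessible limit:
         * memory below memblock.current_limit is expected to be
         * mapped already, which is what makes the memset() below safe.
         */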
        if (limit > memblock.current_limit)
                limit = memblock.current_limit;

        addr = memblock_find_in_range_node(goal, limit, size, align, nid);
        if (!addr)
                return NULL;

        ptr = phys_to_virt(addr);
        memset(ptr, 0, size);
        memblock_reserve(addr, size);
        /*
         * The min_count is set to 0 so that bootmem allocated blocks
         * are never reported as leaks.
         */
        kmemleak_alloc(ptr, size, 0, 0);
        return ptr;
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(addr), size);

        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}
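
/*
 * Sketch of a typical use (hypothetical caller): an architecture that
 * is done with its initrd image after unpacking could return those
 * pages once the buddy allocator is up:
 *
 *      free_bootmem_late(__pa(initrd_start), initrd_end - initrd_start);
 *
 * The PFN_UP/PFN_DOWN rounding above means partial pages at either end
 * of the range are skipped, so only whole pages reach the allocator.
 */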

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
        unsigned long i, start_aligned, end_aligned;
        int order = ilog2(BITS_PER_LONG);

        start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1);
        end_aligned = end & ~(BITS_PER_LONG - 1);

        if (end_aligned <= start_aligned) {
                for (i = start; i < end; i++)
                        __free_pages_bootmem(pfn_to_page(i), 0);

                return;
        }

        for (i = start; i < start_aligned; i++)
                __free_pages_bootmem(pfn_to_page(i), 0);

        for (i = start_aligned; i < end_aligned; i += BITS_PER_LONG)
                __free_pages_bootmem(pfn_to_page(i), order);

        for (i = end_aligned; i < end; i++)
                __free_pages_bootmem(pfn_to_page(i), 0);
}
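
/*
 * Worked example (assuming BITS_PER_LONG == 64, so order = 6 and one
 * block covers 64 pages): for start = 5 and end = 200, the loops above
 * free pfns 5..63 as single pages, pfns 64..191 as two order-6 blocks
 * (64..127 and 128..191), and pfns 192..199 as single pages again.
 */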

static unsigned long __init __free_memory_core(phys_addr_t start,
                                 phys_addr_t end)
{
        unsigned long start_pfn = PFN_UP(start);
        unsigned long end_pfn = min_t(unsigned long,
                                      PFN_DOWN(end), max_low_pfn);

        if (start_pfn > end_pfn)
                return 0;

        __free_pages_memory(start_pfn, end_pfn);

        return end_pfn - start_pfn;
}
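
/*
 * The PFN_UP/PFN_DOWN pair rounds the range inward so only whole pages
 * are freed: with 4 KiB pages, [0x1234, 0x5678) becomes pfns 2..4, and
 * the partial pages at both ends stay reserved.
 */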

unsigned long __init free_low_memory_core_early(int nodeid)
{
        unsigned long count = 0;
        phys_addr_t start, end, size;
        u64 i;

        for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
                count += __free_memory_core(start, end);

        /*
         * Free the range backing memblock's reserved-regions array,
         * if that array was allocated dynamically.
         */
        size = get_allocated_memblock_reserved_regions_info(&start);
        if (size)
                count += __free_memory_core(start, start + size);

        return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);

        /* free_low_memory_core_early(MAX_NUMNODES) will be called later */
        return 0;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        /*
         * Use MAX_NUMNODES rather than NODE_DATA(0)->node_id: in some
         * configurations node 0 has no RAM installed and the low
         * memory lives on node 1 instead.
         */
        return free_low_memory_core_early(MAX_NUMNODES);
}
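
/*
 * Typical call site (illustrative; the exact form varies by
 * architecture): the arch's mem_init() releases all free low memory in
 * one go and accounts for it:
 *
 *      totalram_pages += free_all_bootmem();
 */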

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        kmemleak_free_part(__va(physaddr), size);
        memblock_free(physaddr, size);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        kmemleak_free_part(__va(addr), size);
        memblock_free(addr, size);
}
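
/*
 * Example (hypothetical; fw_phys and fw_size stand in for a platform's
 * own bookkeeping): a firmware handoff buffer reserved early in boot
 * can be returned once its contents have been consumed:
 *
 *      free_bootmem(fw_phys, fw_size);
 *
 * Unlike free_bootmem_late(), this only drops the memblock
 * reservation; the pages reach the buddy allocator later, via
 * free_all_bootmem().
 */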

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        void *ptr;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

restart:

        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, goal, limit);

        if (ptr)
                return ptr;

        if (goal != 0) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}
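
/*
 * Illustrative use (hypothetical caller): keep an early table out of
 * the DMA zone by preferring addresses above MAX_DMA_ADDRESS, and cope
 * gracefully when boot memory is exhausted.  table_size is a
 * placeholder:
 *
 *      table = __alloc_bootmem_nopanic(table_size, SMP_CACHE_BYTES,
 *                                      __pa(MAX_DMA_ADDRESS));
 *      if (!table)
 *              return -ENOMEM;
 */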

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = -1UL;

        return ___alloc_bootmem(size, align, goal, limit);
}

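/*
 * Node-aware fallback order: try the requested node first, then any
 * node, and finally retry the whole sequence with the goal dropped.
 * Only when all of that fails is NULL returned.
 */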
void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                                   unsigned long size,
                                                   unsigned long align,
                                                   unsigned long goal,
                                                   unsigned long limit)
{
        void *ptr;

again:
        ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
                                        goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
        if (ptr)
                return ptr;

        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
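
/*
 * Illustrative per-node use (hypothetical): place a node-local array
 * on the node it describes, with the common goal of staying out of the
 * DMA zone:
 *
 *      map = __alloc_bootmem_node(pgdat, array_size, PAGE_SIZE,
 *                                 __pa(MAX_DMA_ADDRESS));
 */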

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        return __alloc_bootmem_node(pgdat, size, align, goal);
}
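
/*
 * Note on __alloc_bootmem_node_high() above: memblock's free-range
 * search already runs top-down, so the "high" variant needs no special
 * handling here and simply forwards to __alloc_bootmem_node().
 */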

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif
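
/*
 * Unless the architecture overrides ARCH_LOW_ADDRESS_LIMIT above,
 * "low" boot memory means anything below the 4 GiB boundary.
 */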

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
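
/*
 * Illustrative use (hypothetical): an early buffer for a controller
 * that can only address 32-bit physical memory:
 *
 *      buf = __alloc_bootmem_low(buf_size, PAGE_SIZE, 0);
 */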

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it cannot be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * cannot hold the requested memory.
 *
 * The function panics if the request cannot be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal,
                                     ARCH_LOW_ADDRESS_LIMIT);
}