linux/mm/bootmem.c
/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
        .bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
        bootmem_debug = 1;
        return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({                         \
        if (unlikely(bootmem_debug))                    \
                printk(KERN_INFO                        \
                        "bootmem::%s " fmt,             \
                        __func__, ## args);             \
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
        unsigned long bytes = DIV_ROUND_UP(pages, 8);

        return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
        unsigned long bytes = bootmap_bytes(pages);

        return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
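/*
 * Worked example: a 1 GiB node with 4 KiB pages spans 262144 pages, so
 * the bitmap needs 262144 / 8 = 32768 bytes and bootmem_bootmap_pages()
 * returns 8 pages for it.
 */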

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
        bootmem_data_t *ent;

        list_for_each_entry(ent, &bdata_list, list) {
                if (bdata->node_min_pfn < ent->node_min_pfn) {
                        list_add_tail(&bdata->list, &ent->list);
                        return;
                }
        }

        list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
        unsigned long mapstart, unsigned long start, unsigned long end)
{
        unsigned long mapsize;

        mminit_validate_memmodel_limits(&start, &end);
        bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
        bdata->node_min_pfn = start;
        bdata->node_low_pfn = end;
        link_bootmem(bdata);

        /*
         * Initially all pages are reserved - setup_arch() has to
         * register free RAM areas explicitly.
         */
        mapsize = bootmap_bytes(end - start);
        memset(bdata->node_bootmem_map, 0xff, mapsize);

        bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
                bdata - bootmem_node_data, start, mapstart, end, mapsize);

        return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
                                unsigned long startpfn, unsigned long endpfn)
{
        return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
        max_low_pfn = pages;
        min_low_pfn = start;
        return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
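/*
 * Illustrative sequence from an architecture's setup_arch(), using
 * hypothetical pfn variables:
 *
 *	bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
 *					 start_pfn, end_pfn);
 *	free_bootmem(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn));
 *	reserve_bootmem(PFN_PHYS(free_pfn), bootmap_size, BOOTMEM_DEFAULT);
 *
 * i.e. register the node, mark the usable RAM as free, then re-reserve
 * the bitmap itself (and any other occupied ranges such as the kernel
 * image).
 */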

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator; no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long addr, unsigned long size)
{
        unsigned long cursor, end;

        kmemleak_free_part(__va(addr), size);

        cursor = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

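        /*
         * Only complete pages are released: the start is rounded up and
         * the end rounded down, so a partial page at either edge of the
         * range is left untouched.
         */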
        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
                totalram_pages++;
        }
}

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
        struct page *page;
        unsigned long start, end, pages, count = 0;

        if (!bdata->node_bootmem_map)
                return 0;

        start = bdata->node_min_pfn;
        end = bdata->node_low_pfn;

        bdebug("nid=%td start=%lx end=%lx\n",
                bdata - bootmem_node_data, start, end);

        while (start < end) {
                unsigned long *map, idx, vec;
                unsigned shift;

                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
                shift = idx & (BITS_PER_LONG - 1);
                /*
                 * vec holds at most BITS_PER_LONG map bits,
                 * bit 0 corresponds to start.
                 */
                vec = ~map[idx / BITS_PER_LONG];

                if (shift) {
                        vec >>= shift;
                        if (end - start >= BITS_PER_LONG)
                                vec |= ~map[idx / BITS_PER_LONG + 1] <<
                                        (BITS_PER_LONG - shift);
                }
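                /*
                 * At this point bit 0 of vec corresponds to start, and a
                 * set bit means the page is currently unreserved, because
                 * the bitmap words were inverted when read above.
                 */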
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
                 * it in one go.
                 */
                if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
                        int order = ilog2(BITS_PER_LONG);

                        __free_pages_bootmem(pfn_to_page(start), order);
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
                        unsigned long cur = start;

                        start = ALIGN(start + 1, BITS_PER_LONG);
                        while (vec && cur != start) {
                                if (vec & 1) {
                                        page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
                                ++cur;
                        }
                }
        }

        page = virt_to_page(bdata->node_bootmem_map);
        pages = bdata->node_low_pfn - bdata->node_min_pfn;
        pages = bootmem_bootmap_pages(pages);
        count += pages;
        while (pages--)
                __free_pages_bootmem(page++, 0);

        bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

        return count;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
        register_page_bootmem_info_node(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;

        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);

        return total_pages;
}
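/*
 * Illustrative caller: architectures typically invoke this from mem_init()
 * once memory setup is complete, e.g.
 *
 *	totalram_pages += free_all_bootmem();
 *
 * after which the buddy allocator owns all remaining free pages.
 */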

static void __init __free(bootmem_data_t *bdata,
                        unsigned long sidx, unsigned long eidx)
{
        unsigned long idx;

        bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn);

        if (bdata->hint_idx > sidx)
                bdata->hint_idx = sidx;

        for (idx = sidx; idx < eidx; idx++)
                if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
                        BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
                        unsigned long eidx, int flags)
{
        unsigned long idx;
        int exclusive = flags & BOOTMEM_EXCLUSIVE;

        bdebug("nid=%td start=%lx end=%lx flags=%x\n",
                bdata - bootmem_node_data,
                sidx + bdata->node_min_pfn,
                eidx + bdata->node_min_pfn,
                flags);

        for (idx = sidx; idx < eidx; idx++)
                if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
                        if (exclusive) {
                                __free(bdata, sidx, idx);
                                return -EBUSY;
                        }
                        bdebug("silent double reserve of PFN %lx\n",
                                idx + bdata->node_min_pfn);
                }
        return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
                                unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long sidx, eidx;

        bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
                bdata - bootmem_node_data, start, end, reserve, flags);

        BUG_ON(start < bdata->node_min_pfn);
        BUG_ON(end > bdata->node_low_pfn);

        sidx = start - bdata->node_min_pfn;
        eidx = end - bdata->node_min_pfn;

        if (reserve)
                return __reserve(bdata, sidx, eidx, flags);
        else
                __free(bdata, sidx, eidx);
        return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
                                int reserve, int flags)
{
        unsigned long pos;
        bootmem_data_t *bdata;

        pos = start;
        list_for_each_entry(bdata, &bdata_list, list) {
                int err;
                unsigned long max;

                if (pos < bdata->node_min_pfn ||
                    pos >= bdata->node_low_pfn) {
                        BUG_ON(pos != start);
                        continue;
                }

                max = min(bdata->node_low_pfn, end);

                err = mark_bootmem_node(bdata, pos, max, reserve, flags);
                if (reserve && err) {
                        mark_bootmem(start, pos, 0, 0);
                        return err;
                }

                if (max == end)
                        return 0;
                pos = bdata->node_low_pfn;
        }
        BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                              unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(physaddr), size);

        start = PFN_UP(physaddr);
        end = PFN_DOWN(physaddr + size);

        mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
        unsigned long start, end;

        kmemleak_free_part(__va(addr), size);

        start = PFN_UP(addr);
        end = PFN_DOWN(addr + size);

        mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                 unsigned long size, int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(physaddr);
        end = PFN_UP(physaddr + size);

        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
                            int flags)
{
        unsigned long start, end;

        start = PFN_DOWN(addr);
        end = PFN_UP(addr + size);

        return mark_bootmem(start, end, 1, flags);
}
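/*
 * Illustrative use, protecting a boot-loader supplied region early in boot
 * (initrd_start_phys and initrd_size are hypothetical names):
 *
 *	if (reserve_bootmem(initrd_start_phys, initrd_size,
 *			    BOOTMEM_EXCLUSIVE))
 *		pr_warn("initrd region already reserved\n");
 *
 * With BOOTMEM_EXCLUSIVE an overlap with an existing reservation fails
 * with -EBUSY; with BOOTMEM_DEFAULT the overlap is silently tolerated.
 */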

int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}

static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
{
        unsigned long base = bdata->node_min_pfn;

        /*
         * Align the index with respect to the node start so that the
         * combination of both satisfies the requested alignment.
         */

        return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
                                      unsigned long off, unsigned long align)
{
        unsigned long base = PFN_PHYS(bdata->node_min_pfn);

        /* Same as align_idx for byte offsets */

        return ALIGN(base + off, align) - base;
}
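/*
 * Worked example: with node_min_pfn = 1029 and step = 4, align_idx(bdata,
 * 0, 4) returns 3, so the absolute pfn 1029 + 3 = 1032 satisfies the
 * four-page alignment even though the node itself starts unaligned.
 */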

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;

        bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
                bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
                align, goal, limit);

        BUG_ON(!size);
        BUG_ON(align & (align - 1));
        BUG_ON(limit && goal + size > limit);

        if (!bdata->node_bootmem_map)
                return NULL;

        min = bdata->node_min_pfn;
        max = bdata->node_low_pfn;

        goal >>= PAGE_SHIFT;
        limit >>= PAGE_SHIFT;

        if (limit && max > limit)
                max = limit;
        if (max <= min)
                return NULL;

        step = max(align >> PAGE_SHIFT, 1UL);

        if (goal && min < goal && goal < max)
                start = ALIGN(goal, step);
        else
                start = ALIGN(min, step);

        sidx = start - bdata->node_min_pfn;
        midx = max - bdata->node_min_pfn;

        if (bdata->hint_idx > sidx) {
                /*
                 * Handle the valid case of sidx being zero and still
                 * catch the fallback below.
                 */
                fallback = sidx + 1;
                sidx = align_idx(bdata, bdata->hint_idx, step);
        }

        while (1) {
                int merge;
                void *region;
                unsigned long eidx, i, start_off, end_off;
find_block:
                sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
                sidx = align_idx(bdata, sidx, step);
                eidx = sidx + PFN_UP(size);

                if (sidx >= midx || eidx > midx)
                        break;

                for (i = sidx; i < eidx; i++)
                        if (test_bit(i, bdata->node_bootmem_map)) {
                                sidx = align_idx(bdata, i, step);
                                if (sidx == i)
                                        sidx += step;
                                goto find_block;
                        }

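                /*
                 * If the previous allocation ended part-way into a page
                 * and that page immediately precedes the free block found
                 * above, reuse the unused tail of that page instead of
                 * wasting it; "merge" notes that this first page is then
                 * already reserved and must not be reserved again.
                 */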
                if (bdata->last_end_off & (PAGE_SIZE - 1) &&
                                PFN_DOWN(bdata->last_end_off) + 1 == sidx)
                        start_off = align_off(bdata, bdata->last_end_off, align);
                else
                        start_off = PFN_PHYS(sidx);

                merge = PFN_DOWN(start_off) < sidx;
                end_off = start_off + size;

                bdata->last_end_off = end_off;
                bdata->hint_idx = PFN_UP(end_off);

                /*
                 * Reserve the area now:
                 */
                if (__reserve(bdata, PFN_DOWN(start_off) + merge,
                                PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
                        BUG();

                region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
                                start_off);
                memset(region, 0, size);
                /*
                 * The min_count is set to 0 so that bootmem allocated blocks
                 * are never reported as leaks.
                 */
                kmemleak_alloc(region, size, 0, 0);
                return region;
        }

        if (fallback) {
                sidx = align_idx(bdata, fallback - 1, step);
                fallback = 0;
                goto find_block;
        }

        return NULL;
}

static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
                                        unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc(size, GFP_NOWAIT);

#ifdef CONFIG_HAVE_ARCH_BOOTMEM
        {
                bootmem_data_t *p_bdata;

                p_bdata = bootmem_arch_preferred_node(bdata, size, align,
                                                        goal, limit);
                if (p_bdata)
                        return alloc_bootmem_bdata(p_bdata, size, align,
                                                        goal, limit);
        }
#endif
        return NULL;
}

static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
{
        bootmem_data_t *bdata;
        void *region;

        region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
        if (region)
                return region;

        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
                        break;

                region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
                if (region)
                        return region;
        }

        return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                              unsigned long align,
                                              unsigned long goal,
                                              unsigned long limit)
{
        void *ptr;

restart:
        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;
        if (goal) {
                goal = 0;
                goto restart;
        }

        return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
                                        unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
                                        unsigned long goal, unsigned long limit)
{
        void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

        if (mem)
                return mem;
        /*
         * Whoops, we cannot satisfy the allocation request.
         */
        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
                              unsigned long goal)
{
        unsigned long limit = 0;

        return ___alloc_bootmem(size, align, goal, limit);
}
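/*
 * Most callers reach this through the alloc_bootmem() wrapper macros in
 * <linux/bootmem.h>, which supply SMP_CACHE_BYTES alignment and a goal
 * above the DMA zone, e.g. (with a hypothetical structure):
 *
 *	table = alloc_bootmem(sizeof(*table));
 */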

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                unsigned long size, unsigned long align,
                                unsigned long goal, unsigned long limit)
{
        void *ptr;

again:
        ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
                                           align, goal, limit);
        if (ptr)
                return ptr;

        /* do not panic in alloc_bootmem_bdata() */
        if (limit && goal + size > limit)
                limit = 0;

        ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
        if (ptr)
                return ptr;

        ptr = alloc_bootmem_core(size, align, goal, limit);
        if (ptr)
                return ptr;

        if (goal) {
                goal = 0;
                goto again;
        }

        return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                    unsigned long align, unsigned long goal,
                                    unsigned long limit)
{
        void *ptr;

        ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
        if (ptr)
                return ptr;

        printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
        panic("Out of memory");
        return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
                                   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
        unsigned long end_pfn;

        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        /* update goal according to MAX_DMA32_PFN */
        end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

        if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
            (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
                void *ptr;
                unsigned long new_goal;

                new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
                ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
                                                 new_goal, 0);
                if (ptr)
                        return ptr;
        }
#endif

        return __alloc_bootmem_node(pgdat, size, align, goal);
}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif
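/*
 * Unless the architecture overrides this limit, the "low" allocators
 * below are confined to physical addresses within the first 4 GiB.
 */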

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
                                  unsigned long goal)
{
        return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
                                       unsigned long align, unsigned long goal)
{
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

        return ___alloc_bootmem_node(pgdat, size, align,
                                     goal, ARCH_LOW_ADDRESS_LIMIT);
}