linux/mm/memblock.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.     June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS                   128
#define INIT_PHYSMEM_REGIONS                    4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS         INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * Calling memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants make implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture-specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
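
/*
 * A minimal sketch of the flow described above; it is not part of the
 * original file, and my_ram_base/my_ram_size and my_initrd_* stand in
 * for values a real architecture derives from firmware or device tree.
 * RAM is registered first, the kernel image and initrd are reserved,
 * resizing is enabled once the critical reservations are in place, and
 * only then is boot memory allocated:
 *
 *	memblock_add(my_ram_base, my_ram_size);
 *	memblock_reserve(__pa_symbol(_stext), _end - _stext);
 *	memblock_reserve(my_initrd_start, my_initrd_size);
 *	memblock_allow_resize();
 *	ptr = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);
 */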

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
        .memory.regions         = memblock_memory_init_regions,
        .memory.cnt             = 1,    /* empty dummy entry */
        .memory.max             = INIT_MEMBLOCK_REGIONS,
        .memory.name            = "memory",

        .reserved.regions       = memblock_reserved_init_regions,
        .reserved.cnt           = 1,    /* empty dummy entry */
        .reserved.max           = INIT_MEMBLOCK_RESERVED_REGIONS,
        .reserved.name          = "reserved",

        .bottom_up              = false,
        .current_limit          = MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
        .regions                = memblock_physmem_init_regions,
        .cnt                    = 1,    /* empty dummy entry */
        .max                    = INIT_PHYSMEM_REGIONS,
        .name                   = "physmem",
};
#endif

/*
 * Keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)                   \
        for (i = 0, rgn = &memblock_type->regions[0];                   \
             i < memblock_type->cnt;                                    \
             i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)                                          \
        do {                                                            \
                if (memblock_debug)                                     \
                        pr_info(fmt, ##__VA_ARGS__);                    \
        } while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
        return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
        return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
                                       phys_addr_t base2, phys_addr_t size2)
{
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
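
/*
 * Worked example of the half-open overlap test above: [0x1000, 0x3000)
 * and [0x2000, 0x4000) overlap because 0x1000 < 0x4000 and
 * 0x2000 < 0x3000, while the merely adjacent ranges [0x1000, 0x2000)
 * and [0x2000, 0x3000) do not, since 0x2000 < 0x2000 is false.
 */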

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size)
{
        unsigned long i;

        memblock_cap_size(base, &size);

        for (i = 0; i < type->cnt; i++)
                if (memblock_addrs_overlap(base, size, type->regions[i].base,
                                           type->regions[i].size))
                        break;
        return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
                                phys_addr_t size, phys_addr_t align, int nid,
                                enum memblock_flags flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                cand = round_up(this_start, align);
                if (cand < this_end && this_end - cand >= size)
                        return cand;
        }

        return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
                               phys_addr_t size, phys_addr_t align, int nid,
                               enum memblock_flags flags)
{
        phys_addr_t this_start, this_end, cand;
        u64 i;

        for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
                                        NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);

                if (this_end < size)
                        continue;

                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
        }

        return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid,
                                        enum memblock_flags flags)
{
        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
            end == MEMBLOCK_ALLOC_KASAN)
                end = memblock.current_limit;

        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);

        if (memblock_bottom_up())
                return __memblock_find_range_bottom_up(start, end, size, align,
                                                       nid, flags);
        else
                return __memblock_find_range_top_down(start, end, size, align,
                                                      nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
                                        phys_addr_t end, phys_addr_t size,
                                        phys_addr_t align)
{
        phys_addr_t ret;
        enum memblock_flags flags = choose_memblock_flags();

again:
        ret = memblock_find_in_range_node(size, align, start, end,
                                            NUMA_NO_NODE, flags);

        if (!ret && (flags & MEMBLOCK_MIRROR)) {
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                flags &= ~MEMBLOCK_MIRROR;
                goto again;
        }

        return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
        type->total_size -= type->regions[r].size;
        memmove(&type->regions[r], &type->regions[r + 1],
                (type->cnt - (r + 1)) * sizeof(type->regions[r]));
        type->cnt--;

        /* Special case for empty arrays */
        if (type->cnt == 0) {
                WARN_ON(type->total_size != 0);
                type->cnt = 1;
                type->regions[0].base = 0;
                type->regions[0].size = 0;
                type->regions[0].flags = 0;
                memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
        }
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
        phys_addr_t addr, size;

        if (memblock.reserved.regions != memblock_reserved_init_regions) {
                addr = __pa(memblock.reserved.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.reserved.max);
                __memblock_free_late(addr, size);
        }

        if (memblock.memory.regions != memblock_memory_init_regions) {
                addr = __pa(memblock.memory.regions);
                size = PAGE_ALIGN(sizeof(struct memblock_region) *
                                  memblock.memory.max);
                __memblock_free_late(addr, size);
        }

        memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size)
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
                                                phys_addr_t new_area_start,
                                                phys_addr_t new_area_size)
{
        struct memblock_region *new_array, *old_array;
        phys_addr_t old_alloc_size, new_alloc_size;
        phys_addr_t old_size, new_size, addr, new_end;
        int use_slab = slab_is_available();
        int *in_slab;

        /*
         * We don't allow resizing until we know about the reserved regions
         * of memory that aren't suitable for allocation.
         */
        if (!memblock_can_resize)
                return -1;

        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
        new_size = old_size << 1;
        /*
         * We need to allocate the new array aligned to PAGE_SIZE,
         * so that we can free it completely later.
         */
        old_alloc_size = PAGE_ALIGN(old_size);
        new_alloc_size = PAGE_ALIGN(new_size);

        /* Retrieve the slab flag */
        if (type == &memblock.memory)
                in_slab = &memblock_memory_in_slab;
        else
                in_slab = &memblock_reserved_in_slab;

        /* Try to find some space for it */
        if (use_slab) {
                new_array = kmalloc(new_size, GFP_KERNEL);
                addr = new_array ? __pa(new_array) : 0;
        } else {
                /* only exclude range when trying to double reserved.regions */
                if (type != &memblock.reserved)
                        new_area_start = new_area_size = 0;

                addr = memblock_find_in_range(new_area_start + new_area_size,
                                                memblock.current_limit,
                                                new_alloc_size, PAGE_SIZE);
                if (!addr && new_area_size)
                        addr = memblock_find_in_range(0,
                                min(new_area_start, memblock.current_limit),
                                new_alloc_size, PAGE_SIZE);

                new_array = addr ? __va(addr) : NULL;
        }
        if (!addr) {
                pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
                       type->name, type->max, type->max * 2);
                return -1;
        }

        new_end = addr + new_size - 1;
        memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]\n",
                        type->name, type->max * 2, &addr, &new_end);

        /*
         * Found space, we now need to move the array over before we add the
         * reserved region since it may be our reserved array itself that is
         * full.
         */
        memcpy(new_array, type->regions, old_size);
        memset(new_array + type->max, 0, old_size);
        old_array = type->regions;
        type->regions = new_array;
        type->max <<= 1;

        /* Free old array. We needn't free it if the array is the static one */
        if (*in_slab)
                kfree(old_array);
        else if (old_array != memblock_memory_init_regions &&
                 old_array != memblock_reserved_init_regions)
                memblock_free(__pa(old_array), old_alloc_size);

        /*
         * Reserve the new array if that comes from the memblock.  Otherwise,
         * we needn't do it.
         */
        if (!use_slab)
                BUG_ON(memblock_reserve(addr, new_alloc_size));

        /* Update slab flag */
        *in_slab = use_slab;

        return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
        int i = 0;

        /* cnt never goes below 1 */
        while (i < type->cnt - 1) {
                struct memblock_region *this = &type->regions[i];
                struct memblock_region *next = &type->regions[i + 1];

                if (this->base + this->size != next->base ||
                    memblock_get_region_node(this) !=
                    memblock_get_region_node(next) ||
                    this->flags != next->flags) {
                        BUG_ON(this->base + this->size > next->base);
                        i++;
                        continue;
                }

                this->size += next->size;
                /* move forward from next + 1, index of which is i + 2 */
                memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
                type->cnt--;
        }
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:       memblock type to insert into
 * @idx:        index for the insertion point
 * @base:       base address of the new region
 * @size:       size of the new region
 * @nid:        node id of the new region
 * @flags:      flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
                                                   int idx, phys_addr_t base,
                                                   phys_addr_t size,
                                                   int nid,
                                                   enum memblock_flags flags)
{
        struct memblock_region *rgn = &type->regions[idx];

        BUG_ON(type->cnt >= type->max);
        memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
        rgn->base = base;
        rgn->size = size;
        rgn->flags = flags;
        memblock_set_region_node(rgn, nid);
        type->cnt++;
        type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
                                phys_addr_t base, phys_addr_t size,
                                int nid, enum memblock_flags flags)
{
        bool insert = false;
        phys_addr_t obase = base;
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx, nr_new;
        struct memblock_region *rgn;

        if (!size)
                return 0;

        /* special case for empty array */
        if (type->regions[0].size == 0) {
                WARN_ON(type->cnt != 1 || type->total_size);
                type->regions[0].base = base;
                type->regions[0].size = size;
                type->regions[0].flags = flags;
                memblock_set_region_node(&type->regions[0], nid);
                type->total_size = size;
                return 0;
        }
repeat:
        /*
         * The following is executed twice.  Once with %false @insert and
         * then with %true.  The first counts the number of regions needed
         * to accommodate the new area.  The second actually inserts them.
         */
        base = obase;
        nr_new = 0;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;
                /*
                 * @rgn overlaps.  If it separates the lower part of new
                 * area, insert that portion.
                 */
                if (rbase > base) {
#ifdef CONFIG_NUMA
                        WARN_ON(nid != memblock_get_region_node(rgn));
#endif
                        WARN_ON(flags != rgn->flags);
                        nr_new++;
                        if (insert)
                                memblock_insert_region(type, idx++, base,
                                                       rbase - base, nid,
                                                       flags);
                }
                /* area below @rend is dealt with, forget about it */
                base = min(rend, end);
        }

        /* insert the remaining portion */
        if (base < end) {
                nr_new++;
                if (insert)
                        memblock_insert_region(type, idx, base, end - base,
                                               nid, flags);
        }

        if (!nr_new)
                return 0;

        /*
         * If this was the first round, resize array and repeat for actual
         * insertions; otherwise, merge and return.
         */
        if (!insert) {
                while (type->cnt + nr_new > type->max)
                        if (memblock_double_array(type, obase, size) < 0)
                                return -ENOMEM;
                insert = true;
                goto repeat;
        } else {
                memblock_merge_regions(type);
                return 0;
        }
}
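
/*
 * Illustration of the semantics above (not from the original source):
 * starting from an empty type, the calls
 *
 *	memblock_add_range(type, 0x1000, 0x1000, nid, 0);
 *	memblock_add_range(type, 0x3000, 0x1000, nid, 0);
 *	memblock_add_range(type, 0x0000, 0x5000, nid, 0);
 *
 * leave a single region [0x0000, 0x5000): the third call inserts only
 * the gaps not already covered, and memblock_merge_regions() then
 * coalesces the adjacent compatible regions into one.
 */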

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
                                       int nid)
{
        return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index of the
 * first region after the range in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
                                        phys_addr_t base, phys_addr_t size,
                                        int *start_rgn, int *end_rgn)
{
        phys_addr_t end = base + memblock_cap_size(base, &size);
        int idx;
        struct memblock_region *rgn;

        *start_rgn = *end_rgn = 0;

        if (!size)
                return 0;

        /* we'll create at most two more regions */
        while (type->cnt + 2 > type->max)
                if (memblock_double_array(type, base, size) < 0)
                        return -ENOMEM;

        for_each_memblock_type(idx, type, rgn) {
                phys_addr_t rbase = rgn->base;
                phys_addr_t rend = rbase + rgn->size;

                if (rbase >= end)
                        break;
                if (rend <= base)
                        continue;

                if (rbase < base) {
                        /*
                         * @rgn intersects from below.  Split and continue
                         * to process the next region - the new top half.
                         */
                        rgn->base = base;
                        rgn->size -= base - rbase;
                        type->total_size -= base - rbase;
                        memblock_insert_region(type, idx, rbase, base - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else if (rend > end) {
                        /*
                         * @rgn intersects from above.  Split and redo the
                         * current region - the new bottom half.
                         */
                        rgn->base = end;
                        rgn->size -= end - rbase;
                        type->total_size -= end - rbase;
                        memblock_insert_region(type, idx--, rbase, end - rbase,
                                               memblock_get_region_node(rgn),
                                               rgn->flags);
                } else {
                        /* @rgn is fully contained, record it */
                        if (!*end_rgn)
                                *start_rgn = idx;
                        *end_rgn = idx + 1;
                }
        }

        return 0;
}
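
/*
 * Illustration (not from the original source): isolating [0x2000, 0x6000)
 * in a type holding the single region [0x0000, 0x8000) splits it into
 * [0x0000, 0x2000), [0x2000, 0x6000) and [0x6000, 0x8000); *start_rgn
 * and *end_rgn then select the middle region as the half-open index
 * range [1, 2).
 */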

static int __init_memblock memblock_remove_range(struct memblock_type *type,
                                          phys_addr_t base, phys_addr_t size)
{
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = end_rgn - 1; i >= start_rgn; i--)
                memblock_remove_region(type, i);
        return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx()
 * API. The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        kmemleak_free_part_phys(base, size);
        return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
        phys_addr_t end = base + size - 1;

        memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
                     &base, &end, (void *)_RET_IP_);

        return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates the region [@base, @base + @size) and sets/clears
 * the flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
                                phys_addr_t size, int set, int flag)
{
        struct memblock_type *type = &memblock.memory;
        int i, ret, start_rgn, end_rgn;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++) {
                struct memblock_region *r = &type->regions[i];

                if (set)
                        r->flags |= flag;
                else
                        r->flags &= ~flag;
        }

        memblock_merge_regions(type);
        return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
        system_has_some_mirror = true;

        return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
        return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
                               struct memblock_region *m,
                               int nid, int flags)
{
        int m_nid = memblock_get_region_node(m);

        /* we never skip regions when iterating memblock.reserved or physmem */
        if (type != memblock_memory)
                return false;

        /* only memory regions are associated with nodes, check it */
        if (nid != NUMA_NO_NODE && nid != m_nid)
                return true;

        /* skip hotpluggable memory regions if needed */
        if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
            !(flags & MEMBLOCK_HOTPLUG))
                return true;

        /* if we want mirror memory skip non-mirror memory regions */
        if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
                return true;

        /* skip nomap memory unless we were asked for it explicitly */
        if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
                return true;

        return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *      0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *      0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
                      struct memblock_type *type_a,
                      struct memblock_type *type_b, phys_addr_t *out_start,
                      phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES,
        "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        for (; idx_a < type_a->cnt; idx_a++) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int         m_nid = memblock_get_region_node(m);

                if (should_skip_region(type_a, m, nid, flags))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a++;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b < type_b->cnt + 1; idx_b++) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : PHYS_ADDR_MAX;

                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_start >= m_end)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_start < r_end) {
                                if (out_start)
                                        *out_start =
                                                max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                /*
                                 * The region which ends first is
                                 * advanced for the next iteration.
                                 */
                                if (m_end <= r_end)
                                        idx_a++;
                                else
                                        idx_b++;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }

        /* signal end of iteration */
        *idx = ULLONG_MAX;
}
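
/*
 * A minimal sketch of this iterator in use through the
 * for_each_free_mem_range() wrapper (hypothetical caller): walk every
 * free range, i.e. memory not covered by a reservation, and log it.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa)\n", &start, &end);
 */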

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
                                          enum memblock_flags flags,
                                          struct memblock_type *type_a,
                                          struct memblock_type *type_b,
                                          phys_addr_t *out_start,
                                          phys_addr_t *out_end, int *out_nid)
{
        int idx_a = *idx & 0xffffffff;
        int idx_b = *idx >> 32;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (*idx == (u64)ULLONG_MAX) {
                idx_a = type_a->cnt - 1;
                if (type_b != NULL)
                        idx_b = type_b->cnt;
                else
                        idx_b = 0;
        }

        for (; idx_a >= 0; idx_a--) {
                struct memblock_region *m = &type_a->regions[idx_a];

                phys_addr_t m_start = m->base;
                phys_addr_t m_end = m->base + m->size;
                int m_nid = memblock_get_region_node(m);

                if (should_skip_region(type_a, m, nid, flags))
                        continue;

                if (!type_b) {
                        if (out_start)
                                *out_start = m_start;
                        if (out_end)
                                *out_end = m_end;
                        if (out_nid)
                                *out_nid = m_nid;
                        idx_a--;
                        *idx = (u32)idx_a | (u64)idx_b << 32;
                        return;
                }

                /* scan areas before each reservation */
                for (; idx_b >= 0; idx_b--) {
                        struct memblock_region *r;
                        phys_addr_t r_start;
                        phys_addr_t r_end;

                        r = &type_b->regions[idx_b];
                        r_start = idx_b ? r[-1].base + r[-1].size : 0;
                        r_end = idx_b < type_b->cnt ?
                                r->base : PHYS_ADDR_MAX;
                        /*
                         * if idx_b advanced past idx_a,
                         * break out to advance idx_a
                         */
                        if (r_end <= m_start)
                                break;
                        /* if the two regions intersect, we're done */
                        if (m_end > r_start) {
                                if (out_start)
                                        *out_start = max(m_start, r_start);
                                if (out_end)
                                        *out_end = min(m_end, r_end);
                                if (out_nid)
                                        *out_nid = m_nid;
                                if (m_start >= r_start)
                                        idx_a--;
                                else
                                        idx_b--;
                                *idx = (u32)idx_a | (u64)idx_b << 32;
                                return;
                        }
                }
        }
        /* signal end of iteration */
        *idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
                                unsigned long *out_end_pfn, int *out_nid)
{
        struct memblock_type *type = &memblock.memory;
        struct memblock_region *r;
        int r_nid;

        while (++*idx < type->cnt) {
                r = &type->regions[*idx];
                r_nid = memblock_get_region_node(r);

                if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
                        continue;
                if (nid == MAX_NUMNODES || nid == r_nid)
                        break;
        }
        if (*idx >= type->cnt) {
                *idx = -1;
                return;
        }

        if (out_start_pfn)
                *out_start_pfn = PFN_UP(r->base);
        if (out_end_pfn)
                *out_end_pfn = PFN_DOWN(r->base + r->size);
        if (out_nid)
                *out_nid = r_nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
                                      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
        int start_rgn, end_rgn;
        int i, ret;

        ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
        if (ret)
                return ret;

        for (i = start_rgn; i < end_rgn; i++)
                memblock_set_region_node(&type->regions[i], nid);

        memblock_merge_regions(type);
#endif
        return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is a zone/pfn specific wrapper for the for_each_mem_range
 * type iterators. It is used by the deferred memory init routines, which
 * previously duplicated this logic in several places; centralizing it in
 * one iterator keeps that logic in a single location.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
                             unsigned long *out_spfn, unsigned long *out_epfn)
{
        int zone_nid = zone_to_nid(zone);
        phys_addr_t spa, epa;
        int nid;

        __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
                         &memblock.memory, &memblock.reserved,
                         &spa, &epa, &nid);

        while (*idx != U64_MAX) {
                unsigned long epfn = PFN_DOWN(epa);
                unsigned long spfn = PFN_UP(spa);

                /*
                 * Verify the end is at least past the start of the zone and
                 * that we have at least one PFN to initialize.
                 */
                if (zone->zone_start_pfn < epfn && spfn < epfn) {
                        /* if we went too far just stop searching */
                        if (zone_end_pfn(zone) <= spfn) {
                                *idx = U64_MAX;
                                break;
                        }

                        if (out_spfn)
                                *out_spfn = max(zone->zone_start_pfn, spfn);
                        if (out_epfn)
                                *out_epfn = min(zone_end_pfn(zone), epfn);

                        return;
                }

                __next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
                                 &memblock.memory, &memblock.reserved,
                                 &spa, &epa, &nid);
        }

        /* signal end of iteration */
        if (out_spfn)
                *out_spfn = ULONG_MAX;
        if (out_epfn)
                *out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control whether the allocation may fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function sets min_count to 0 using kmemleak_alloc_phys()
 * for the allocated boot memory block, so that it is never reported as a
 * leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                                        phys_addr_t align, phys_addr_t start,
                                        phys_addr_t end, int nid,
                                        bool exact_nid)
{
        enum memblock_flags flags = choose_memblock_flags();
        phys_addr_t found;

        if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
                nid = NUMA_NO_NODE;

        if (!align) {
                /* Can't use WARNs this early in boot on powerpc */
                dump_stack();
                align = SMP_CACHE_BYTES;
        }

again:
        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
        if (found && !memblock_reserve(found, size))
                goto done;

        if (nid != NUMA_NO_NODE && !exact_nid) {
                found = memblock_find_in_range_node(size, align, start,
                                                    end, NUMA_NO_NODE,
                                                    flags);
                if (found && !memblock_reserve(found, size))
                        goto done;
        }

        if (flags & MEMBLOCK_MIRROR) {
                flags &= ~MEMBLOCK_MIRROR;
                pr_warn("Could not allocate %pap bytes of mirrored memory\n",
                        &size);
                goto again;
        }

        return 0;

done:
        /* Skip kmemleak for kasan_init() due to high volume. */
        if (end != MEMBLOCK_ALLOC_KASAN)
                /*
                 * The min_count is set to 0 so that memblock allocated
                 * blocks are never reported as leaks. This is because many
                 * of these blocks are only referred via the physical
                 * address which is not looked up by kmemleak.
                 */
                kmemleak_alloc_phys(found, size, 0, 0);

        return found;
}
1375
1376/**
1377 * memblock_phys_alloc_range - allocate a memory block inside specified range
1378 * @size: size of memory block to be allocated in bytes
1379 * @align: alignment of the region and block's size
1380 * @start: the lower bound of the memory region to allocate (physical address)
1381 * @end: the upper bound of the memory region to allocate (physical address)
1382 *
1383 * Allocate @size bytes in the between @start and @end.
1384 *
1385 * Return: physical address of the allocated memory block on success,
1386 * %0 on failure.
1387 */
1388phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
1389                                             phys_addr_t align,
1390                                             phys_addr_t start,
1391                                             phys_addr_t end)
1392{
1393        memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
1394                     __func__, (u64)size, (u64)align, &start, &end,
1395                     (void *)_RET_IP_);
1396        return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
1397                                        false);
1398}
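
/*
 * Sketch (hypothetical helper): constrain the block to a physical window,
 * e.g. the first 16M for ISA-style DMA. SZ_16M is from <linux/sizes.h>.
 */
static phys_addr_t __init example_phys_alloc_low(phys_addr_t size)
{
        return memblock_phys_alloc_range(size, PAGE_SIZE, 0, SZ_16M);
}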
1399
1400/**
1401 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
1402 * @size: size of memory block to be allocated in bytes
1403 * @align: alignment of the region and block's size
1404 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1405 *
1406 * Allocates a memory block from the specified NUMA node. If the node
1407 * has no available memory, the allocation falls back to any node in
1408 * the system.
1409 *
1410 * Return: physical address of the allocated memory block on success,
1411 * %0 on failure.
1412 */
1413phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
1414{
1415        return memblock_alloc_range_nid(size, align, 0,
1416                                        MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
1417}
1418
1419/**
1420 * memblock_alloc_internal - allocate boot memory block
1421 * @size: size of memory block to be allocated in bytes
1422 * @align: alignment of the region and block's size
1423 * @min_addr: the lower bound of the memory region to allocate (phys address)
1424 * @max_addr: the upper bound of the memory region to allocate (phys address)
1425 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1426 * @exact_nid: control whether the allocation can fall back to other nodes
1427 *
1428 * Allocates a memory block using memblock_alloc_range_nid() and
1429 * converts the returned physical address to a virtual one.
1430 *
1431 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
1432 * will fall back to memory below @min_addr. Other constraints, such
1433 * as the node and mirrored memory, will be handled again in
1434 * memblock_alloc_range_nid().
1435 *
1436 * Return:
1437 * Virtual address of allocated memory block on success, NULL on failure.
1438 */
1439static void * __init memblock_alloc_internal(
1440                                phys_addr_t size, phys_addr_t align,
1441                                phys_addr_t min_addr, phys_addr_t max_addr,
1442                                int nid, bool exact_nid)
1443{
1444        phys_addr_t alloc;
1445
1446        /*
1447         * Detect any accidental use of these APIs after slab is ready, as at
1448         * this moment memblock may already be deinitialized and its internal
1449         * data may have been destroyed (after memblock_free_all() has run)
1450         */
1451        if (WARN_ON_ONCE(slab_is_available()))
1452                return kzalloc_node(size, GFP_NOWAIT, nid);
1453
1454        if (max_addr > memblock.current_limit)
1455                max_addr = memblock.current_limit;
1456
1457        alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
1458                                        exact_nid);
1459
1460        /* retry allocation without lower limit */
1461        if (!alloc && min_addr)
1462                alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
1463                                                exact_nid);
1464
1465        if (!alloc)
1466                return NULL;
1467
1468        return phys_to_virt(alloc);
1469}
1470
1471/**
1472 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
1473 * without zeroing memory
1474 * @size: size of memory block to be allocated in bytes
1475 * @align: alignment of the region and block's size
1476 * @min_addr: the lower bound of the memory region from where the allocation
1477 *        is preferred (phys address)
1478 * @max_addr: the upper bound of the memory region from where the allocation
1479 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1480 *            allocate only from memory limited by memblock.current_limit value
1481 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1482 *
1483 * Public function, provides additional debug information (including caller
1484 * info), if enabled. Does not zero allocated memory.
1485 *
1486 * Return:
1487 * Virtual address of allocated memory block on success, NULL on failure.
1488 */
1489void * __init memblock_alloc_exact_nid_raw(
1490                        phys_addr_t size, phys_addr_t align,
1491                        phys_addr_t min_addr, phys_addr_t max_addr,
1492                        int nid)
1493{
1494        void *ptr;
1495
1496        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1497                     __func__, (u64)size, (u64)align, nid, &min_addr,
1498                     &max_addr, (void *)_RET_IP_);
1499
1500        ptr = memblock_alloc_internal(size, align,
1501                                           min_addr, max_addr, nid, true);
1502        if (ptr && size > 0)
1503                page_init_poison(ptr, size);
1504
1505        return ptr;
1506}
1507
1508/**
1509 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
1510 * memory and without panicking
1511 * @size: size of memory block to be allocated in bytes
1512 * @align: alignment of the region and block's size
1513 * @min_addr: the lower bound of the memory region from where the allocation
1514 *        is preferred (phys address)
1515 * @max_addr: the upper bound of the memory region from where the allocation
1516 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1517 *            allocate only from memory limited by memblock.current_limit value
1518 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1519 *
1520 * Public function, provides additional debug information (including caller
1521 * info), if enabled. Does not zero allocated memory, does not panic if request
1522 * cannot be satisfied.
1523 *
1524 * Return:
1525 * Virtual address of allocated memory block on success, NULL on failure.
1526 */
1527void * __init memblock_alloc_try_nid_raw(
1528                        phys_addr_t size, phys_addr_t align,
1529                        phys_addr_t min_addr, phys_addr_t max_addr,
1530                        int nid)
1531{
1532        void *ptr;
1533
1534        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1535                     __func__, (u64)size, (u64)align, nid, &min_addr,
1536                     &max_addr, (void *)_RET_IP_);
1537
1538        ptr = memblock_alloc_internal(size, align,
1539                                           min_addr, max_addr, nid, false);
1540        if (ptr && size > 0)
1541                page_init_poison(ptr, size);
1542
1543        return ptr;
1544}
1545
1546/**
1547 * memblock_alloc_try_nid - allocate boot memory block
1548 * @size: size of memory block to be allocated in bytes
1549 * @align: alignment of the region and block's size
1550 * @min_addr: the lower bound of the memory region from where the allocation
1551 *        is preferred (phys address)
1552 * @max_addr: the upper bound of the memory region from where the allocation
1553 *            is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1554 *            allocate only from memory limited by memblock.current_limit value
1555 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1556 *
1557 * Public function, provides additional debug information (including caller
1558 * info), if enabled. This function zeroes the allocated memory.
1559 *
1560 * Return:
1561 * Virtual address of allocated memory block on success, NULL on failure.
1562 */
1563void * __init memblock_alloc_try_nid(
1564                        phys_addr_t size, phys_addr_t align,
1565                        phys_addr_t min_addr, phys_addr_t max_addr,
1566                        int nid)
1567{
1568        void *ptr;
1569
1570        memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
1571                     __func__, (u64)size, (u64)align, nid, &min_addr,
1572                     &max_addr, (void *)_RET_IP_);
1573        ptr = memblock_alloc_internal(size, align,
1574                                           min_addr, max_addr, nid, false);
1575        if (ptr)
1576                memset(ptr, 0, size);
1577
1578        return ptr;
1579}
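
/*
 * Sketch of the typical early-boot pattern (hypothetical caller): the
 * plain variant above hands back zeroed, directly usable memory, while
 * the _raw variants leave initialization entirely to the caller.
 */
static void * __init example_alloc_zeroed_table(phys_addr_t bytes, int nid)
{
        void *tbl = memblock_alloc_try_nid(bytes, SMP_CACHE_BYTES, 0,
                                           MEMBLOCK_ALLOC_ACCESSIBLE, nid);

        if (!tbl)
                panic("%s: failed to allocate %pa bytes\n", __func__, &bytes);
        return tbl;
}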
1580
1581/**
1582 * __memblock_free_late - free pages directly to buddy allocator
1583 * @base: phys starting address of the boot memory block
1584 * @size: size of the boot memory block in bytes
1585 *
1586 * This is only useful when the memblock allocator has already been torn
1587 * down, but we are still initializing the system.  Pages are released directly
1588 * to the buddy allocator.
1589 */
1590void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
1591{
1592        phys_addr_t cursor, end;
1593
1594        end = base + size - 1;
1595        memblock_dbg("%s: [%pa-%pa] %pS\n",
1596                     __func__, &base, &end, (void *)_RET_IP_);
1597        kmemleak_free_part_phys(base, size);
1598        cursor = PFN_UP(base);
1599        end = PFN_DOWN(base + size);
1600
1601        for (; cursor < end; cursor++) {
1602                memblock_free_pages(pfn_to_page(cursor), cursor, 0);
1603                totalram_pages_inc();
1604        }
1605}
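
/*
 * Note the rounding above: only pages fully contained in [base, base + size)
 * are released. For example, base = 0x1800 and size = 0x2800 with 4K pages
 * spans [0x1800, 0x4000), so PFN_UP/PFN_DOWN yield pfns 2 and 4 and only
 * pages 2 and 3 (0x2000-0x3fff) go back to the buddy allocator.
 */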
1606
1607/*
1608 * Remaining API functions
1609 */
1610
1611phys_addr_t __init_memblock memblock_phys_mem_size(void)
1612{
1613        return memblock.memory.total_size;
1614}
1615
1616phys_addr_t __init_memblock memblock_reserved_size(void)
1617{
1618        return memblock.reserved.total_size;
1619}
1620
1621/* lowest address */
1622phys_addr_t __init_memblock memblock_start_of_DRAM(void)
1623{
1624        return memblock.memory.regions[0].base;
1625}
1626
1627phys_addr_t __init_memblock memblock_end_of_DRAM(void)
1628{
1629        int idx = memblock.memory.cnt - 1;
1630
1631        return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
1632}
1633
1634static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
1635{
1636        phys_addr_t max_addr = PHYS_ADDR_MAX;
1637        struct memblock_region *r;
1638
1639        /*
1640         * Translate the memory @limit size into the max address within one of
1641         * the memblock memory regions. If @limit exceeds the total size of
1642         * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
1643         */
1644        for_each_mem_region(r) {
1645                if (limit <= r->size) {
1646                        max_addr = r->base + limit;
1647                        break;
1648                }
1649                limit -= r->size;
1650        }
1651
1652        return max_addr;
1653}
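
/*
 * Worked example (hypothetical layout): with memory regions
 * [0x00000000-0x80000000) and [0x100000000-0x180000000) and limit = 3G,
 * the first region consumes 2G of the limit, so the loop stops in the
 * second one and returns 0x100000000 + 1G = 0x140000000.
 */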
1654
1655void __init memblock_enforce_memory_limit(phys_addr_t limit)
1656{
1657        phys_addr_t max_addr;
1658
1659        if (!limit)
1660                return;
1661
1662        max_addr = __find_max_addr(limit);
1663
1664        /* @limit exceeds the total size of the memory, do nothing */
1665        if (max_addr == PHYS_ADDR_MAX)
1666                return;
1667
1668        /* truncate both memory and reserved regions */
1669        memblock_remove_range(&memblock.memory, max_addr,
1670                              PHYS_ADDR_MAX);
1671        memblock_remove_range(&memblock.reserved, max_addr,
1672                              PHYS_ADDR_MAX);
1673}
1674
1675void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
1676{
1677        int start_rgn, end_rgn;
1678        int i, ret;
1679
1680        if (!size)
1681                return;
1682
1683        ret = memblock_isolate_range(&memblock.memory, base, size,
1684                                                &start_rgn, &end_rgn);
1685        if (ret)
1686                return;
1687
1688        /* remove all the MAP regions outside the [base, base + size) range */
1689        for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
1690                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1691                        memblock_remove_region(&memblock.memory, i);
1692
1693        for (i = start_rgn - 1; i >= 0; i--)
1694                if (!memblock_is_nomap(&memblock.memory.regions[i]))
1695                        memblock_remove_region(&memblock.memory, i);
1696
1697        /* truncate the reserved regions */
1698        memblock_remove_range(&memblock.reserved, 0, base);
1699        memblock_remove_range(&memblock.reserved,
1700                        base + size, PHYS_ADDR_MAX);
1701}
1702
1703void __init memblock_mem_limit_remove_map(phys_addr_t limit)
1704{
1705        phys_addr_t max_addr;
1706
1707        if (!limit)
1708                return;
1709
1710        max_addr = __find_max_addr(limit);
1711
1712        /* @limit exceeds the total size of the memory, do nothing */
1713        if (max_addr == PHYS_ADDR_MAX)
1714                return;
1715
1716        memblock_cap_memory_range(0, max_addr);
1717}
1718
1719static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
1720{
1721        unsigned int left = 0, right = type->cnt;
1722
1723        do {
1724                unsigned int mid = (right + left) / 2;
1725
1726                if (addr < type->regions[mid].base)
1727                        right = mid;
1728                else if (addr >= (type->regions[mid].base +
1729                                  type->regions[mid].size))
1730                        left = mid + 1;
1731                else
1732                        return mid;
1733        } while (left < right);
1734        return -1;
1735}
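
/*
 * Example trace (hypothetical type with regions at [0x1000,0x2000),
 * [0x4000,0x5000), [0x8000,0x9000) and addr = 0x4800): left=0, right=3,
 * mid=1 hits [0x4000,0x5000) immediately and returns 1; addr = 0x3000
 * instead narrows to left == right == 1 and returns -1.
 */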
1736
1737bool __init_memblock memblock_is_reserved(phys_addr_t addr)
1738{
1739        return memblock_search(&memblock.reserved, addr) != -1;
1740}
1741
1742bool __init_memblock memblock_is_memory(phys_addr_t addr)
1743{
1744        return memblock_search(&memblock.memory, addr) != -1;
1745}
1746
1747bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
1748{
1749        int i = memblock_search(&memblock.memory, addr);
1750
1751        if (i == -1)
1752                return false;
1753        return !memblock_is_nomap(&memblock.memory.regions[i]);
1754}
1755
1756int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
1757                         unsigned long *start_pfn, unsigned long *end_pfn)
1758{
1759        struct memblock_type *type = &memblock.memory;
1760        int mid = memblock_search(type, PFN_PHYS(pfn));
1761
1762        if (mid == -1)
1763                return -1;
1764
1765        *start_pfn = PFN_DOWN(type->regions[mid].base);
1766        *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
1767
1768        return memblock_get_region_node(&type->regions[mid]);
1769}
1770
1771/**
1772 * memblock_is_region_memory - check if a region is a subset of memory
1773 * @base: base of region to check
1774 * @size: size of region to check
1775 *
1776 * Check if the region [@base, @base + @size) is a subset of a memory block.
1777 *
1778 * Return:
1779 * True if the region is a subset of a memory block, false if not.
1780 */
1781bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
1782{
1783        int idx = memblock_search(&memblock.memory, base);
1784        phys_addr_t end = base + memblock_cap_size(base, &size);
1785
1786        if (idx == -1)
1787                return false;
1788        return (memblock.memory.regions[idx].base +
1789                 memblock.memory.regions[idx].size) >= end;
1790}
1791
1792/**
1793 * memblock_is_region_reserved - check if a region intersects reserved memory
1794 * @base: base of region to check
1795 * @size: size of region to check
1796 *
1797 * Check if the region [@base, @base + @size) intersects a reserved
1798 * memory block.
1799 *
1800 * Return:
1801 * True if they intersect, false if not.
1802 */
1803bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
1804{
1805        return memblock_overlaps_region(&memblock.reserved, base, size);
1806}
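
/*
 * Sketch (hypothetical helper): note the asymmetry of the two checks
 * above - a usable range must be a *subset* of memory but must not
 * *intersect* anything reserved.
 */
static bool __init example_range_is_free_ram(phys_addr_t base, phys_addr_t size)
{
        return memblock_is_region_memory(base, size) &&
               !memblock_is_region_reserved(base, size);
}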
1807
1808void __init_memblock memblock_trim_memory(phys_addr_t align)
1809{
1810        phys_addr_t start, end, orig_start, orig_end;
1811        struct memblock_region *r;
1812
1813        for_each_mem_region(r) {
1814                orig_start = r->base;
1815                orig_end = r->base + r->size;
1816                start = round_up(orig_start, align);
1817                end = round_down(orig_end, align);
1818
1819                if (start == orig_start && end == orig_end)
1820                        continue;
1821
1822                if (start < end) {
1823                        r->base = start;
1824                        r->size = end - start;
1825                } else {
1826                        memblock_remove_region(&memblock.memory,
1827                                               r - memblock.memory.regions);
1828                        r--;
1829                }
1830        }
1831}
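
/*
 * Worked example (hypothetical region): trimming [0x1ff000, 0x601000)
 * to align = 2M rounds the base up to 0x200000 and the end down to
 * 0x600000, leaving [0x200000, 0x600000); a region smaller than @align
 * ends up with start >= end and is removed outright.
 */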
1832
1833void __init_memblock memblock_set_current_limit(phys_addr_t limit)
1834{
1835        memblock.current_limit = limit;
1836}
1837
1838phys_addr_t __init_memblock memblock_get_current_limit(void)
1839{
1840        return memblock.current_limit;
1841}
1842
1843static void __init_memblock memblock_dump(struct memblock_type *type)
1844{
1845        phys_addr_t base, end, size;
1846        enum memblock_flags flags;
1847        int idx;
1848        struct memblock_region *rgn;
1849
1850        pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
1851
1852        for_each_memblock_type(idx, type, rgn) {
1853                char nid_buf[32] = "";
1854
1855                base = rgn->base;
1856                size = rgn->size;
1857                end = base + size - 1;
1858                flags = rgn->flags;
1859#ifdef CONFIG_NUMA
1860                if (memblock_get_region_node(rgn) != MAX_NUMNODES)
1861                        snprintf(nid_buf, sizeof(nid_buf), " on node %d",
1862                                 memblock_get_region_node(rgn));
1863#endif
1864                pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
1865                        type->name, idx, &base, &end, &size, nid_buf, flags);
1866        }
1867}
1868
1869static void __init_memblock __memblock_dump_all(void)
1870{
1871        pr_info("MEMBLOCK configuration:\n");
1872        pr_info(" memory size = %pa reserved size = %pa\n",
1873                &memblock.memory.total_size,
1874                &memblock.reserved.total_size);
1875
1876        memblock_dump(&memblock.memory);
1877        memblock_dump(&memblock.reserved);
1878#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
1879        memblock_dump(&physmem);
1880#endif
1881}
1882
1883void __init_memblock memblock_dump_all(void)
1884{
1885        if (memblock_debug)
1886                __memblock_dump_all();
1887}
1888
1889void __init memblock_allow_resize(void)
1890{
1891        memblock_can_resize = 1;
1892}
1893
1894static int __init early_memblock(char *p)
1895{
1896        if (p && strstr(p, "debug"))
1897                memblock_debug = 1;
1898        return 0;
1899}
1900early_param("memblock", early_memblock);
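
/*
 * Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, enabling the memblock_dbg() prints in the allocators
 * above and the full memblock_dump_all() report during setup.
 */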
1901
1902static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
1903{
1904        struct page *start_pg, *end_pg;
1905        phys_addr_t pg, pgend;
1906
1907        /*
1908         * Convert start_pfn/end_pfn to struct page pointers.
1909         */
1910        start_pg = pfn_to_page(start_pfn - 1) + 1;
1911        end_pg = pfn_to_page(end_pfn - 1) + 1;
1912
1913        /*
1914         * Convert to physical addresses, and round start upwards and end
1915         * downwards.
1916         */
1917        pg = PAGE_ALIGN(__pa(start_pg));
1918        pgend = __pa(end_pg) & PAGE_MASK;
1919
1920        /*
1921         * If there are free pages between these, free the section of the
1922         * memmap array.
1923         */
1924        if (pg < pgend)
1925                memblock_free(pg, pgend - pg);
1926}
1927
1928/*
1929 * The mem_map array can get very big.  Free the unused area of the memory map.
1930 */
1931static void __init free_unused_memmap(void)
1932{
1933        unsigned long start, end, prev_end = 0;
1934        int i;
1935
1936        if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
1937            IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
1938                return;
1939
1940        /*
1941         * This relies on each bank being in address order.
1942         * The banks are sorted previously in bootmem_init().
1943         */
1944        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
1945#ifdef CONFIG_SPARSEMEM
1946                /*
1947                 * Take care not to free memmap entries that don't exist
1948                 * due to SPARSEMEM sections which aren't present.
1949                 */
1950                start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
1951#endif
1952                /*
1953                 * Align down here since many operations in VM subsystem
1954                 * presume that there are no holes in the memory map inside
1955                 * a pageblock
1956                 */
1957                start = round_down(start, pageblock_nr_pages);
1958
1959                /*
1960                 * If we had a previous bank and there is a gap
1961                 * between the current bank and the previous one, free it.
1962                 */
1963                if (prev_end && prev_end < start)
1964                        free_memmap(prev_end, start);
1965
1966                /*
1967                 * Align up here since many operations in VM subsystem
1968                 * presume that there are no holes in the memory map inside
1969                 * a pageblock
1970                 */
1971                prev_end = ALIGN(end, pageblock_nr_pages);
1972        }
1973
1974#ifdef CONFIG_SPARSEMEM
1975        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
1976                prev_end = ALIGN(end, pageblock_nr_pages);
1977                free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
1978        }
1979#endif
1980}
1981
1982static void __init __free_pages_memory(unsigned long start, unsigned long end)
1983{
1984        int order;
1985
1986        while (start < end) {
1987                order = min(MAX_ORDER - 1UL, __ffs(start));
1988
1989                while (start + (1UL << order) > end)
1990                        order--;
1991
1992                memblock_free_pages(pfn_to_page(start), start, order);
1993
1994                start += (1UL << order);
1995        }
1996}
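
/*
 * Worked example: for start = 3 and end = 12 the loop frees pfn 3 at
 * order 0 (__ffs(3) = 0), pfns 4-7 at order 2, then clamps __ffs(8) = 3
 * back to order 2 so that pfns 8-11 do not overrun @end.
 */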
1997
1998static unsigned long __init __free_memory_core(phys_addr_t start,
1999                                 phys_addr_t end)
2000{
2001        unsigned long start_pfn = PFN_UP(start);
2002        unsigned long end_pfn = min_t(unsigned long,
2003                                      PFN_DOWN(end), max_low_pfn);
2004
2005        if (start_pfn >= end_pfn)
2006                return 0;
2007
2008        __free_pages_memory(start_pfn, end_pfn);
2009
2010        return end_pfn - start_pfn;
2011}
2012
2013static void __init memmap_init_reserved_pages(void)
2014{
2015        struct memblock_region *region;
2016        phys_addr_t start, end;
2017        u64 i;
2018
2019        /* initialize struct pages for the reserved regions */
2020        for_each_reserved_mem_range(i, &start, &end)
2021                reserve_bootmem_region(start, end);
2022
2023        /* and also treat struct pages for the NOMAP regions as PageReserved */
2024        for_each_mem_region(region) {
2025                if (memblock_is_nomap(region)) {
2026                        start = region->base;
2027                        end = start + region->size;
2028                        reserve_bootmem_region(start, end);
2029                }
2030        }
2031}
2032
2033static unsigned long __init free_low_memory_core_early(void)
2034{
2035        unsigned long count = 0;
2036        phys_addr_t start, end;
2037        u64 i;
2038
2039        memblock_clear_hotplug(0, -1);
2040
2041        memmap_init_reserved_pages();
2042
2043        /*
2044         * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
2045         * because in some cases, such as Node0 having no RAM installed, the
2046         * low memory will be on Node1.
2047         */
2048        for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
2049                                NULL)
2050                count += __free_memory_core(start, end);
2051
2052        return count;
2053}
2054
2055static int reset_managed_pages_done __initdata;
2056
2057void reset_node_managed_pages(pg_data_t *pgdat)
2058{
2059        struct zone *z;
2060
2061        for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
2062                atomic_long_set(&z->managed_pages, 0);
2063}
2064
2065void __init reset_all_zones_managed_pages(void)
2066{
2067        struct pglist_data *pgdat;
2068
2069        if (reset_managed_pages_done)
2070                return;
2071
2072        for_each_online_pgdat(pgdat)
2073                reset_node_managed_pages(pgdat);
2074
2075        reset_managed_pages_done = 1;
2076}
2077
2078/**
2079 * memblock_free_all - release free pages to the buddy allocator
2080 */
2081void __init memblock_free_all(void)
2082{
2083        unsigned long pages;
2084
2085        free_unused_memmap();
2086        reset_all_zones_managed_pages();
2087
2088        pages = free_low_memory_core_early();
2089        totalram_pages_add(pages);
2090}
2091
2092#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
2093
2094static int memblock_debug_show(struct seq_file *m, void *private)
2095{
2096        struct memblock_type *type = m->private;
2097        struct memblock_region *reg;
2098        int i;
2099        phys_addr_t end;
2100
2101        for (i = 0; i < type->cnt; i++) {
2102                reg = &type->regions[i];
2103                end = reg->base + reg->size - 1;
2104
2105                seq_printf(m, "%4d: ", i);
2106                seq_printf(m, "%pa..%pa\n", &reg->base, &end);
2107        }
2108        return 0;
2109}
2110DEFINE_SHOW_ATTRIBUTE(memblock_debug);
2111
2112static int __init memblock_init_debugfs(void)
2113{
2114        struct dentry *root = debugfs_create_dir("memblock", NULL);
2115
2116        debugfs_create_file("memory", 0444, root,
2117                            &memblock.memory, &memblock_debug_fops);
2118        debugfs_create_file("reserved", 0444, root,
2119                            &memblock.reserved, &memblock_debug_fops);
2120#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
2121        debugfs_create_file("physmem", 0444, root, &physmem,
2122                            &memblock_debug_fops);
2123#endif
2124
2125        return 0;
2126}
2127__initcall(memblock_init_debugfs);
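
/*
 * Illustrative output (format per memblock_debug_show() above):
 *
 *   # cat /sys/kernel/debug/memblock/memory
 *      0: 0x0000000000001000..0x000000003fffffff
 *      1: 0x0000000100000000..0x000000013fffffff
 */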
2128
2129#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */
2130