linux/mm/page_alloc.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/page_alloc.c
   4 *
   5 *  Manages the free list; the system allocates free pages here.
   6 *  Note that kmalloc() lives in slab.c
   7 *
   8 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   9 *  Swap reorganised 29.12.95, Stephen Tweedie
  10 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  11 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  12 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  13 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  14 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  15 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  16 */
  17
  18#include <linux/stddef.h>
  19#include <linux/mm.h>
  20#include <linux/highmem.h>
  21#include <linux/swap.h>
  22#include <linux/interrupt.h>
  23#include <linux/pagemap.h>
  24#include <linux/jiffies.h>
  25#include <linux/memblock.h>
  26#include <linux/compiler.h>
  27#include <linux/kernel.h>
  28#include <linux/kasan.h>
  29#include <linux/module.h>
  30#include <linux/suspend.h>
  31#include <linux/pagevec.h>
  32#include <linux/blkdev.h>
  33#include <linux/slab.h>
  34#include <linux/ratelimit.h>
  35#include <linux/oom.h>
  36#include <linux/topology.h>
  37#include <linux/sysctl.h>
  38#include <linux/cpu.h>
  39#include <linux/cpuset.h>
  40#include <linux/memory_hotplug.h>
  41#include <linux/nodemask.h>
  42#include <linux/vmalloc.h>
  43#include <linux/vmstat.h>
  44#include <linux/mempolicy.h>
  45#include <linux/memremap.h>
  46#include <linux/stop_machine.h>
  47#include <linux/random.h>
  48#include <linux/sort.h>
  49#include <linux/pfn.h>
  50#include <linux/backing-dev.h>
  51#include <linux/fault-inject.h>
  52#include <linux/page-isolation.h>
  53#include <linux/debugobjects.h>
  54#include <linux/kmemleak.h>
  55#include <linux/compaction.h>
  56#include <trace/events/kmem.h>
  57#include <trace/events/oom.h>
  58#include <linux/prefetch.h>
  59#include <linux/mm_inline.h>
  60#include <linux/mmu_notifier.h>
  61#include <linux/migrate.h>
  62#include <linux/hugetlb.h>
  63#include <linux/sched/rt.h>
  64#include <linux/sched/mm.h>
  65#include <linux/page_owner.h>
  66#include <linux/kthread.h>
  67#include <linux/memcontrol.h>
  68#include <linux/ftrace.h>
  69#include <linux/lockdep.h>
  70#include <linux/nmi.h>
  71#include <linux/psi.h>
  72#include <linux/padata.h>
  73#include <linux/khugepaged.h>
  74#include <linux/buffer_head.h>
  75#include <asm/sections.h>
  76#include <asm/tlbflush.h>
  77#include <asm/div64.h>
  78#include "internal.h"
  79#include "shuffle.h"
  80#include "page_reporting.h"
  81
  82/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
  83typedef int __bitwise fpi_t;
  84
  85/* No special request */
  86#define FPI_NONE                ((__force fpi_t)0)
  87
  88/*
  89 * Skip free page reporting notification for the (possibly merged) page.
  90 * This does not hinder free page reporting from grabbing the page,
  91 * reporting it and marking it "reported" -  it only skips notifying
  92 * the free page reporting infrastructure about a newly freed page. For
  93 * example, used when temporarily pulling a page from a freelist and
  94 * putting it back unmodified.
  95 */
  96#define FPI_SKIP_REPORT_NOTIFY  ((__force fpi_t)BIT(0))
  97
  98/*
  99 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 100 * page shuffling (relevant code - e.g., memory onlining - is expected to
 101 * shuffle the whole zone).
 102 *
 103 * Note: No code should rely on this flag for correctness - it's purely
 104 *       to allow for optimizations when handing back either fresh pages
 105 *       (memory onlining) or untouched pages (page isolation, free page
 106 *       reporting).
 107 */
 108#define FPI_TO_TAIL             ((__force fpi_t)BIT(1))
 109
 110/*
 111 * Don't poison memory with KASAN (only for the tag-based modes).
 112 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 113 * Poisoning all that memory lengthens boot time, especially on systems with
 114 * a large amount of RAM. This flag is used to skip that poisoning.
 115 * This is only done for the tag-based KASAN modes, as those are able to
 116 * detect memory corruptions with the memory tags assigned by default.
 117 * All memory allocated normally after boot gets poisoned as usual.
 118 */
 119#define FPI_SKIP_KASAN_POISON   ((__force fpi_t)BIT(2))
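
    /*
     * As an illustration, a caller that temporarily pulls a page off the
     * free list and puts it back unmodified might free it with
     *
     *     __free_one_page(page, pfn, zone, order, mt,
     *                     FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
     *
     * so the page is neither re-reported nor queued ahead of pages that
     * have been free for longer.
     */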
 120
 121/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 122static DEFINE_MUTEX(pcp_batch_high_lock);
 123#define MIN_PERCPU_PAGELIST_FRACTION    (8)
 124
 125#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 126DEFINE_PER_CPU(int, numa_node);
 127EXPORT_PER_CPU_SYMBOL(numa_node);
 128#endif
 129
 130DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);
 131
 132#ifdef CONFIG_HAVE_MEMORYLESS_NODES
 133/*
 134 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 135 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 136 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 137 * defined in <linux/topology.h>.
 138 */
 139DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
 140EXPORT_PER_CPU_SYMBOL(_numa_mem_);
 141#endif
 142
 143/* work_structs for global per-cpu drains */
 144struct pcpu_drain {
 145        struct zone *zone;
 146        struct work_struct work;
 147};
 148static DEFINE_MUTEX(pcpu_drain_mutex);
 149static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
 150
 151#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 152volatile unsigned long latent_entropy __latent_entropy;
 153EXPORT_SYMBOL(latent_entropy);
 154#endif
 155
 156/*
 157 * Array of node states.
 158 */
 159nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
 160        [N_POSSIBLE] = NODE_MASK_ALL,
 161        [N_ONLINE] = { { [0] = 1UL } },
 162#ifndef CONFIG_NUMA
 163        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
 164#ifdef CONFIG_HIGHMEM
 165        [N_HIGH_MEMORY] = { { [0] = 1UL } },
 166#endif
 167        [N_MEMORY] = { { [0] = 1UL } },
 168        [N_CPU] = { { [0] = 1UL } },
 169#endif  /* NUMA */
 170};
 171EXPORT_SYMBOL(node_states);
 172
 173atomic_long_t _totalram_pages __read_mostly;
 174EXPORT_SYMBOL(_totalram_pages);
 175unsigned long totalreserve_pages __read_mostly;
 176unsigned long totalcma_pages __read_mostly;
 177
 178int percpu_pagelist_fraction;
 179gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 180DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
 181EXPORT_SYMBOL(init_on_alloc);
 182
 183DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 184EXPORT_SYMBOL(init_on_free);
 185
 186static bool _init_on_alloc_enabled_early __read_mostly
 187                                = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 188static int __init early_init_on_alloc(char *buf)
 189{
 190
 191        return kstrtobool(buf, &_init_on_alloc_enabled_early);
 192}
 193early_param("init_on_alloc", early_init_on_alloc);
 194
 195static bool _init_on_free_enabled_early __read_mostly
 196                                = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
 197static int __init early_init_on_free(char *buf)
 198{
 199        return kstrtobool(buf, &_init_on_free_enabled_early);
 200}
 201early_param("init_on_free", early_init_on_free);
 202
 203/*
 204 * A cached value of the page's pageblock's migratetype, used when the page is
 205 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 206 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 207 * Also the migratetype set in the page does not necessarily match the pcplist
 208 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 209 * other index - this ensures that it will be put on the correct CMA freelist.
 210 */
 211static inline int get_pcppage_migratetype(struct page *page)
 212{
 213        return page->index;
 214}
 215
 216static inline void set_pcppage_migratetype(struct page *page, int migratetype)
 217{
 218        page->index = migratetype;
 219}
 220
 221#ifdef CONFIG_PM_SLEEP
 222/*
 223 * The following functions are used by the suspend/hibernate code to temporarily
 224 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 225 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 226 * they should always be called with system_transition_mutex held
 227 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 228 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 229 * with that modification).
 230 */
 231
 232static gfp_t saved_gfp_mask;
 233
 234void pm_restore_gfp_mask(void)
 235{
 236        WARN_ON(!mutex_is_locked(&system_transition_mutex));
 237        if (saved_gfp_mask) {
 238                gfp_allowed_mask = saved_gfp_mask;
 239                saved_gfp_mask = 0;
 240        }
 241}
 242
 243void pm_restrict_gfp_mask(void)
 244{
 245        WARN_ON(!mutex_is_locked(&system_transition_mutex));
 246        WARN_ON(saved_gfp_mask);
 247        saved_gfp_mask = gfp_allowed_mask;
 248        gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
 249}
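
    /*
     * For example, GFP_KERNEL is __GFP_RECLAIM | __GFP_IO | __GFP_FS; once
     * the two bits above are cleared from gfp_allowed_mask, a GFP_KERNEL
     * allocation is masked down to the equivalent of GFP_NOIO, so direct
     * reclaim will neither start I/O nor call into filesystems while
     * devices are suspended.
     */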
 250
 251bool pm_suspended_storage(void)
 252{
 253        if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
 254                return false;
 255        return true;
 256}
 257#endif /* CONFIG_PM_SLEEP */
 258
 259#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 260unsigned int pageblock_order __read_mostly;
 261#endif
 262
 263static void __free_pages_ok(struct page *page, unsigned int order,
 264                            fpi_t fpi_flags);
 265
 266/*
 267 * results with 256, 32 in the lowmem_reserve sysctl:
 268 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 269 *      1G machine -> (16M dma, 784M normal, 224M high)
 270 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 271 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 272 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 273 *
 274 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 275 * don't need any ZONE_NORMAL reservation
 276 */
 277int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
 278#ifdef CONFIG_ZONE_DMA
 279        [ZONE_DMA] = 256,
 280#endif
 281#ifdef CONFIG_ZONE_DMA32
 282        [ZONE_DMA32] = 256,
 283#endif
 284        [ZONE_NORMAL] = 32,
 285#ifdef CONFIG_HIGHMEM
 286        [ZONE_HIGHMEM] = 0,
 287#endif
 288        [ZONE_MOVABLE] = 0,
 289};
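
    /*
     * Roughly, for a zone with ratio N, the reserve kept back from
     * higher-zone allocations is (managed pages of the higher zones usable
     * by the allocation) / N; see setup_per_zone_lowmem_reserve() for the
     * exact calculation, which matches the 1G example above.
     */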
 290
 291static char * const zone_names[MAX_NR_ZONES] = {
 292#ifdef CONFIG_ZONE_DMA
 293         "DMA",
 294#endif
 295#ifdef CONFIG_ZONE_DMA32
 296         "DMA32",
 297#endif
 298         "Normal",
 299#ifdef CONFIG_HIGHMEM
 300         "HighMem",
 301#endif
 302         "Movable",
 303#ifdef CONFIG_ZONE_DEVICE
 304         "Device",
 305#endif
 306};
 307
 308const char * const migratetype_names[MIGRATE_TYPES] = {
 309        "Unmovable",
 310        "Movable",
 311        "Reclaimable",
 312        "HighAtomic",
 313#ifdef CONFIG_CMA
 314        "CMA",
 315#endif
 316#ifdef CONFIG_MEMORY_ISOLATION
 317        "Isolate",
 318#endif
 319};
 320
 321compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
 322        [NULL_COMPOUND_DTOR] = NULL,
 323        [COMPOUND_PAGE_DTOR] = free_compound_page,
 324#ifdef CONFIG_HUGETLB_PAGE
 325        [HUGETLB_PAGE_DTOR] = free_huge_page,
 326#endif
 327#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 328        [TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
 329#endif
 330};
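
    /*
     * The index stored in the first tail page by set_compound_page_dtor()
     * selects one of the entries above when the compound page is finally
     * released; ordinary high-order allocations use COMPOUND_PAGE_DTOR,
     * i.e. free_compound_page() below.
     */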
 331
 332int min_free_kbytes = 1024;
 333int user_min_free_kbytes = -1;
 334#ifdef CONFIG_DISCONTIGMEM
 335/*
 336 * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
 337 * are not on separate NUMA nodes. Functionally this works but with
 338 * watermark_boost_factor, it can reclaim prematurely as the ranges can be
 339 * quite small. By default, do not boost watermarks on discontigmem as in
 340 * many cases very high-order allocations like THP are likely to be
 341 * unsupported and the premature reclaim offsets the advantage of long-term
 342 * fragmentation avoidance.
 343 */
 344int watermark_boost_factor __read_mostly;
 345#else
 346int watermark_boost_factor __read_mostly = 15000;
 347#endif
 348int watermark_scale_factor = 10;
 349
 350static unsigned long nr_kernel_pages __initdata;
 351static unsigned long nr_all_pages __initdata;
 352static unsigned long dma_reserve __initdata;
 353
 354static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
 355static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
 356static unsigned long required_kernelcore __initdata;
 357static unsigned long required_kernelcore_percent __initdata;
 358static unsigned long required_movablecore __initdata;
 359static unsigned long required_movablecore_percent __initdata;
 360static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
 361static bool mirrored_kernelcore __meminitdata;
 362
 363/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 364int movable_zone;
 365EXPORT_SYMBOL(movable_zone);
 366
 367#if MAX_NUMNODES > 1
 368unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
 369unsigned int nr_online_nodes __read_mostly = 1;
 370EXPORT_SYMBOL(nr_node_ids);
 371EXPORT_SYMBOL(nr_online_nodes);
 372#endif
 373
 374int page_group_by_mobility_disabled __read_mostly;
 375
 376#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 377/*
 378 * During boot we initialize deferred pages on-demand, as needed, but once
 379 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 380 * and we can permanently disable that path.
 381 */
 382static DEFINE_STATIC_KEY_TRUE(deferred_pages);
 383
 384/*
 385 * Call kasan_free_pages() only after deferred memory initialization
 386 * has completed. Poisoning pages during deferred memory init will greatly
 387 * lengthen the process and cause problems in large memory systems, as
 388 * deferred page initialization is done with interrupts disabled.
 389 *
 390 * Assuming that there will be no reference to those newly initialized
 391 * pages before they are ever allocated, this should have no effect on
 392 * KASAN memory tracking as the poison will be properly inserted at page
 393 * allocation time. The only corner case is when pages are allocated by
 394 * on-demand allocation and then freed again before the deferred pages
 395 * initialization is done, but this is not likely to happen.
 396 */
 397static inline void kasan_free_nondeferred_pages(struct page *page, int order,
 398                                                bool init, fpi_t fpi_flags)
 399{
 400        if (static_branch_unlikely(&deferred_pages))
 401                return;
 402        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 403                        (fpi_flags & FPI_SKIP_KASAN_POISON))
 404                return;
 405        kasan_free_pages(page, order, init);
 406}
 407
 408/* Returns true if the struct page for the pfn is uninitialised */
 409static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 410{
 411        int nid = early_pfn_to_nid(pfn);
 412
 413        if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
 414                return true;
 415
 416        return false;
 417}
 418
 419/*
 420 * Returns true when the remaining initialisation should be deferred until
 421 * later in the boot cycle when it can be parallelised.
 422 */
 423static bool __meminit
 424defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 425{
 426        static unsigned long prev_end_pfn, nr_initialised;
 427
 428        /*
 429         * prev_end_pfn is a static that holds the end of the previous zone.
 430         * No need to protect it: this runs very early in boot, before smp_init().
 431         */
 432        if (prev_end_pfn != end_pfn) {
 433                prev_end_pfn = end_pfn;
 434                nr_initialised = 0;
 435        }
 436
 437        /* Always populate low zones for address-constrained allocations */
 438        if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
 439                return false;
 440
 441        if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
 442                return true;
 443        /*
 444         * We start with only one section of pages; more pages are added as
 445         * needed until the rest of the deferred pages are initialized.
 446         */
 447        nr_initialised++;
 448        if ((nr_initialised > PAGES_PER_SECTION) &&
 449            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
 450                NODE_DATA(nid)->first_deferred_pfn = pfn;
 451                return true;
 452        }
 453        return false;
 454}
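
    /*
     * Example: on a configuration with 128MB sections (PAGES_PER_SECTION ==
     * 32768 with 4K pages), early boot fully initialises the lower zones
     * plus roughly one section's worth of each node's last zone; the pfn
     * where initialisation stopped is recorded in first_deferred_pfn and
     * the remainder is filled in later by page_alloc_init_late().
     */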
 455#else
 456static inline void kasan_free_nondeferred_pages(struct page *page, int order,
 457                                                bool init, fpi_t fpi_flags)
 458{
 459        if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
 460                        (fpi_flags & FPI_SKIP_KASAN_POISON))
 461                return;
 462        kasan_free_pages(page, order, init);
 463}
 464
 465static inline bool early_page_uninitialised(unsigned long pfn)
 466{
 467        return false;
 468}
 469
 470static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 471{
 472        return false;
 473}
 474#endif
 475
 476/* Return a pointer to the bitmap storing bits affecting a block of pages */
 477static inline unsigned long *get_pageblock_bitmap(struct page *page,
 478                                                        unsigned long pfn)
 479{
 480#ifdef CONFIG_SPARSEMEM
 481        return section_to_usemap(__pfn_to_section(pfn));
 482#else
 483        return page_zone(page)->pageblock_flags;
 484#endif /* CONFIG_SPARSEMEM */
 485}
 486
 487static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
 488{
 489#ifdef CONFIG_SPARSEMEM
 490        pfn &= (PAGES_PER_SECTION-1);
 491#else
 492        pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
 493#endif /* CONFIG_SPARSEMEM */
 494        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
 495}
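
    /*
     * Example: each pageblock owns NR_PAGEBLOCK_BITS (4) bits - three
     * migratetype bits plus the compaction skip bit. With pageblock_order
     * == 9 (2MB pageblocks with 4K pages), a pfn offset of 0x1200 within
     * its section (or zone, without SPARSEMEM) lies in pageblock
     * 0x1200 >> 9 == 9, so its flags start at bit 9 * 4 == 36 of the
     * pageblock bitmap.
     */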
 496
 497static __always_inline
 498unsigned long __get_pfnblock_flags_mask(struct page *page,
 499                                        unsigned long pfn,
 500                                        unsigned long mask)
 501{
 502        unsigned long *bitmap;
 503        unsigned long bitidx, word_bitidx;
 504        unsigned long word;
 505
 506        bitmap = get_pageblock_bitmap(page, pfn);
 507        bitidx = pfn_to_bitidx(page, pfn);
 508        word_bitidx = bitidx / BITS_PER_LONG;
 509        bitidx &= (BITS_PER_LONG-1);
 510
 511        word = bitmap[word_bitidx];
 512        return (word >> bitidx) & mask;
 513}
 514
 515/**
 516 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 517 * @page: The page within the block of interest
 518 * @pfn: The target page frame number
 519 * @mask: mask of bits that the caller is interested in
 520 *
 521 * Return: pageblock_bits flags
 522 */
 523unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 524                                        unsigned long mask)
 525{
 526        return __get_pfnblock_flags_mask(page, pfn, mask);
 527}
 528
 529static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
 530{
 531        return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
 532}
 533
 534/**
 535 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 536 * @page: The page within the block of interest
 537 * @flags: The flags to set
 538 * @pfn: The target page frame number
 539 * @mask: mask of bits that the caller is interested in
 540 */
 541void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
 542                                        unsigned long pfn,
 543                                        unsigned long mask)
 544{
 545        unsigned long *bitmap;
 546        unsigned long bitidx, word_bitidx;
 547        unsigned long old_word, word;
 548
 549        BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
 550        BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));
 551
 552        bitmap = get_pageblock_bitmap(page, pfn);
 553        bitidx = pfn_to_bitidx(page, pfn);
 554        word_bitidx = bitidx / BITS_PER_LONG;
 555        bitidx &= (BITS_PER_LONG-1);
 556
 557        VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
 558
 559        mask <<= bitidx;
 560        flags <<= bitidx;
 561
 562        word = READ_ONCE(bitmap[word_bitidx]);
 563        for (;;) {
 564                old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
 565                if (word == old_word)
 566                        break;
 567                word = old_word;
 568        }
 569}
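
    /*
     * The cmpxchg() loop above makes the update of a single bitmap word a
     * lock-free read-modify-write: if another CPU changed neighbouring
     * pageblock bits in the same word after the READ_ONCE(), the compare
     * fails and the update is retried with the newly observed value, so
     * no concurrent update is lost.
     */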
 570
 571void set_pageblock_migratetype(struct page *page, int migratetype)
 572{
 573        if (unlikely(page_group_by_mobility_disabled &&
 574                     migratetype < MIGRATE_PCPTYPES))
 575                migratetype = MIGRATE_UNMOVABLE;
 576
 577        set_pfnblock_flags_mask(page, (unsigned long)migratetype,
 578                                page_to_pfn(page), MIGRATETYPE_MASK);
 579}
 580
 581#ifdef CONFIG_DEBUG_VM
 582static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 583{
 584        int ret = 0;
 585        unsigned seq;
 586        unsigned long pfn = page_to_pfn(page);
 587        unsigned long sp, start_pfn;
 588
 589        do {
 590                seq = zone_span_seqbegin(zone);
 591                start_pfn = zone->zone_start_pfn;
 592                sp = zone->spanned_pages;
 593                if (!zone_spans_pfn(zone, pfn))
 594                        ret = 1;
 595        } while (zone_span_seqretry(zone, seq));
 596
 597        if (ret)
 598                pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
 599                        pfn, zone_to_nid(zone), zone->name,
 600                        start_pfn, start_pfn + sp);
 601
 602        return ret;
 603}
 604
 605static int page_is_consistent(struct zone *zone, struct page *page)
 606{
 607        if (!pfn_valid_within(page_to_pfn(page)))
 608                return 0;
 609        if (zone != page_zone(page))
 610                return 0;
 611
 612        return 1;
 613}
 614/*
 615 * Temporary debugging check for pages not lying within a given zone.
 616 */
 617static int __maybe_unused bad_range(struct zone *zone, struct page *page)
 618{
 619        if (page_outside_zone_boundaries(zone, page))
 620                return 1;
 621        if (!page_is_consistent(zone, page))
 622                return 1;
 623
 624        return 0;
 625}
 626#else
 627static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
 628{
 629        return 0;
 630}
 631#endif
 632
 633static void bad_page(struct page *page, const char *reason)
 634{
 635        static unsigned long resume;
 636        static unsigned long nr_shown;
 637        static unsigned long nr_unshown;
 638
 639        /*
 640         * Allow a burst of 60 reports, then keep quiet for that minute;
 641         * or allow a steady drip of one report per second.
 642         */
 643        if (nr_shown == 60) {
 644                if (time_before(jiffies, resume)) {
 645                        nr_unshown++;
 646                        goto out;
 647                }
 648                if (nr_unshown) {
 649                        pr_alert(
 650                              "BUG: Bad page state: %lu messages suppressed\n",
 651                                nr_unshown);
 652                        nr_unshown = 0;
 653                }
 654                nr_shown = 0;
 655        }
 656        if (nr_shown++ == 0)
 657                resume = jiffies + 60 * HZ;
 658
 659        pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
 660                current->comm, page_to_pfn(page));
 661        __dump_page(page, reason);
 662        dump_page_owner(page);
 663
 664        print_modules();
 665        dump_stack();
 666out:
 667        /* Leave bad fields for debug, except PageBuddy could make trouble */
 668        page_mapcount_reset(page); /* remove PageBuddy */
 669        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 670}
 671
 672/*
 673 * Higher-order pages are called "compound pages".  They are structured thusly:
 674 *
 675 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 676 *
 677 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 678 * in bit 0 of page->compound_head. The rest of the bits point to the head page.
 679 *
 680 * The first tail page's ->compound_dtor holds the offset in array of compound
 681 * page destructors. See compound_page_dtors.
 682 *
 683 * The first tail page's ->compound_order holds the order of allocation.
 684 * This usage means that zero-order pages may not be compound.
 685 */
 686
 687void free_compound_page(struct page *page)
 688{
 689        mem_cgroup_uncharge(page);
 690        __free_pages_ok(page, compound_order(page), FPI_NONE);
 691}
 692
 693void prep_compound_page(struct page *page, unsigned int order)
 694{
 695        int i;
 696        int nr_pages = 1 << order;
 697
 698        __SetPageHead(page);
 699        for (i = 1; i < nr_pages; i++) {
 700                struct page *p = page + i;
 701                set_page_count(p, 0);
 702                p->mapping = TAIL_MAPPING;
 703                set_compound_head(p, page);
 704        }
 705
 706        set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
 707        set_compound_order(page, order);
 708        atomic_set(compound_mapcount_ptr(page), -1);
 709        if (hpage_pincount_available(page))
 710                atomic_set(compound_pincount_ptr(page), 0);
 711}
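
    /*
     * Example: after prep_compound_page(page, 2) on a four-page (order-2)
     * allocation, page[0] has PG_head set, while page[1], page[2] and
     * page[3] each carry page[0]'s address with bit 0 set in
     * ->compound_head. The first tail page additionally records the
     * destructor index (COMPOUND_PAGE_DTOR), the order (2) and the
     * compound mapcount shared by the whole compound page.
     */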
 712
 713#ifdef CONFIG_DEBUG_PAGEALLOC
 714unsigned int _debug_guardpage_minorder;
 715
 716bool _debug_pagealloc_enabled_early __read_mostly
 717                        = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
 718EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 719DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
 720EXPORT_SYMBOL(_debug_pagealloc_enabled);
 721
 722DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 723
 724static int __init early_debug_pagealloc(char *buf)
 725{
 726        return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 727}
 728early_param("debug_pagealloc", early_debug_pagealloc);
 729
 730static int __init debug_guardpage_minorder_setup(char *buf)
 731{
 732        unsigned long res;
 733
 734        if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
 735                pr_err("Bad debug_guardpage_minorder value\n");
 736                return 0;
 737        }
 738        _debug_guardpage_minorder = res;
 739        pr_info("Setting debug_guardpage_minorder to %lu\n", res);
 740        return 0;
 741}
 742early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
 743
 744static inline bool set_page_guard(struct zone *zone, struct page *page,
 745                                unsigned int order, int migratetype)
 746{
 747        if (!debug_guardpage_enabled())
 748                return false;
 749
 750        if (order >= debug_guardpage_minorder())
 751                return false;
 752
 753        __SetPageGuard(page);
 754        INIT_LIST_HEAD(&page->lru);
 755        set_page_private(page, order);
 756        /* Guard pages are not available for any usage */
 757        __mod_zone_freepage_state(zone, -(1 << order), migratetype);
 758
 759        return true;
 760}
 761
 762static inline void clear_page_guard(struct zone *zone, struct page *page,
 763                                unsigned int order, int migratetype)
 764{
 765        if (!debug_guardpage_enabled())
 766                return;
 767
 768        __ClearPageGuard(page);
 769
 770        set_page_private(page, 0);
 771        if (!is_migrate_isolate(migratetype))
 772                __mod_zone_freepage_state(zone, (1 << order), migratetype);
 773}
 774#else
 775static inline bool set_page_guard(struct zone *zone, struct page *page,
 776                        unsigned int order, int migratetype) { return false; }
 777static inline void clear_page_guard(struct zone *zone, struct page *page,
 778                                unsigned int order, int migratetype) {}
 779#endif
 780
 781/*
 782 * Enable static keys related to various memory debugging and hardening options.
 783 * Some override others, and depend on early params that are evaluated in the
 784 * order of appearance. So we need to first gather the full picture of what was
 785 * enabled, and then make decisions.
 786 */
 787void init_mem_debugging_and_hardening(void)
 788{
 789        bool page_poisoning_requested = false;
 790
 791#ifdef CONFIG_PAGE_POISONING
 792        /*
 793         * Page poisoning is debug page alloc for some arches. If
 794         * either of those options are enabled, enable poisoning.
 795         */
 796        if (page_poisoning_enabled() ||
 797             (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
 798              debug_pagealloc_enabled())) {
 799                static_branch_enable(&_page_poisoning_enabled);
 800                page_poisoning_requested = true;
 801        }
 802#endif
 803
 804        if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
 805            page_poisoning_requested) {
 806                pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
 807                        "will take precedence over init_on_alloc and init_on_free\n");
 808                _init_on_alloc_enabled_early = false;
 809                _init_on_free_enabled_early = false;
 810        }
 811
 812        if (_init_on_alloc_enabled_early)
 813                static_branch_enable(&init_on_alloc);
 814        else
 815                static_branch_disable(&init_on_alloc);
 816
 817        if (_init_on_free_enabled_early)
 818                static_branch_enable(&init_on_free);
 819        else
 820                static_branch_disable(&init_on_free);
 821
 822#ifdef CONFIG_DEBUG_PAGEALLOC
 823        if (!debug_pagealloc_enabled())
 824                return;
 825
 826        static_branch_enable(&_debug_pagealloc_enabled);
 827
 828        if (!debug_guardpage_minorder())
 829                return;
 830
 831        static_branch_enable(&_debug_guardpage_enabled);
 832#endif
 833}
 834
 835static inline void set_buddy_order(struct page *page, unsigned int order)
 836{
 837        set_page_private(page, order);
 838        __SetPageBuddy(page);
 839}
 840
 841/*
 842 * This function checks whether a page is free && is the buddy.
 843 * We can coalesce a page and its buddy if
 844 * (a) the buddy is not in a hole (check before calling!) &&
 845 * (b) the buddy is in the buddy system &&
 846 * (c) a page and its buddy have the same order &&
 847 * (d) a page and its buddy are in the same zone.
 848 *
 849 * For recording whether a page is in the buddy system, we set PageBuddy.
 850 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 851 *
 852 * For recording page's order, we use page_private(page).
 853 */
 854static inline bool page_is_buddy(struct page *page, struct page *buddy,
 855                                                        unsigned int order)
 856{
 857        if (!page_is_guard(buddy) && !PageBuddy(buddy))
 858                return false;
 859
 860        if (buddy_order(buddy) != order)
 861                return false;
 862
 863        /*
 864         * zone check is done late to avoid uselessly calculating
 865         * zone/node ids for pages that could never merge.
 866         */
 867        if (page_zone_id(page) != page_zone_id(buddy))
 868                return false;
 869
 870        VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
 871
 872        return true;
 873}
 874
 875#ifdef CONFIG_COMPACTION
 876static inline struct capture_control *task_capc(struct zone *zone)
 877{
 878        struct capture_control *capc = current->capture_control;
 879
 880        return unlikely(capc) &&
 881                !(current->flags & PF_KTHREAD) &&
 882                !capc->page &&
 883                capc->cc->zone == zone ? capc : NULL;
 884}
 885
 886static inline bool
 887compaction_capture(struct capture_control *capc, struct page *page,
 888                   int order, int migratetype)
 889{
 890        if (!capc || order != capc->cc->order)
 891                return false;
 892
 893        /* Do not accidentally pollute CMA or isolated regions */
 894        if (is_migrate_cma(migratetype) ||
 895            is_migrate_isolate(migratetype))
 896                return false;
 897
 898        /*
 899         * Do not let lower order allocations pollute a movable pageblock.
 900         * This might let an unmovable request use a reclaimable pageblock
 901         * and vice-versa but no more than normal fallback logic which can
 902         * have trouble finding a high-order free page.
 903         */
 904        if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
 905                return false;
 906
 907        capc->page = page;
 908        return true;
 909}
 910
 911#else
 912static inline struct capture_control *task_capc(struct zone *zone)
 913{
 914        return NULL;
 915}
 916
 917static inline bool
 918compaction_capture(struct capture_control *capc, struct page *page,
 919                   int order, int migratetype)
 920{
 921        return false;
 922}
 923#endif /* CONFIG_COMPACTION */
 924
 925/* Used for pages not on another list */
 926static inline void add_to_free_list(struct page *page, struct zone *zone,
 927                                    unsigned int order, int migratetype)
 928{
 929        struct free_area *area = &zone->free_area[order];
 930
 931        list_add(&page->lru, &area->free_list[migratetype]);
 932        area->nr_free++;
 933}
 934
 935/* Used for pages not on another list */
 936static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
 937                                         unsigned int order, int migratetype)
 938{
 939        struct free_area *area = &zone->free_area[order];
 940
 941        list_add_tail(&page->lru, &area->free_list[migratetype]);
 942        area->nr_free++;
 943}
 944
 945/*
 946 * Used for pages which are on another list. Move the pages to the tail
 947 * of the list - so the moved pages won't immediately be considered for
 948 * allocation again (e.g., optimization for memory onlining).
 949 */
 950static inline void move_to_free_list(struct page *page, struct zone *zone,
 951                                     unsigned int order, int migratetype)
 952{
 953        struct free_area *area = &zone->free_area[order];
 954
 955        list_move_tail(&page->lru, &area->free_list[migratetype]);
 956}
 957
 958static inline void del_page_from_free_list(struct page *page, struct zone *zone,
 959                                           unsigned int order)
 960{
 961        /* clear reported state and update reported page count */
 962        if (page_reported(page))
 963                __ClearPageReported(page);
 964
 965        list_del(&page->lru);
 966        __ClearPageBuddy(page);
 967        set_page_private(page, 0);
 968        zone->free_area[order].nr_free--;
 969}
 970
 971/*
 972 * If this is not the largest possible page, check if the buddy
 973 * of the next-highest order is free. If it is, it's possible
 974 * that pages are being freed that will coalesce soon. If that is
 975 * happening, add the free page to the tail of the list so it's less
 976 * likely to be used soon and more likely to be merged as a
 977 * higher-order page.
 978 */
 979static inline bool
 980buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
 981                   struct page *page, unsigned int order)
 982{
 983        struct page *higher_page, *higher_buddy;
 984        unsigned long combined_pfn;
 985
 986        if (order >= MAX_ORDER - 2)
 987                return false;
 988
 989        if (!pfn_valid_within(buddy_pfn))
 990                return false;
 991
 992        combined_pfn = buddy_pfn & pfn;
 993        higher_page = page + (combined_pfn - pfn);
 994        buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
 995        higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 996
 997        return pfn_valid_within(buddy_pfn) &&
 998               page_is_buddy(higher_page, higher_buddy, order + 1);
 999}
1000
1001/*
1002 * Freeing function for a buddy system allocator.
1003 *
1004 * The concept of a buddy system is to maintain a direct-mapped table
1005 * (containing bit values) for memory blocks of various "orders".
1006 * The bottom level table contains the map for the smallest allocatable
1007 * units of memory (here, pages), and each level above it describes
1008 * pairs of units from the levels below, hence, "buddies".
1009 * At a high level, all that happens here is marking the table entry
1010 * at the bottom level available, and propagating the changes upward
1011 * as necessary, plus some accounting needed to play nicely with other
1012 * parts of the VM system.
1013 * At each level, we keep a list of pages, which are heads of contiguous
1014 * free pages of length (1 << order) and marked with PageBuddy.
1015 * A page's order is recorded in the page_private(page) field.
1016 * So when we are allocating or freeing one, we can derive the state of the
1017 * other.  That is, if we allocate a small block, and both were
1018 * free, the remainder of the region must be split into blocks.
1019 * If a block is freed, and its buddy is also free, then this
1020 * triggers coalescing into a block of larger size.
1021 *
1022 * -- nyc
1023 */
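
    /*
     * Worked example: freeing an order-0 page at pfn 0x1001. Its buddy is
     * pfn 0x1001 ^ (1 << 0) == 0x1000 (see __find_buddy_pfn()). If that
     * page is free and of the same order, the pair merges into an order-1
     * block at combined_pfn = 0x1000 & 0x1001 == 0x1000, whose own buddy
     * is 0x1000 ^ (1 << 1) == 0x1002, and so on until the buddy check
     * fails or the maximum merge order is reached.
     */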
1024
1025static inline void __free_one_page(struct page *page,
1026                unsigned long pfn,
1027                struct zone *zone, unsigned int order,
1028                int migratetype, fpi_t fpi_flags)
1029{
1030        struct capture_control *capc = task_capc(zone);
1031        unsigned long buddy_pfn;
1032        unsigned long combined_pfn;
1033        unsigned int max_order;
1034        struct page *buddy;
1035        bool to_tail;
1036
1037        max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
1038
1039        VM_BUG_ON(!zone_is_initialized(zone));
1040        VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
1041
1042        VM_BUG_ON(migratetype == -1);
1043        if (likely(!is_migrate_isolate(migratetype)))
1044                __mod_zone_freepage_state(zone, 1 << order, migratetype);
1045
1046        VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
1047        VM_BUG_ON_PAGE(bad_range(zone, page), page);
1048
1049continue_merging:
1050        while (order < max_order) {
1051                if (compaction_capture(capc, page, order, migratetype)) {
1052                        __mod_zone_freepage_state(zone, -(1 << order),
1053                                                                migratetype);
1054                        return;
1055                }
1056                buddy_pfn = __find_buddy_pfn(pfn, order);
1057                buddy = page + (buddy_pfn - pfn);
1058
1059                if (!pfn_valid_within(buddy_pfn))
1060                        goto done_merging;
1061                if (!page_is_buddy(page, buddy, order))
1062                        goto done_merging;
1063                /*
1064                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
1065                 * merge with it and move up one order.
1066                 */
1067                if (page_is_guard(buddy))
1068                        clear_page_guard(zone, buddy, order, migratetype);
1069                else
1070                        del_page_from_free_list(buddy, zone, order);
1071                combined_pfn = buddy_pfn & pfn;
1072                page = page + (combined_pfn - pfn);
1073                pfn = combined_pfn;
1074                order++;
1075        }
1076        if (order < MAX_ORDER - 1) {
1077                /* If we are here, it means order is >= pageblock_order.
1078                 * We want to prevent merging between free pages on an isolated
1079                 * pageblock and a normal pageblock. Without this, pageblock
1080                 * isolation could cause incorrect freepage or CMA accounting.
1081                 *
1082                 * We don't want to hit this code for the more frequent
1083                 * low-order merging.
1084                 */
1085                if (unlikely(has_isolate_pageblock(zone))) {
1086                        int buddy_mt;
1087
1088                        buddy_pfn = __find_buddy_pfn(pfn, order);
1089                        buddy = page + (buddy_pfn - pfn);
1090                        buddy_mt = get_pageblock_migratetype(buddy);
1091
1092                        if (migratetype != buddy_mt
1093                                        && (is_migrate_isolate(migratetype) ||
1094                                                is_migrate_isolate(buddy_mt)))
1095                                goto done_merging;
1096                }
1097                max_order = order + 1;
1098                goto continue_merging;
1099        }
1100
1101done_merging:
1102        set_buddy_order(page, order);
1103
1104        if (fpi_flags & FPI_TO_TAIL)
1105                to_tail = true;
1106        else if (is_shuffle_order(order))
1107                to_tail = shuffle_pick_tail();
1108        else
1109                to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
1110
1111        if (to_tail)
1112                add_to_free_list_tail(page, zone, order, migratetype);
1113        else
1114                add_to_free_list(page, zone, order, migratetype);
1115
1116        /* Notify page reporting subsystem of freed page */
1117        if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
1118                page_reporting_notify_free(order);
1119}
1120
1121/*
1122 * A bad page could be due to a number of fields. Instead of multiple branches,
1123 * try and check multiple fields with one check. The caller must do a detailed
1124 * check if necessary.
1125 */
1126static inline bool page_expected_state(struct page *page,
1127                                        unsigned long check_flags)
1128{
1129        if (unlikely(atomic_read(&page->_mapcount) != -1))
1130                return false;
1131
1132        if (unlikely((unsigned long)page->mapping |
1133                        page_ref_count(page) |
1134#ifdef CONFIG_MEMCG
1135                        page->memcg_data |
1136#endif
1137                        (page->flags & check_flags)))
1138                return false;
1139
1140        return true;
1141}
1142
1143static const char *page_bad_reason(struct page *page, unsigned long flags)
1144{
1145        const char *bad_reason = NULL;
1146
1147        if (unlikely(atomic_read(&page->_mapcount) != -1))
1148                bad_reason = "nonzero mapcount";
1149        if (unlikely(page->mapping != NULL))
1150                bad_reason = "non-NULL mapping";
1151        if (unlikely(page_ref_count(page) != 0))
1152                bad_reason = "nonzero _refcount";
1153        if (unlikely(page->flags & flags)) {
1154                if (flags == PAGE_FLAGS_CHECK_AT_PREP)
1155                        bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
1156                else
1157                        bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
1158        }
1159#ifdef CONFIG_MEMCG
1160        if (unlikely(page->memcg_data))
1161                bad_reason = "page still charged to cgroup";
1162#endif
1163        return bad_reason;
1164}
1165
1166static void check_free_page_bad(struct page *page)
1167{
1168        bad_page(page,
1169                 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
1170}
1171
1172static inline int check_free_page(struct page *page)
1173{
1174        if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
1175                return 0;
1176
1177        /* Something has gone sideways, find it */
1178        check_free_page_bad(page);
1179        return 1;
1180}
1181
1182static int free_tail_pages_check(struct page *head_page, struct page *page)
1183{
1184        int ret = 1;
1185
1186        /*
1187         * We rely on page->lru.next never having bit 0 set, unless the page
1188         * is PageTail(). Let's make sure that's true even for poisoned ->lru.
1189         */
1190        BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
1191
1192        if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
1193                ret = 0;
1194                goto out;
1195        }
1196        switch (page - head_page) {
1197        case 1:
1198                /* the first tail page: ->mapping may be compound_mapcount() */
1199                if (unlikely(compound_mapcount(page))) {
1200                        bad_page(page, "nonzero compound_mapcount");
1201                        goto out;
1202                }
1203                break;
1204        case 2:
1205                /*
1206                 * the second tail page: ->mapping is
1207                 * deferred_list.next -- ignore value.
1208                 */
1209                break;
1210        default:
1211                if (page->mapping != TAIL_MAPPING) {
1212                        bad_page(page, "corrupted mapping in tail page");
1213                        goto out;
1214                }
1215                break;
1216        }
1217        if (unlikely(!PageTail(page))) {
1218                bad_page(page, "PageTail not set");
1219                goto out;
1220        }
1221        if (unlikely(compound_head(page) != head_page)) {
1222                bad_page(page, "compound_head not consistent");
1223                goto out;
1224        }
1225        ret = 0;
1226out:
1227        page->mapping = NULL;
1228        clear_compound_head(page);
1229        return ret;
1230}
1231
1232static void kernel_init_free_pages(struct page *page, int numpages)
1233{
1234        int i;
1235
1236        /* s390's use of memset() could override KASAN redzones. */
1237        kasan_disable_current();
1238        for (i = 0; i < numpages; i++) {
1239                u8 tag = page_kasan_tag(page + i);
1240                page_kasan_tag_reset(page + i);
1241                clear_highpage(page + i);
1242                page_kasan_tag_set(page + i, tag);
1243        }
1244        kasan_enable_current();
1245}
1246
1247static __always_inline bool free_pages_prepare(struct page *page,
1248                        unsigned int order, bool check_free, fpi_t fpi_flags)
1249{
1250        int bad = 0;
1251        bool init;
1252
1253        VM_BUG_ON_PAGE(PageTail(page), page);
1254
1255        trace_mm_page_free(page, order);
1256
1257        if (unlikely(PageHWPoison(page)) && !order) {
1258                /*
1259                 * Do not let hwpoison pages hit pcplists/buddy
1260                 * Untie memcg state and reset page's owner
1261                 */
1262                if (memcg_kmem_enabled() && PageMemcgKmem(page))
1263                        __memcg_kmem_uncharge_page(page, order);
1264                reset_page_owner(page, order);
1265                return false;
1266        }
1267
1268        /*
1269         * Check tail pages before head page information is cleared to
1270         * avoid checking PageCompound for order-0 pages.
1271         */
1272        if (unlikely(order)) {
1273                bool compound = PageCompound(page);
1274                int i;
1275
1276                VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
1277
1278                if (compound)
1279                        ClearPageDoubleMap(page);
1280                for (i = 1; i < (1 << order); i++) {
1281                        if (compound)
1282                                bad += free_tail_pages_check(page, page + i);
1283                        if (unlikely(check_free_page(page + i))) {
1284                                bad++;
1285                                continue;
1286                        }
1287                        (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1288                }
1289        }
1290        if (PageMappingFlags(page))
1291                page->mapping = NULL;
1292        if (memcg_kmem_enabled() && PageMemcgKmem(page))
1293                __memcg_kmem_uncharge_page(page, order);
1294        if (check_free)
1295                bad += check_free_page(page);
1296        if (bad)
1297                return false;
1298
1299        page_cpupid_reset_last(page);
1300        page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1301        reset_page_owner(page, order);
1302
1303        if (!PageHighMem(page)) {
1304                debug_check_no_locks_freed(page_address(page),
1305                                           PAGE_SIZE << order);
1306                debug_check_no_obj_freed(page_address(page),
1307                                           PAGE_SIZE << order);
1308        }
1309
1310        kernel_poison_pages(page, 1 << order);
1311
1312        /*
1313         * As memory initialization might be integrated into KASAN,
1314         * kasan_free_pages and kernel_init_free_pages must be
1315         * kept together to avoid discrepancies in behavior.
1316         *
1317         * With hardware tag-based KASAN, memory tags must be set before the
1318         * page becomes unavailable via debug_pagealloc or arch_free_page.
1319         */
1320        init = want_init_on_free();
1321        if (init && !kasan_has_integrated_init())
1322                kernel_init_free_pages(page, 1 << order);
1323        kasan_free_nondeferred_pages(page, order, init, fpi_flags);
1324
1325        /*
1326         * arch_free_page() can make the page's contents inaccessible.  s390
1327         * does this.  So nothing which can access the page's contents should
1328         * happen after this.
1329         */
1330        arch_free_page(page, order);
1331
1332        debug_pagealloc_unmap_pages(page, 1 << order);
1333
1334        return true;
1335}
1336
1337#ifdef CONFIG_DEBUG_VM
1338/*
1339 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
1340 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
1341 * moved from pcp lists to free lists.
1342 */
1343static bool free_pcp_prepare(struct page *page)
1344{
1345        return free_pages_prepare(page, 0, true, FPI_NONE);
1346}
1347
1348static bool bulkfree_pcp_prepare(struct page *page)
1349{
1350        if (debug_pagealloc_enabled_static())
1351                return check_free_page(page);
1352        else
1353                return false;
1354}
1355#else
1356/*
1357 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
1358 * moving from pcp lists to free list in order to reduce overhead. With
1359 * debug_pagealloc enabled, they are checked also immediately when being freed
1360 * to the pcp lists.
1361 */
1362static bool free_pcp_prepare(struct page *page)
1363{
1364        if (debug_pagealloc_enabled_static())
1365                return free_pages_prepare(page, 0, true, FPI_NONE);
1366        else
1367                return free_pages_prepare(page, 0, false, FPI_NONE);
1368}
1369
1370static bool bulkfree_pcp_prepare(struct page *page)
1371{
1372        return check_free_page(page);
1373}
1374#endif /* CONFIG_DEBUG_VM */
1375
1376static inline void prefetch_buddy(struct page *page)
1377{
1378        unsigned long pfn = page_to_pfn(page);
1379        unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
1380        struct page *buddy = page + (buddy_pfn - pfn);
1381
1382        prefetch(buddy);
1383}
1384
1385/*
1386 * Frees a number of pages from the PCP lists
1387 * Assumes all pages on list are in same zone, and of same order.
1388 * count is the number of pages to free.
1389 *
1390 * If the zone was previously in an "all pages pinned" state then look to
1391 * see if this freeing clears that state.
1392 *
1393 * And clear the zone's pages_scanned counter, to hold off the "all pages are
1394 * pinned" detection logic.
1395 */
1396static void free_pcppages_bulk(struct zone *zone, int count,
1397                                        struct per_cpu_pages *pcp)
1398{
1399        int migratetype = 0;
1400        int batch_free = 0;
1401        int prefetch_nr = READ_ONCE(pcp->batch);
1402        bool isolated_pageblocks;
1403        struct page *page, *tmp;
1404        LIST_HEAD(head);
1405
1406        /*
1407         * Ensure a proper count is passed; otherwise we would get stuck in
1408         * the while (list_empty(list)) loop below.
1409         */
1410        count = min(pcp->count, count);
1411        while (count) {
1412                struct list_head *list;
1413
1414                /*
1415                 * Remove pages from lists in a round-robin fashion. A
1416                 * batch_free count is maintained that is incremented when an
1417                 * empty list is encountered.  This is so more pages are freed
1418                 * off fuller lists instead of spinning excessively around empty
1419                 * lists
1420                 */
1421                do {
1422                        batch_free++;
1423                        if (++migratetype == MIGRATE_PCPTYPES)
1424                                migratetype = 0;
1425                        list = &pcp->lists[migratetype];
1426                } while (list_empty(list));
1427
1428                /* This is the only non-empty list. Free them all. */
1429                if (batch_free == MIGRATE_PCPTYPES)
1430                        batch_free = count;
1431
1432                do {
1433                        page = list_last_entry(list, struct page, lru);
1434                        /* must delete to avoid corrupting pcp list */
1435                        list_del(&page->lru);
1436                        pcp->count--;
1437
1438                        if (bulkfree_pcp_prepare(page))
1439                                continue;
1440
1441                        list_add_tail(&page->lru, &head);
1442
1443                        /*
1444                         * We are going to put the page back to the global
1445                         * pool, prefetch its buddy to speed up later access
1446                         * under zone->lock. It is believed the overhead of
1447                         * an additional test and calculating buddy_pfn here
1448                         * can be offset by reduced memory latency later. To
1449                         * avoid excessive prefetching due to large count, only
1450                         * prefetch buddy for the first pcp->batch nr of pages.
1451                         */
1452                        if (prefetch_nr) {
1453                                prefetch_buddy(page);
1454                                prefetch_nr--;
1455                        }
1456                } while (--count && --batch_free && !list_empty(list));
1457        }
1458
1459        spin_lock(&zone->lock);
1460        isolated_pageblocks = has_isolate_pageblock(zone);
1461
1462        /*
1463         * Use safe version since after __free_one_page(),
1464         * page->lru.next will not point to original list.
1465         */
1466        list_for_each_entry_safe(page, tmp, &head, lru) {
1467                int mt = get_pcppage_migratetype(page);
1468                /* MIGRATE_ISOLATE page should not go to pcplists */
1469                VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
1470                /* Pageblock could have been isolated meanwhile */
1471                if (unlikely(isolated_pageblocks))
1472                        mt = get_pageblock_migratetype(page);
1473
1474                __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
1475                trace_mm_page_pcpu_drain(page, 0, mt);
1476        }
1477        spin_unlock(&zone->lock);
1478}
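
    /*
     * Example of the round-robin above: with count == 8 and only the
     * MIGRATE_UNMOVABLE pcp list populated, the inner scan skips the two
     * empty lists, batch_free climbs to MIGRATE_PCPTYPES, and the
     * "only non-empty list" shortcut then lets all eight pages be drained
     * in one pass instead of one page per outer iteration.
     */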
1479
1480static void free_one_page(struct zone *zone,
1481                                struct page *page, unsigned long pfn,
1482                                unsigned int order,
1483                                int migratetype, fpi_t fpi_flags)
1484{
1485        spin_lock(&zone->lock);
1486        if (unlikely(has_isolate_pageblock(zone) ||
1487                is_migrate_isolate(migratetype))) {
1488                migratetype = get_pfnblock_migratetype(page, pfn);
1489        }
1490        __free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
1491        spin_unlock(&zone->lock);
1492}
1493
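    /*
     * One-time initialisation of a single struct page: zero it, link it to
     * its zone and node, set the refcount to 1 and reset the mapcount,
     * last-cpupid and KASAN tag fields.
     */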
1494static void __meminit __init_single_page(struct page *page, unsigned long pfn,
1495                                unsigned long zone, int nid)
1496{
1497        mm_zero_struct_page(page);
1498        set_page_links(page, zone, nid, pfn);
1499        init_page_count(page);
1500        page_mapcount_reset(page);
1501        page_cpupid_reset_last(page);
1502        page_kasan_tag_reset(page);
1503
1504        INIT_LIST_HEAD(&page->lru);
1505#ifdef WANT_PAGE_VIRTUAL
1506        /* The shift won't overflow because ZONE_NORMAL is below 4G. */
1507        if (!is_highmem_idx(zone))
1508                set_page_address(page, __va(pfn << PAGE_SHIFT));
1509#endif
1510}
1511
1512#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
1513static void __meminit init_reserved_page(unsigned long pfn)
1514{
1515        pg_data_t *pgdat;
1516        int nid, zid;
1517
1518        if (!early_page_uninitialised(pfn))
1519                return;
1520
1521        nid = early_pfn_to_nid(pfn);
1522        pgdat = NODE_DATA(nid);
1523
1524        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1525                struct zone *zone = &pgdat->node_zones[zid];
1526
1527                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
1528                        break;
1529        }
1530        __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
1531}
1532#else
1533static inline void init_reserved_page(unsigned long pfn)
1534{
1535}
1536#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1537
1538/*
1539 * Initialised pages do not have PageReserved set. This function is
1540 * called for each range allocated by the bootmem allocator and
1541 * marks the pages PageReserved. The remaining valid pages are later
1542 * sent to the buddy page allocator.
1543 */
1544void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
1545{
1546        unsigned long start_pfn = PFN_DOWN(start);
1547        unsigned long end_pfn = PFN_UP(end);
1548
1549        for (; start_pfn < end_pfn; start_pfn++) {
1550                if (pfn_valid(start_pfn)) {
1551                        struct page *page = pfn_to_page(start_pfn);
1552
1553                        init_reserved_page(start_pfn);
1554
1555                        /* Avoid false-positive PageTail() */
1556                        INIT_LIST_HEAD(&page->lru);
1557
1558                        /*
1559                         * no need for atomic set_bit because the struct
1560                         * page is not visible yet so nobody should
1561                         * access it yet.
1562                         */
1563                        __SetPageReserved(page);
1564                }
1565        }
1566}
1567
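    /*
     * Free pages of any order directly to the buddy allocator, bypassing
     * the per-cpu lists. Interrupts are disabled across the PGFREE event
     * count and the zone->lock section in free_one_page().
     */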
1568static void __free_pages_ok(struct page *page, unsigned int order,
1569                            fpi_t fpi_flags)
1570{
1571        unsigned long flags;
1572        int migratetype;
1573        unsigned long pfn = page_to_pfn(page);
1574
1575        if (!free_pages_prepare(page, order, true, fpi_flags))
1576                return;
1577
1578        migratetype = get_pfnblock_migratetype(page, pfn);
1579        local_irq_save(flags);
1580        __count_vm_events(PGFREE, 1 << order);
1581        free_one_page(page_zone(page), page, pfn, order, migratetype,
1582                      fpi_flags);
1583        local_irq_restore(flags);
1584}
1585
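    /*
     * Release a naturally aligned, order-sized block to the buddy allocator
     * for the first time, e.g. during boot or memory onlining: clear
     * PageReserved, drop the memmap-init refcount to zero, account the
     * pages as managed and free them to the tail of the free list.
     */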
1586void __free_pages_core(struct page *page, unsigned int order)
1587{
1588        unsigned int nr_pages = 1 << order;
1589        struct page *p = page;
1590        unsigned int loop;
1591
1592        /*
1593         * When initializing the memmap, __init_single_page() sets the refcount
1594         * of all pages to 1 ("allocated"/"not free"). We have to set the
1595         * refcount of all involved pages to 0.
1596         */
1597        prefetchw(p);
1598        for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
1599                prefetchw(p + 1);
1600                __ClearPageReserved(p);
1601                set_page_count(p, 0);
1602        }
1603        __ClearPageReserved(p);
1604        set_page_count(p, 0);
1605
1606        atomic_long_add(nr_pages, &page_zone(page)->managed_pages);
1607
1608        /*
1609         * Bypass PCP and place fresh pages right to the tail, primarily
1610         * relevant for memory onlining.
1611         */
1612        __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
1613}
1614
1615#ifdef CONFIG_NEED_MULTIPLE_NODES
1616
1617/*
1618 * During memory init, memblock regions map pfns to nids. The search is
1619 * expensive, so this caches recent lookups. The implementation of
1620 * __early_pfn_to_nid() treats start/end as pfns.
1621 */
1622struct mminit_pfnnid_cache {
1623        unsigned long last_start;
1624        unsigned long last_end;
1625        int last_nid;
1626};
1627
1628static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
1629
1630/*
1631 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1632 */
1633static int __meminit __early_pfn_to_nid(unsigned long pfn,
1634                                        struct mminit_pfnnid_cache *state)
1635{
1636        unsigned long start_pfn, end_pfn;
1637        int nid;
1638
1639        if (state->last_start <= pfn && pfn < state->last_end)
1640                return state->last_nid;
1641
1642        nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
1643        if (nid != NUMA_NO_NODE) {
1644                state->last_start = start_pfn;
1645                state->last_end = end_pfn;
1646                state->last_nid = nid;
1647        }
1648
1649        return nid;
1650}
1651
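    /*
     * Spinlock-serialised wrapper around __early_pfn_to_nid() using the
     * shared cache above. Falls back to the first online node when memblock
     * has no node information for this pfn.
     */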
1652int __meminit early_pfn_to_nid(unsigned long pfn)
1653{
1654        static DEFINE_SPINLOCK(early_pfn_lock);
1655        int nid;
1656
1657        spin_lock(&early_pfn_lock);
1658        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
1659        if (nid < 0)
1660                nid = first_online_node;
1661        spin_unlock(&early_pfn_lock);
1662
1663        return nid;
1664}
1665#endif /* CONFIG_NEED_MULTIPLE_NODES */
1666
1667void __init memblock_free_pages(struct page *page, unsigned long pfn,
1668                                                        unsigned int order)
1669{
1670        if (early_page_uninitialised(pfn))
1671                return;
1672        __free_pages_core(page, order);
1673}
1674
1675/*
1676 * Check that the whole pageblock (or a subset of it) given by the interval
1677 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1678 * with the migration or free compaction scanner. The scanners then need to
1679 * use only the pfn_valid_within() check for arches that allow holes within
1680 * pageblocks.
1681 *
1682 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
1683 *
1684 * It's possible on some configurations to have a setup like node0 node1 node0
1685 * i.e. it's possible that all pages within a zone's range of pages do not
1686 * belong to a single zone. We assume that a border between node0 and node1
1687 * can occur within a single pageblock, but not a node0 node1 node0
1688 * interleaving within a single pageblock. It is therefore sufficient to check
1689 * the first and last page of a pageblock and avoid checking each individual
1690 * page in a pageblock.
1691 */
1692struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
1693                                     unsigned long end_pfn, struct zone *zone)
1694{
1695        struct page *start_page;
1696        struct page *end_page;
1697
1698        /* end_pfn is one past the range we are checking */
1699        end_pfn--;
1700
1701        if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1702                return NULL;
1703
1704        start_page = pfn_to_online_page(start_pfn);
1705        if (!start_page)
1706                return NULL;
1707
1708        if (page_zone(start_page) != zone)
1709                return NULL;
1710
1711        end_page = pfn_to_page(end_pfn);
1712
1713        /* This gives shorter code than deriving page_zone(end_page) */
1714        if (page_zone_id(start_page) != page_zone_id(end_page))
1715                return NULL;
1716
1717        return start_page;
1718}
1719
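    /*
     * Walk the zone one pageblock at a time; only if every pageblock passes
     * __pageblock_pfn_to_page() is the zone marked contiguous, which lets
     * later pageblock_pfn_to_page() callers (e.g. compaction) skip the
     * per-pageblock checks.
     */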
1720void set_zone_contiguous(struct zone *zone)
1721{
1722        unsigned long block_start_pfn = zone->zone_start_pfn;
1723        unsigned long block_end_pfn;
1724
1725        block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
1726        for (; block_start_pfn < zone_end_pfn(zone);
1727                        block_start_pfn = block_end_pfn,
1728                         block_end_pfn += pageblock_nr_pages) {
1729
1730                block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
1731
1732                if (!__pageblock_pfn_to_page(block_start_pfn,
1733                                             block_end_pfn, zone))
1734                        return;
1735                cond_resched();
1736        }
1737
1738        /* We confirm that there is no hole */
1739        zone->contiguous = true;
1740}
1741
1742void clear_zone_contiguous(struct zone *zone)
1743{
1744        zone->contiguous = false;
1745}
1746
1747#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
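    /*
     * Free an already-initialised, physically contiguous range: a whole,
     * naturally aligned pageblock is freed as one high-order page, anything
     * else is freed page by page.
     */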
1748static void __init deferred_free_range(unsigned long pfn,
1749                                       unsigned long nr_pages)
1750{
1751        struct page *page;
1752        unsigned long i;
1753
1754        if (!nr_pages)
1755                return;
1756
1757        page = pfn_to_page(pfn);
1758
1759        /* Free a large naturally-aligned chunk if possible */
1760        if (nr_pages == pageblock_nr_pages &&
1761            (pfn & (pageblock_nr_pages - 1)) == 0) {
1762                set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1763                __free_pages_core(page, pageblock_order);
1764                return;
1765        }
1766
1767        for (i = 0; i < nr_pages; i++, page++, pfn++) {
1768                if ((pfn & (pageblock_nr_pages - 1)) == 0)
1769                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1770                __free_pages_core(page, 0);
1771        }
1772}
1773
1774/* Completion tracking for deferred_init_memmap() threads */
1775static atomic_t pgdat_init_n_undone __initdata;
1776static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1777
1778static inline void __init pgdat_init_report_one_done(void)
1779{
1780        if (atomic_dec_and_test(&pgdat_init_n_undone))
1781                complete(&pgdat_init_all_done_comp);
1782}
1783
1784/*
1785 * Returns true if page needs to be initialized or freed to buddy allocator.
1786 *
1787 * First we check if pfn is valid on architectures where it is possible to have
1788 * holes within pageblock_nr_pages. On systems where it is not possible, this
1789 * function is optimized out.
1790 *
1791 * Then we check if the current large page is valid by checking only the
1792 * validity of the head pfn.
1793 */
1794static inline bool __init deferred_pfn_valid(unsigned long pfn)
1795{
1796        if (!pfn_valid_within(pfn))
1797                return false;
1798        if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
1799                return false;
1800        return true;
1801}
1802
1803/*
1804 * Free pages to buddy allocator. Try to free aligned pages in
1805 * pageblock_nr_pages sizes.
1806 */
1807static void __init deferred_free_pages(unsigned long pfn,
1808                                       unsigned long end_pfn)
1809{
1810        unsigned long nr_pgmask = pageblock_nr_pages - 1;
1811        unsigned long nr_free = 0;
1812
1813        for (; pfn < end_pfn; pfn++) {
1814                if (!deferred_pfn_valid(pfn)) {
1815                        deferred_free_range(pfn - nr_free, nr_free);
1816                        nr_free = 0;
1817                } else if (!(pfn & nr_pgmask)) {
1818                        deferred_free_range(pfn - nr_free, nr_free);
1819                        nr_free = 1;
1820                } else {
1821                        nr_free++;
1822                }
1823        }
1824        /* Free the last block of pages to allocator */
1825        deferred_free_range(pfn - nr_free, nr_free);
1826}
1827
1828/*
1829 * Initialize struct pages. We minimize pfn-to-page lookups and scheduler
1830 * checks by performing them only once every pageblock_nr_pages.
1831 * Return the number of pages initialized.
1832 */
1833static unsigned long  __init deferred_init_pages(struct zone *zone,
1834                                                 unsigned long pfn,
1835                                                 unsigned long end_pfn)
1836{
1837        unsigned long nr_pgmask = pageblock_nr_pages - 1;
1838        int nid = zone_to_nid(zone);
1839        unsigned long nr_pages = 0;
1840        int zid = zone_idx(zone);
1841        struct page *page = NULL;
1842
1843        for (; pfn < end_pfn; pfn++) {
1844                if (!deferred_pfn_valid(pfn)) {
1845                        page = NULL;
1846                        continue;
1847                } else if (!page || !(pfn & nr_pgmask)) {
1848                        page = pfn_to_page(pfn);
1849                } else {
1850                        page++;
1851                }
1852                __init_single_page(page, pfn, zid, nid);
1853                nr_pages++;
1854        }
1855        return nr_pages;
1856}
1857
1858/*
1859 * This function is meant to pre-load the iterator for the zone init.
1860 * Specifically it walks through the ranges until we are caught up to the
1861 * first_init_pfn value and exits there. If we never encounter the value we
1862 * return false indicating there are no valid ranges left.
1863 */
1864static bool __init
1865deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
1866                                    unsigned long *spfn, unsigned long *epfn,
1867                                    unsigned long first_init_pfn)
1868{
1869        u64 j;
1870
1871        /*
1872         * Start out by walking through the ranges in this zone that have
1873         * already been initialized. We don't need to do anything with them
1874         * so we just need to flush them out of the system.
1875         */
1876        for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
1877                if (*epfn <= first_init_pfn)
1878                        continue;
1879                if (*spfn < first_init_pfn)
1880                        *spfn = first_init_pfn;
1881                *i = j;
1882                return true;
1883        }
1884
1885        return false;
1886}
1887
1888/*
1889 * Initialize and free pages. We do it in two loops: first we initialize
1890 * struct page, then free to buddy allocator, because while we are
1891 * freeing pages we can access pages that are ahead (computing buddy
1892 * page in __free_one_page()).
1893 *
1894 * In order to try and keep some memory in the cache we have the loop
1895 * broken along max page order boundaries. This way we will not cause
1896 * any issues with the buddy page computation.
1897 */
1898static unsigned long __init
1899deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
1900                       unsigned long *end_pfn)
1901{
1902        unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
1903        unsigned long spfn = *start_pfn, epfn = *end_pfn;
1904        unsigned long nr_pages = 0;
1905        u64 j = *i;
1906
1907        /* First we loop through and initialize the page values */
1908        for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
1909                unsigned long t;
1910
1911                if (mo_pfn <= *start_pfn)
1912                        break;
1913
1914                t = min(mo_pfn, *end_pfn);
1915                nr_pages += deferred_init_pages(zone, *start_pfn, t);
1916
1917                if (mo_pfn < *end_pfn) {
1918                        *start_pfn = mo_pfn;
1919                        break;
1920                }
1921        }
1922
1923        /* Reset values and now loop through freeing pages as needed */
1924        swap(j, *i);
1925
1926        for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
1927                unsigned long t;
1928
1929                if (mo_pfn <= spfn)
1930                        break;
1931
1932                t = min(mo_pfn, epfn);
1933                deferred_free_pages(spfn, t);
1934
1935                if (mo_pfn <= epfn)
1936                        break;
1937        }
1938
1939        return nr_pages;
1940}
1941
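    /*
     * Per-thread worker used by padata_do_multithreaded(): initialise and
     * free the deferred pages of one chunk of the zone, stepping in
     * MAX_ORDER-sized increments and rescheduling between steps.
     */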
1942static void __init
1943deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
1944                           void *arg)
1945{
1946        unsigned long spfn, epfn;
1947        struct zone *zone = arg;
1948        u64 i;
1949
1950        deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);
1951
1952        /*
1953         * Initialize and free pages in MAX_ORDER sized increments so that we
1954         * can avoid introducing any issues with the buddy allocator.
1955         */
1956        while (spfn < end_pfn) {
1957                deferred_init_maxorder(&i, zone, &spfn, &epfn);
1958                cond_resched();
1959        }
1960}
1961
1962/* An arch may override for more concurrency. */
1963__weak int __init
1964deferred_page_init_max_threads(const struct cpumask *node_cpumask)
1965{
1966        return 1;
1967}
1968
1969/* Initialise remaining memory on a node */
1970static int __init deferred_init_memmap(void *data)
1971{
1972        pg_data_t *pgdat = data;
1973        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1974        unsigned long spfn = 0, epfn = 0;
1975        unsigned long first_init_pfn, flags;
1976        unsigned long start = jiffies;
1977        struct zone *zone;
1978        int zid, max_threads;
1979        u64 i;
1980
1981        /* Bind memory initialisation thread to a local node if possible */
1982        if (!cpumask_empty(cpumask))
1983                set_cpus_allowed_ptr(current, cpumask);
1984
1985        pgdat_resize_lock(pgdat, &flags);
1986        first_init_pfn = pgdat->first_deferred_pfn;
1987        if (first_init_pfn == ULONG_MAX) {
1988                pgdat_resize_unlock(pgdat, &flags);
1989                pgdat_init_report_one_done();
1990                return 0;
1991        }
1992
1993        /* Sanity check boundaries */
1994        BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
1995        BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
1996        pgdat->first_deferred_pfn = ULONG_MAX;
1997
1998        /*
1999         * Once we unlock here, the zone cannot be grown anymore, thus if an
2000         * interrupt thread must allocate this early in boot, the zone must be
2001         * pre-grown prior to the start of deferred page initialization.
2002         */
2003        pgdat_resize_unlock(pgdat, &flags);
2004
2005        /* Only the highest zone is deferred so find it */
2006        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
2007                zone = pgdat->node_zones + zid;
2008                if (first_init_pfn < zone_end_pfn(zone))
2009                        break;
2010        }
2011
2012        /* If the zone is empty somebody else may have cleared out the zone */
2013        if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2014                                                 first_init_pfn))
2015                goto zone_empty;
2016
2017        max_threads = deferred_page_init_max_threads(cpumask);
2018
2019        while (spfn < epfn) {
2020                unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
2021                struct padata_mt_job job = {
2022                        .thread_fn   = deferred_init_memmap_chunk,
2023                        .fn_arg      = zone,
2024                        .start       = spfn,
2025                        .size        = epfn_align - spfn,
2026                        .align       = PAGES_PER_SECTION,
2027                        .min_chunk   = PAGES_PER_SECTION,
2028                        .max_threads = max_threads,
2029                };
2030
2031                padata_do_multithreaded(&job);
2032                deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2033                                                    epfn_align);
2034        }
2035zone_empty:
2036        /* Sanity check that the next zone really is unpopulated */
2037        WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
2038
2039        pr_info("node %d deferred pages initialised in %ums\n",
2040                pgdat->node_id, jiffies_to_msecs(jiffies - start));
2041
2042        pgdat_init_report_one_done();
2043        return 0;
2044}
2045
2046/*
2047 * If this zone has deferred pages, try to grow it by initializing enough
2048 * deferred pages to satisfy the allocation specified by order, rounded up to
2049 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
2050 * of SECTION_SIZE bytes by initializing struct pages in increments of
2051 * PAGES_PER_SECTION * sizeof(struct page) bytes.
2052 *
2053 * Return true when zone was grown, otherwise return false. We return true even
2054 * when we grow less than requested, to let the caller decide if there are
2055 * enough pages to satisfy the allocation.
2056 *
2057 * Note: We use noinline because this function is needed only during boot, and
2058 * it is called from a __ref function, _deferred_grow_zone. This way we are
2059 * making sure that it is not inlined into the permanent text section.
2060 */
2061static noinline bool __init
2062deferred_grow_zone(struct zone *zone, unsigned int order)
2063{
2064        unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
2065        pg_data_t *pgdat = zone->zone_pgdat;
2066        unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
2067        unsigned long spfn, epfn, flags;
2068        unsigned long nr_pages = 0;
2069        u64 i;
2070
2071        /* Only the last zone may have deferred pages */
2072        if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
2073                return false;
2074
2075        pgdat_resize_lock(pgdat, &flags);
2076
2077        /*
2078         * If someone grew this zone while we were waiting for spinlock, return
2079         * true, as there might be enough pages already.
2080         */
2081        if (first_deferred_pfn != pgdat->first_deferred_pfn) {
2082                pgdat_resize_unlock(pgdat, &flags);
2083                return true;
2084        }
2085
2086        /* If the zone is empty somebody else may have cleared out the zone */
2087        if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
2088                                                 first_deferred_pfn)) {
2089                pgdat->first_deferred_pfn = ULONG_MAX;
2090                pgdat_resize_unlock(pgdat, &flags);
2091                /* Retry only once. */
2092                return first_deferred_pfn != ULONG_MAX;
2093        }
2094
2095        /*
2096         * Initialize and free pages in MAX_ORDER sized increments so
2097         * that we can avoid introducing any issues with the buddy
2098         * allocator.
2099         */
2100        while (spfn < epfn) {
2101                /* update our first deferred PFN for this section */
2102                first_deferred_pfn = spfn;
2103
2104                nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
2105                touch_nmi_watchdog();
2106
2107                /* We should only stop along section boundaries */
2108                if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
2109                        continue;
2110
2111                /* If our quota has been met we can stop here */
2112                if (nr_pages >= nr_pages_needed)
2113                        break;
2114        }
2115
2116        pgdat->first_deferred_pfn = spfn;
2117        pgdat_resize_unlock(pgdat, &flags);
2118
2119        return nr_pages > 0;
2120}
2121
2122/*
2123 * deferred_grow_zone() is __init, but it is called from
2124 * get_page_from_freelist() during early boot until deferred_pages permanently
2125 * disables this call. This is why we have the __ref wrapper: to avoid the
2126 * section mismatch warning, and to ensure that the function body gets unloaded.
2127 */
2128static bool __ref
2129_deferred_grow_zone(struct zone *zone, unsigned int order)
2130{
2131        return deferred_grow_zone(zone, order);
2132}
2133
2134#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
2135
2136void __init page_alloc_init_late(void)
2137{
2138        struct zone *zone;
2139        int nid;
2140
2141#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
2142
2143        /* There will be num_node_state(N_MEMORY) threads */
2144        atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
2145        for_each_node_state(nid, N_MEMORY) {
2146                kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
2147        }
2148
2149        /* Block until all are initialised */
2150        wait_for_completion(&pgdat_init_all_done_comp);
2151
2152        /*
2153         * The number of managed pages has changed due to the initialisation
2154         * so the pcpu batch and high limits needs to be updated or the limits
2155         * will be artificially small.
2156         */
2157        for_each_populated_zone(zone)
2158                zone_pcp_update(zone);
2159
2160        /*
2161         * We initialized the rest of the deferred pages.  Permanently disable
2162         * on-demand struct page initialization.
2163         */
2164        static_branch_disable(&deferred_pages);
2165
2166        /* Reinit limits that are based on free pages after the kernel is up */
2167        files_maxfiles_init();
2168#endif
2169
2170        buffer_init();
2171
2172        /* Discard memblock private memory */
2173        memblock_discard();
2174
2175        for_each_node_state(nid, N_MEMORY)
2176                shuffle_free_memory(NODE_DATA(nid));
2177
2178        for_each_populated_zone(zone)
2179                set_zone_contiguous(zone);
2180}
2181
2182#ifdef CONFIG_CMA
2183/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
2184void __init init_cma_reserved_pageblock(struct page *page)
2185{
2186        unsigned i = pageblock_nr_pages;
2187        struct page *p = page;
2188
2189        do {
2190                __ClearPageReserved(p);
2191                set_page_count(p, 0);
2192        } while (++p, --i);
2193
2194        set_pageblock_migratetype(page, MIGRATE_CMA);
2195
2196        if (pageblock_order >= MAX_ORDER) {
2197                i = pageblock_nr_pages;
2198                p = page;
2199                do {
2200                        set_page_refcounted(p);
2201                        __free_pages(p, MAX_ORDER - 1);
2202                        p += MAX_ORDER_NR_PAGES;
2203                } while (i -= MAX_ORDER_NR_PAGES);
2204        } else {
2205                set_page_refcounted(page);
2206                __free_pages(page, pageblock_order);
2207        }
2208
2209        adjust_managed_page_count(page, pageblock_nr_pages);
2210        page_zone(page)->cma_pages += pageblock_nr_pages;
2211}
2212#endif
2213
2214/*
2215 * The order of subdivision here is critical for the IO subsystem.
2216 * Please do not alter this order without good reasons and regression
2217 * testing. Specifically, as large blocks of memory are subdivided,
2218 * the order in which smaller blocks are delivered depends on the order
2219 * they're subdivided in this function. This is the primary factor
2220 * influencing the order in which pages are delivered to the IO
2221 * subsystem according to empirical testing, and this is also justified
2222 * by considering the behavior of a buddy system containing a single
2223 * large block of memory acted on by a series of small allocations.
2224 * This behavior is a critical factor in sglist merging's success.
2225 *
2226 * -- nyc
2227 */
2228static inline void expand(struct zone *zone, struct page *page,
2229        int low, int high, int migratetype)
2230{
2231        unsigned long size = 1 << high;
2232
2233        while (high > low) {
2234                high--;
2235                size >>= 1;
2236                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
2237
2238                /*
2239                 * Mark as guard pages (or a guard page), so that they can be
2240                 * merged back into the allocator when the buddy is freed.
2241                 * Corresponding page table entries will not be touched;
2242                 * the pages will stay not present in the virtual address space.
2243                 */
2244                if (set_page_guard(zone, &page[size], high, migratetype))
2245                        continue;
2246
2247                add_to_free_list(&page[size], zone, high, migratetype);
2248                set_buddy_order(&page[size], high);
2249        }
2250}
2251
2252static void check_new_page_bad(struct page *page)
2253{
2254        if (unlikely(page->flags & __PG_HWPOISON)) {
2255                /* Don't complain about hwpoisoned pages */
2256                page_mapcount_reset(page); /* remove PageBuddy */
2257                return;
2258        }
2259
2260        bad_page(page,
2261                 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
2262}
2263
2264/*
2265 * This page is about to be returned from the page allocator
2266 */
2267static inline int check_new_page(struct page *page)
2268{
2269        if (likely(page_expected_state(page,
2270                                PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
2271                return 0;
2272
2273        check_new_page_bad(page);
2274        return 1;
2275}
2276
2277#ifdef CONFIG_DEBUG_VM
2278/*
2279 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
2280 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
2281 * also checked when pcp lists are refilled from the free lists.
2282 */
2283static inline bool check_pcp_refill(struct page *page)
2284{
2285        if (debug_pagealloc_enabled_static())
2286                return check_new_page(page);
2287        else
2288                return false;
2289}
2290
2291static inline bool check_new_pcp(struct page *page)
2292{
2293        return check_new_page(page);
2294}
2295#else
2296/*
2297 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
2298 * when pcp lists are being refilled from the free lists. With debug_pagealloc
2299 * enabled, they are also checked when being allocated from the pcp lists.
2300 */
2301static inline bool check_pcp_refill(struct page *page)
2302{
2303        return check_new_page(page);
2304}
2305static inline bool check_new_pcp(struct page *page)
2306{
2307        if (debug_pagealloc_enabled_static())
2308                return check_new_page(page);
2309        else
2310                return false;
2311}
2312#endif /* CONFIG_DEBUG_VM */
2313
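    /*
     * Check every constituent page of a (possibly high-order) allocation;
     * returns true if any of them is in an unexpected state.
     */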
2314static bool check_new_pages(struct page *page, unsigned int order)
2315{
2316        int i;
2317        for (i = 0; i < (1 << order); i++) {
2318                struct page *p = page + i;
2319
2320                if (unlikely(check_new_page(p)))
2321                        return true;
2322        }
2323
2324        return false;
2325}
2326
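    /*
     * Common preparation for every page handed out by the allocator: reset
     * page->private, set the refcount, run arch and debug_pagealloc hooks,
     * unpoison, optionally zero the contents (via KASAN or init_on_alloc)
     * and record the page owner.
     */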
2327inline void post_alloc_hook(struct page *page, unsigned int order,
2328                                gfp_t gfp_flags)
2329{
2330        bool init;
2331
2332        set_page_private(page, 0);
2333        set_page_refcounted(page);
2334
2335        arch_alloc_page(page, order);
2336        debug_pagealloc_map_pages(page, 1 << order);
2337
2338        /*
2339         * Page unpoisoning must happen before memory initialization.
2340         * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
2341         * allocations and the page unpoisoning code will complain.
2342         */
2343        kernel_unpoison_pages(page, 1 << order);
2344
2345        /*
2346         * As memory initialization might be integrated into KASAN,
2347         * kasan_alloc_pages and kernel_init_free_pages must be
2348         * kept together to avoid discrepancies in behavior.
2349         */
2350        init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
2351        kasan_alloc_pages(page, order, init);
2352        if (init && !kasan_has_integrated_init())
2353                kernel_init_free_pages(page, 1 << order);
2354
2355        set_page_owner(page, order, gfp_flags);
2356}
2357
2358static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
2359                                                        unsigned int alloc_flags)
2360{
2361        post_alloc_hook(page, order, gfp_flags);
2362
2363        if (order && (gfp_flags & __GFP_COMP))
2364                prep_compound_page(page, order);
2365
2366        /*
2367         * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
2368         * allocate the page. The expectation is that the caller is taking
2369         * steps that will free more memory. The caller should avoid the page
2370         * being used for !PFMEMALLOC purposes.
2371         */
2372        if (alloc_flags & ALLOC_NO_WATERMARKS)
2373                set_page_pfmemalloc(page);
2374        else
2375                clear_page_pfmemalloc(page);
2376}
2377
2378/*
2379 * Go through the free lists for the given migratetype and remove
2380 * the smallest available page from the freelists
2381 */
2382static __always_inline
2383struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
2384                                                int migratetype)
2385{
2386        unsigned int current_order;
2387        struct free_area *area;
2388        struct page *page;
2389
2390        /* Find a page of the appropriate size in the preferred list */
2391        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
2392                area = &(zone->free_area[current_order]);
2393                page = get_page_from_free_area(area, migratetype);
2394                if (!page)
2395                        continue;
2396                del_page_from_free_list(page, zone, current_order);
2397                expand(zone, page, order, current_order, migratetype);
2398                set_pcppage_migratetype(page, migratetype);
2399                return page;
2400        }
2401
2402        return NULL;
2403}
2404
2405
2406/*
2407 * This array describes the order in which free lists are fallen back to
2408 * when the free lists for the desired migratetype are depleted.
2409 */
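    /*
     * For example, an UNMOVABLE request first falls back to RECLAIMABLE,
     * then to MOVABLE; the trailing MIGRATE_TYPES entry terminates the walk
     * in find_suitable_fallback(). CMA and ISOLATE are never used as
     * fallback sources.
     */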
2410static int fallbacks[MIGRATE_TYPES][3] = {
2411        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
2412        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
2413        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
2414#ifdef CONFIG_CMA
2415        [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
2416#endif
2417#ifdef CONFIG_MEMORY_ISOLATION
2418        [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
2419#endif
2420};
2421
2422#ifdef CONFIG_CMA
2423static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2424                                        unsigned int order)
2425{
2426        return __rmqueue_smallest(zone, order, MIGRATE_CMA);
2427}
2428#else
2429static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
2430                                        unsigned int order) { return NULL; }
2431#endif
2432
2433/*
2434 * Move the free pages in a range to the freelist tail of the requested type.
2435 * Note that start_pfn and end_pfn are not aligned on a pageblock
2436 * boundary. If alignment is required, use move_freepages_block().
2437 */
2438static int move_freepages(struct zone *zone,
2439                          unsigned long start_pfn, unsigned long end_pfn,
2440                          int migratetype, int *num_movable)
2441{
2442        struct page *page;
2443        unsigned long pfn;
2444        unsigned int order;
2445        int pages_moved = 0;
2446
2447        for (pfn = start_pfn; pfn <= end_pfn;) {
2448                if (!pfn_valid_within(pfn)) {
2449                        pfn++;
2450                        continue;
2451                }
2452
2453                page = pfn_to_page(pfn);
2454                if (!PageBuddy(page)) {
2455                        /*
2456                         * We assume that pages that could be isolated for
2457                         * migration are movable. But we don't actually try
2458                         * isolating, as that would be expensive.
2459                         */
2460                        if (num_movable &&
2461                                        (PageLRU(page) || __PageMovable(page)))
2462                                (*num_movable)++;
2463                        pfn++;
2464                        continue;
2465                }
2466
2467                /* Make sure we are not inadvertently changing nodes */
2468                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
2469                VM_BUG_ON_PAGE(page_zone(page) != zone, page);
2470
2471                order = buddy_order(page);
2472                move_to_free_list(page, zone, order, migratetype);
2473                pfn += 1 << order;
2474                pages_moved += 1 << order;
2475        }
2476
2477        return pages_moved;
2478}
2479
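    /*
     * Move all free pages of the pageblock containing @page to the free
     * list of @migratetype, without crossing zone boundaries. A pageblock
     * is pageblock_nr_pages pages (typically 512 pages, i.e. 2MB with 4KB
     * pages, on x86-64).
     */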
2480int move_freepages_block(struct zone *zone, struct page *page,
2481                                int migratetype, int *num_movable)
2482{
2483        unsigned long start_pfn, end_pfn, pfn;
2484
2485        if (num_movable)
2486                *num_movable = 0;
2487
2488        pfn = page_to_pfn(page);
2489        start_pfn = pfn & ~(pageblock_nr_pages - 1);
2490        end_pfn = start_pfn + pageblock_nr_pages - 1;
2491
2492        /* Do not cross zone boundaries */
2493        if (!zone_spans_pfn(zone, start_pfn))
2494                start_pfn = pfn;
2495        if (!zone_spans_pfn(zone, end_pfn))
2496                return 0;
2497
2498        return move_freepages(zone, start_pfn, end_pfn, migratetype,
2499                                                                num_movable);
2500}
2501
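    /*
     * Set @migratetype on every pageblock spanned by a free page of
     * @start_order; only meaningful when start_order >= pageblock_order.
     */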
2502static void change_pageblock_range(struct page *pageblock_page,
2503                                        int start_order, int migratetype)
2504{
2505        int nr_pageblocks = 1 << (start_order - pageblock_order);
2506
2507        while (nr_pageblocks--) {
2508                set_pageblock_migratetype(pageblock_page, migratetype);
2509                pageblock_page += pageblock_nr_pages;
2510        }
2511}
2512
2513/*
2514 * When we are falling back to another migratetype during allocation, try to
2515 * steal extra free pages from the same pageblocks to satisfy further
2516 * allocations, instead of polluting multiple pageblocks.
2517 *
2518 * If we are stealing a relatively large buddy page, it is likely there will
2519 * be more free pages in the pageblock, so try to steal them all. For
2520 * reclaimable and unmovable allocations, we steal regardless of page size,
2521 * as fragmentation caused by those allocations polluting movable pageblocks
2522 * is worse than movable allocations stealing from unmovable and reclaimable
2523 * pageblocks.
2524 */
2525static bool can_steal_fallback(unsigned int order, int start_mt)
2526{
2527        /*
2528         * Leaving this order check here is intended, even though the
2529         * next check uses a more relaxed order. The reason is that we
2530         * can actually steal the whole pageblock if this condition is met,
2531         * while the check below doesn't guarantee it and is just a
2532         * heuristic, so it could be changed at any time.
2533         */
2534        if (order >= pageblock_order)
2535                return true;
2536
2537        if (order >= pageblock_order / 2 ||
2538                start_mt == MIGRATE_RECLAIMABLE ||
2539                start_mt == MIGRATE_UNMOVABLE ||
2540                page_group_by_mobility_disabled)
2541                return true;
2542
2543        return false;
2544}
2545
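    /*
     * Temporarily raise the zone's watermark boost by one pageblock so that
     * kswapd reclaims a little extra after a fallback allocation. The boost
     * is capped at watermark_boost_factor/10000 of the high watermark.
     * Returns true if boosting was applied.
     */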
2546static inline bool boost_watermark(struct zone *zone)
2547{
2548        unsigned long max_boost;
2549
2550        if (!watermark_boost_factor)
2551                return false;
2552        /*
2553         * Don't bother in zones that are unlikely to produce results.
2554         * On small machines, including kdump capture kernels running
2555         * in a small area, boosting the watermark can cause an out of
2556         * memory situation immediately.
2557         */
2558        if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
2559                return false;
2560
2561        max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
2562                        watermark_boost_factor, 10000);
2563
2564        /*
2565         * The high watermark may be uninitialised if fragmentation occurs
2566         * very early in boot, so do not boost. We do not fall
2567         * through and boost by pageblock_nr_pages because failing
2568         * allocations that early means that reclaim is not going
2569         * to help, and it may even be impossible to reclaim the
2570         * boosted watermark, resulting in a hang.
2571         */
2572        if (!max_boost)
2573                return false;
2574
2575        max_boost = max(pageblock_nr_pages, max_boost);
2576
2577        zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
2578                max_boost);
2579
2580        return true;
2581}
2582
2583/*
2584 * This function implements actual steal behaviour. If order is large enough,
2585 * we can steal whole pageblock. If not, we first move freepages in this
2586 * pageblock to our migratetype and determine how many already-allocated pages
2587 * are there in the pageblock with a compatible migratetype. If at least half
2588 * of pages are free or compatible, we can change migratetype of the pageblock
2589 * itself, so pages freed in the future will be put on the correct free list.
2590 */
2591static void steal_suitable_fallback(struct zone *zone, struct page *page,
2592                unsigned int alloc_flags, int start_type, bool whole_block)
2593{
2594        unsigned int current_order = buddy_order(page);
2595        int free_pages, movable_pages, alike_pages;
2596        int old_block_type;
2597
2598        old_block_type = get_pageblock_migratetype(page);
2599
2600        /*
2601         * This can happen due to races and we want to prevent broken
2602         * highatomic accounting.
2603         */
2604        if (is_migrate_highatomic(old_block_type))
2605                goto single_page;
2606
2607        /* Take ownership for orders >= pageblock_order */
2608        if (current_order >= pageblock_order) {
2609                change_pageblock_range(page, current_order, start_type);
2610                goto single_page;
2611        }
2612
2613        /*
2614         * Boost watermarks to increase reclaim pressure to reduce the
2615         * likelihood of future fallbacks. Wake kswapd now as the node
2616         * may be balanced overall and kswapd will not wake naturally.
2617         */
2618        if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
2619                set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
2620
2621        /* We are not allowed to try stealing from the whole block */
2622        if (!whole_block)
2623                goto single_page;
2624
2625        free_pages = move_freepages_block(zone, page, start_type,
2626                                                &movable_pages);
2627        /*
2628         * Determine how many pages are compatible with our allocation.
2629         * For movable allocation, it's the number of movable pages which
2630         * we just obtained. For other types it's a bit more tricky.
2631         */
2632        if (start_type == MIGRATE_MOVABLE) {
2633                alike_pages = movable_pages;
2634        } else {
2635                /*
2636                 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
2637                 * to MOVABLE pageblock, consider all non-movable pages as
2638                 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
2639                 * vice versa, be conservative since we can't distinguish the
2640                 * exact migratetype of non-movable pages.
2641                 */
2642                if (old_block_type == MIGRATE_MOVABLE)
2643                        alike_pages = pageblock_nr_pages
2644                                                - (free_pages + movable_pages);
2645                else
2646                        alike_pages = 0;
2647        }
2648
2649        /* moving whole block can fail due to zone boundary conditions */
2650        if (!free_pages)
2651                goto single_page;
2652
2653        /*
2654         * If a sufficient number of pages in the block are either free or of
2655         * comparable migratability as our allocation, claim the whole block.
2656         */
2657        if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
2658                        page_group_by_mobility_disabled)
2659                set_pageblock_migratetype(page, start_type);
2660
2661        return;
2662
2663single_page:
2664        move_to_free_list(page, zone, current_order, start_type);
2665}
2666
2667/*
2668 * Check whether there is a suitable fallback freepage with requested order.
2669 * If only_stealable is true, this function returns fallback_mt only if
2670 * we can steal other freepages all together. This would help to reduce
2671 * fragmentation due to mixed migratetype pages in one pageblock.
2672 */
2673int find_suitable_fallback(struct free_area *area, unsigned int order,
2674                        int migratetype, bool only_stealable, bool *can_steal)
2675{
2676        int i;
2677        int fallback_mt;
2678
2679        if (area->nr_free == 0)
2680                return -1;
2681
2682        *can_steal = false;
2683        for (i = 0;; i++) {
2684                fallback_mt = fallbacks[migratetype][i];
2685                if (fallback_mt == MIGRATE_TYPES)
2686                        break;
2687
2688                if (free_area_empty(area, fallback_mt))
2689                        continue;
2690
2691                if (can_steal_fallback(order, migratetype))
2692                        *can_steal = true;
2693
2694                if (!only_stealable)
2695                        return fallback_mt;
2696
2697                if (*can_steal)
2698                        return fallback_mt;
2699        }
2700
2701        return -1;
2702}
2703
2704/*
2705 * Reserve a pageblock for exclusive use of high-order atomic allocations if
2706 * there are no empty page blocks that contain a page with a suitable order
2707 */
2708static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
2709                                unsigned int alloc_order)
2710{
2711        int mt;
2712        unsigned long max_managed, flags;
2713
2714        /*
2715         * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
2716         * Check is race-prone but harmless.
2717         */
2718        max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
2719        if (zone->nr_reserved_highatomic >= max_managed)
2720                return;
2721
2722        spin_lock_irqsave(&zone->lock, flags);
2723
2724        /* Recheck the nr_reserved_highatomic limit under the lock */
2725        if (zone->nr_reserved_highatomic >= max_managed)
2726                goto out_unlock;
2727
2728        /* Yoink! */
2729        mt = get_pageblock_migratetype(page);
2730        if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
2731            && !is_migrate_cma(mt)) {
2732                zone->nr_reserved_highatomic += pageblock_nr_pages;
2733                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
2734                move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
2735        }
2736
2737out_unlock:
2738        spin_unlock_irqrestore(&zone->lock, flags);
2739}
2740
2741/*
2742 * Used when an allocation is about to fail under memory pressure. This
2743 * potentially hurts the reliability of high-order allocations when under
2744 * intense memory pressure but failed atomic allocations should be easier
2745 * to recover from than an OOM.
2746 *
2747 * If @force is true, try to unreserve a pageblock even though highatomic
2748 * pageblock is exhausted.
2749 */
2750static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
2751                                                bool force)
2752{
2753        struct zonelist *zonelist = ac->zonelist;
2754        unsigned long flags;
2755        struct zoneref *z;
2756        struct zone *zone;
2757        struct page *page;
2758        int order;
2759        bool ret;
2760
2761        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
2762                                                                ac->nodemask) {
2763                /*
2764                 * Preserve at least one pageblock unless memory pressure
2765                 * is really high.
2766                 */
2767                if (!force && zone->nr_reserved_highatomic <=
2768                                        pageblock_nr_pages)
2769                        continue;
2770
2771                spin_lock_irqsave(&zone->lock, flags);
2772                for (order = 0; order < MAX_ORDER; order++) {
2773                        struct free_area *area = &(zone->free_area[order]);
2774
2775                        page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
2776                        if (!page)
2777                                continue;
2778
2779                        /*
2780                         * In the page freeing path, the migratetype change is racy,
2781                         * so we can encounter several free pages in a pageblock
2782                         * in this loop even though we changed the pageblock type
2783                         * from highatomic to ac->migratetype. So we should only
2784                         * adjust the count once.
2785                         */
2786                        if (is_migrate_highatomic_page(page)) {
2787                                /*
2788                                 * It should never happen but changes to
2789                                 * locking could inadvertently allow a per-cpu
2790                                 * drain to add pages to MIGRATE_HIGHATOMIC
2791                                 * while unreserving so be safe and watch for
2792                                 * underflows.
2793                                 */
2794                                zone->nr_reserved_highatomic -= min(
2795                                                pageblock_nr_pages,
2796                                                zone->nr_reserved_highatomic);
2797                        }
2798
2799                        /*
2800                         * Convert to ac->migratetype and avoid the normal
2801                         * pageblock stealing heuristics. Minimally, the caller
2802                         * is doing the work and needs the pages. More
2803                         * importantly, if the block was always converted to
2804                         * MIGRATE_UNMOVABLE or another type then the number
2805                         * of pageblocks that cannot be completely freed
2806                         * may increase.
2807                         */
2808                        set_pageblock_migratetype(page, ac->migratetype);
2809                        ret = move_freepages_block(zone, page, ac->migratetype,
2810                                                                        NULL);
2811                        if (ret) {
2812                                spin_unlock_irqrestore(&zone->lock, flags);
2813                                return ret;
2814                        }
2815                }
2816                spin_unlock_irqrestore(&zone->lock, flags);
2817        }
2818
2819        return false;
2820}
2821
2822/*
2823 * Try finding a free buddy page on the fallback list and put it on the free
2824 * list of requested migratetype, possibly along with other pages from the same
2825 * block, depending on fragmentation avoidance heuristics. Returns true if
2826 * fallback was found so that __rmqueue_smallest() can grab it.
2827 *
2828 * The use of signed ints for order and current_order is a deliberate
2829 * deviation from the rest of this file, to make the for loop
2830 * condition simpler.
2831 */
2832static __always_inline bool
2833__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
2834                                                unsigned int alloc_flags)
2835{
2836        struct free_area *area;
2837        int current_order;
2838        int min_order = order;
2839        struct page *page;
2840        int fallback_mt;
2841        bool can_steal;
2842
2843        /*
2844         * Do not steal pages from freelists belonging to other pageblocks
2845         * i.e. orders < pageblock_order. If there are no local zones free,
2846         * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
2847         */
2848        if (alloc_flags & ALLOC_NOFRAGMENT)
2849                min_order = pageblock_order;
2850
2851        /*
2852         * Find the largest available free page in the other list. This roughly
2853         * approximates finding the pageblock with the most free pages, which
2854         * would be too costly to do exactly.
2855         */
2856        for (current_order = MAX_ORDER - 1; current_order >= min_order;
2857                                --current_order) {
2858                area = &(zone->free_area[current_order]);
2859                fallback_mt = find_suitable_fallback(area, current_order,
2860                                start_migratetype, false, &can_steal);
2861                if (fallback_mt == -1)
2862                        continue;
2863
2864                /*
2865                 * If we cannot steal all free pages from the pageblock and the
2866                 * requested migratetype is movable, it's better to
2867                 * steal and split the smallest available page instead of the
2868                 * largest available page, because even if the next movable
2869                 * allocation falls back into a different pageblock than this
2870                 * one, it won't cause permanent fragmentation.
2871                 */
2872                if (!can_steal && start_migratetype == MIGRATE_MOVABLE
2873                                        && current_order > order)
2874                        goto find_smallest;
2875
2876                goto do_steal;
2877        }
2878
2879        return false;
2880
2881find_smallest:
2882        for (current_order = order; current_order < MAX_ORDER;
2883                                                        current_order++) {
2884                area = &(zone->free_area[current_order]);
2885                fallback_mt = find_suitable_fallback(area, current_order,
2886                                start_migratetype, false, &can_steal);
2887                if (fallback_mt != -1)
2888                        break;
2889        }
2890
2891        /*
2892         * This should not happen - we already found a suitable fallback
2893         * when looking for the largest page.
2894         */
2895        VM_BUG_ON(current_order == MAX_ORDER);
2896
2897do_steal:
2898        page = get_page_from_free_area(area, fallback_mt);
2899
2900        steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
2901                                                                can_steal);
2902
2903        trace_mm_page_alloc_extfrag(page, order, current_order,
2904                start_migratetype, fallback_mt);
2905
2906        return true;
2907
2908}
2909
2910/*
2911 * Do the hard work of removing an element from the buddy allocator.
2912 * Call me with the zone->lock already held.
2913 */
2914static __always_inline struct page *
2915__rmqueue(struct zone *zone, unsigned int order, int migratetype,
2916                                                unsigned int alloc_flags)
2917{
2918        struct page *page;
2919
2920        if (IS_ENABLED(CONFIG_CMA)) {
2921                /*
2922                 * Balance movable allocations between regular and CMA areas by
2923                 * allocating from CMA when over half of the zone's free memory
2924                 * is in the CMA area.
2925                 */
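                /*
                 * Worked example (illustrative only): if a zone has 1000 free
                 * pages and 600 of them sit in CMA pageblocks, an ALLOC_CMA
                 * movable request is served from CMA here first, nudging the
                 * split back towards balance.
                 */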
2926                if (alloc_flags & ALLOC_CMA &&
2927                    zone_page_state(zone, NR_FREE_CMA_PAGES) >
2928                    zone_page_state(zone, NR_FREE_PAGES) / 2) {
2929                        page = __rmqueue_cma_fallback(zone, order);
2930                        if (page)
2931                                goto out;
2932                }
2933        }
2934retry:
2935        page = __rmqueue_smallest(zone, order, migratetype);
2936        if (unlikely(!page)) {
2937                if (alloc_flags & ALLOC_CMA)
2938                        page = __rmqueue_cma_fallback(zone, order);
2939
2940                if (!page && __rmqueue_fallback(zone, order, migratetype,
2941                                                                alloc_flags))
2942                        goto retry;
2943        }
2944out:
2945        if (page)
2946                trace_mm_page_alloc_zone_locked(page, order, migratetype);
2947        return page;
2948}
2949
2950/*
2951 * Obtain a specified number of elements from the buddy allocator, all under
2952 * a single hold of the lock, for efficiency.  Add them to the supplied list.
2953 * Returns the number of new pages which were placed at *list.
2954 */
2955static int rmqueue_bulk(struct zone *zone, unsigned int order,
2956                        unsigned long count, struct list_head *list,
2957                        int migratetype, unsigned int alloc_flags)
2958{
2959        int i, allocated = 0;
2960
2961        spin_lock(&zone->lock);
2962        for (i = 0; i < count; ++i) {
2963                struct page *page = __rmqueue(zone, order, migratetype,
2964                                                                alloc_flags);
2965                if (unlikely(page == NULL))
2966                        break;
2967
2968                if (unlikely(check_pcp_refill(page)))
2969                        continue;
2970
2971                /*
2972                 * Split buddy pages returned by expand() are received here in
2973                 * physical page order. Each page is added to the tail of the
2974                 * caller's list, so from the caller's perspective the linked
2975                 * list is ordered by page number under some conditions. That
2976                 * ordering is useful for IO devices that walk the list from
2977                 * the head, and therefore in physical page order, and that
2978                 * can merge IO requests when the physical pages are ordered
2979                 * properly.
2980                 */
2981                list_add_tail(&page->lru, list);
2982                allocated++;
2983                if (is_migrate_cma(get_pcppage_migratetype(page)))
2984                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
2985                                              -(1 << order));
2986        }
2987
2988        /*
2989         * i pages were removed from the buddy list even if some leak due
2990         * to check_pcp_refill failing so adjust NR_FREE_PAGES based
2991         * on i. Do not confuse with 'allocated' which is the number of
2992         * pages added to the pcp list.
2993         */
2994        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
2995        spin_unlock(&zone->lock);
2996        return allocated;
2997}
2998
2999#ifdef CONFIG_NUMA
3000/*
3001 * Called from the vmstat counter updater to drain pagesets of this
3002 * currently executing processor on remote nodes after they have
3003 * expired.
3004 *
3005 * Note that this function must be called with the thread pinned to
3006 * a single processor.
3007 */
3008void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
3009{
3010        unsigned long flags;
3011        int to_drain, batch;
3012
3013        local_irq_save(flags);
3014        batch = READ_ONCE(pcp->batch);
3015        to_drain = min(pcp->count, batch);
3016        if (to_drain > 0)
3017                free_pcppages_bulk(zone, to_drain, pcp);
3018        local_irq_restore(flags);
3019}
3020#endif
3021
3022/*
3023 * Drain pcplists of the indicated processor and zone.
3024 *
3025 * The processor must either be the current processor and the
3026 * thread pinned to the current processor or a processor that
3027 * is not online.
3028 */
3029static void drain_pages_zone(unsigned int cpu, struct zone *zone)
3030{
3031        unsigned long flags;
3032        struct per_cpu_pageset *pset;
3033        struct per_cpu_pages *pcp;
3034
3035        local_irq_save(flags);
3036        pset = per_cpu_ptr(zone->pageset, cpu);
3037
3038        pcp = &pset->pcp;
3039        if (pcp->count)
3040                free_pcppages_bulk(zone, pcp->count, pcp);
3041        local_irq_restore(flags);
3042}
3043
3044/*
3045 * Drain pcplists of all zones on the indicated processor.
3046 *
3047 * The processor must either be the current processor and the
3048 * thread pinned to the current processor or a processor that
3049 * is not online.
3050 */
3051static void drain_pages(unsigned int cpu)
3052{
3053        struct zone *zone;
3054
3055        for_each_populated_zone(zone) {
3056                drain_pages_zone(cpu, zone);
3057        }
3058}
3059
3060/*
3061 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
3062 *
3063 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
3064 * the single zone's pages.
3065 */
3066void drain_local_pages(struct zone *zone)
3067{
3068        int cpu = smp_processor_id();
3069
3070        if (zone)
3071                drain_pages_zone(cpu, zone);
3072        else
3073                drain_pages(cpu);
3074}
3075
3076static void drain_local_pages_wq(struct work_struct *work)
3077{
3078        struct pcpu_drain *drain;
3079
3080        drain = container_of(work, struct pcpu_drain, work);
3081
3082        /*
3083         * drain_all_pages doesn't use proper cpu hotplug protection so
3084         * we can race with cpu offline when the WQ can move this from
3085         * a cpu pinned worker to an unbound one. We can end up running on a
3086         * different cpu, which is alright, but we must make sure not to
3087         * migrate to yet another cpu while draining.
3088         */
3089        preempt_disable();
3090        drain_local_pages(drain->zone);
3091        preempt_enable();
3092}
3093
3094/*
3095 * The implementation of drain_all_pages(), exposing an extra parameter to
3096 * drain on all cpus.
3097 *
3098 * drain_all_pages() is optimized to only execute on cpus where pcplists are
3099 * not empty. The check for non-emptiness can however race with a free to
3100 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
3101 * that need the guarantee that every CPU has drained can disable the
3102 * optimizing racy check.
3103 */
3104static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
3105{
3106        int cpu;
3107
3108        /*
3109         * Allocate in the BSS so we won't require allocation in
3110         * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y.
3111         */
3112        static cpumask_t cpus_with_pcps;
3113
3114        /*
3115         * Make sure nobody triggers this path before mm_percpu_wq is fully
3116         * initialized.
3117         */
3118        if (WARN_ON_ONCE(!mm_percpu_wq))
3119                return;
3120
3121        /*
3122         * Do not drain if one is already in progress unless it's specific to
3123         * a zone. Such callers are primarily CMA and memory hotplug and need
3124         * the drain to be complete when the call returns.
3125         */
3126        if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
3127                if (!zone)
3128                        return;
3129                mutex_lock(&pcpu_drain_mutex);
3130        }
3131
3132        /*
3133         * We don't care about racing with CPU hotplug event
3134         * as offline notification will cause the notified
3135         * cpu to drain that CPU pcps and on_each_cpu_mask
3136         * disables preemption as part of its processing
3137         */
3138        for_each_online_cpu(cpu) {
3139                struct per_cpu_pageset *pcp;
3140                struct zone *z;
3141                bool has_pcps = false;
3142
3143                if (force_all_cpus) {
3144                        /*
3145                         * The pcp.count check is racy, some callers need a
3146                         * guarantee that no cpu is missed.
3147                         */
3148                        has_pcps = true;
3149                } else if (zone) {
3150                        pcp = per_cpu_ptr(zone->pageset, cpu);
3151                        if (pcp->pcp.count)
3152                                has_pcps = true;
3153                } else {
3154                        for_each_populated_zone(z) {
3155                                pcp = per_cpu_ptr(z->pageset, cpu);
3156                                if (pcp->pcp.count) {
3157                                        has_pcps = true;
3158                                        break;
3159                                }
3160                        }
3161                }
3162
3163                if (has_pcps)
3164                        cpumask_set_cpu(cpu, &cpus_with_pcps);
3165                else
3166                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
3167        }
3168
3169        for_each_cpu(cpu, &cpus_with_pcps) {
3170                struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
3171
3172                drain->zone = zone;
3173                INIT_WORK(&drain->work, drain_local_pages_wq);
3174                queue_work_on(cpu, mm_percpu_wq, &drain->work);
3175        }
3176        for_each_cpu(cpu, &cpus_with_pcps)
3177                flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
3178
3179        mutex_unlock(&pcpu_drain_mutex);
3180}
3181
3182/*
3183 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
3184 *
3185 * When zone parameter is non-NULL, spill just the single zone's pages.
3186 *
3187 * Note that this can be extremely slow as the draining happens in a workqueue.
3188 */
3189void drain_all_pages(struct zone *zone)
3190{
3191        __drain_all_pages(zone, false);
3192}
3193
3194#ifdef CONFIG_HIBERNATION
3195
3196/*
3197 * Touch the watchdog for every WD_PAGE_COUNT pages.
3198 */
3199#define WD_PAGE_COUNT   (128*1024)
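/*
 * Illustrative arithmetic (assuming 4 KiB pages): 128*1024 pages is 512 MiB
 * of memory scanned between watchdog touches.
 */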
3200
3201void mark_free_pages(struct zone *zone)
3202{
3203        unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
3204        unsigned long flags;
3205        unsigned int order, t;
3206        struct page *page;
3207
3208        if (zone_is_empty(zone))
3209                return;
3210
3211        spin_lock_irqsave(&zone->lock, flags);
3212
3213        max_zone_pfn = zone_end_pfn(zone);
3214        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
3215                if (pfn_valid(pfn)) {
3216                        page = pfn_to_page(pfn);
3217
3218                        if (!--page_count) {
3219                                touch_nmi_watchdog();
3220                                page_count = WD_PAGE_COUNT;
3221                        }
3222
3223                        if (page_zone(page) != zone)
3224                                continue;
3225
3226                        if (!swsusp_page_is_forbidden(page))
3227                                swsusp_unset_page_free(page);
3228                }
3229
3230        for_each_migratetype_order(order, t) {
3231                list_for_each_entry(page,
3232                                &zone->free_area[order].free_list[t], lru) {
3233                        unsigned long i;
3234
3235                        pfn = page_to_pfn(page);
3236                        for (i = 0; i < (1UL << order); i++) {
3237                                if (!--page_count) {
3238                                        touch_nmi_watchdog();
3239                                        page_count = WD_PAGE_COUNT;
3240                                }
3241                                swsusp_set_page_free(pfn_to_page(pfn + i));
3242                        }
3243                }
3244        }
3245        spin_unlock_irqrestore(&zone->lock, flags);
3246}
3247#endif /* CONFIG_HIBERNATION */
3248
3249static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
3250{
3251        int migratetype;
3252
3253        if (!free_pcp_prepare(page))
3254                return false;
3255
3256        migratetype = get_pfnblock_migratetype(page, pfn);
3257        set_pcppage_migratetype(page, migratetype);
3258        return true;
3259}
3260
3261static void free_unref_page_commit(struct page *page, unsigned long pfn)
3262{
3263        struct zone *zone = page_zone(page);
3264        struct per_cpu_pages *pcp;
3265        int migratetype;
3266
3267        migratetype = get_pcppage_migratetype(page);
3268        __count_vm_event(PGFREE);
3269
3270        /*
3271         * We only track unmovable, reclaimable and movable on pcp lists.
3272         * Free ISOLATE pages back to the allocator because they are being
3273         * offlined, but treat HIGHATOMIC pages as movable so we can get
3274         * those areas back if necessary. Otherwise, we may have to free
3275         * excessively into the page allocator.
3276         */
3277        if (migratetype >= MIGRATE_PCPTYPES) {
3278                if (unlikely(is_migrate_isolate(migratetype))) {
3279                        free_one_page(zone, page, pfn, 0, migratetype,
3280                                      FPI_NONE);
3281                        return;
3282                }
3283                migratetype = MIGRATE_MOVABLE;
3284        }
3285
3286        pcp = &this_cpu_ptr(zone->pageset)->pcp;
3287        list_add(&page->lru, &pcp->lists[migratetype]);
3288        pcp->count++;
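        /*
         * Illustrative example (the numbers are hypothetical; pcp->high and
         * pcp->batch are tuned per zone at runtime): with high == 378 and
         * batch == 63, the free that pushes pcp->count to 378 returns 63
         * pages to the buddy lists in one batch below.
         */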
3289        if (pcp->count >= READ_ONCE(pcp->high))
3290                free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp);
3291}
3292
3293/*
3294 * Free a 0-order page
3295 */
3296void free_unref_page(struct page *page)
3297{
3298        unsigned long flags;
3299        unsigned long pfn = page_to_pfn(page);
3300
3301        if (!free_unref_page_prepare(page, pfn))
3302                return;
3303
3304        local_irq_save(flags);
3305        free_unref_page_commit(page, pfn);
3306        local_irq_restore(flags);
3307}
3308
3309/*
3310 * Free a list of 0-order pages
3311 */
3312void free_unref_page_list(struct list_head *list)
3313{
3314        struct page *page, *next;
3315        unsigned long flags, pfn;
3316        int batch_count = 0;
3317
3318        /* Prepare pages for freeing */
3319        list_for_each_entry_safe(page, next, list, lru) {
3320                pfn = page_to_pfn(page);
3321                if (!free_unref_page_prepare(page, pfn))
3322                        list_del(&page->lru);
3323                set_page_private(page, pfn);
3324        }
3325
3326        local_irq_save(flags);
3327        list_for_each_entry_safe(page, next, list, lru) {
3328                unsigned long pfn = page_private(page);
3329
3330                set_page_private(page, 0);
3331                trace_mm_page_free_batched(page);
3332                free_unref_page_commit(page, pfn);
3333
3334                /*
3335                 * Guard against excessive IRQ disabled times when we get
3336                 * a large list of pages to free.
3337                 */
3338                if (++batch_count == SWAP_CLUSTER_MAX) {
3339                        local_irq_restore(flags);
3340                        batch_count = 0;
3341                        local_irq_save(flags);
3342                }
3343        }
3344        local_irq_restore(flags);
3345}
3346
3347/*
3348 * split_page takes a non-compound higher-order page, and splits it into
3349 * n (1<<order) sub-pages: page[0..n-1]
3350 * Each sub-page must be freed individually.
3351 *
3352 * Note: this is probably too low level an operation for use in drivers.
3353 * Please consult with lkml before using this in your driver.
3354 */
3355void split_page(struct page *page, unsigned int order)
3356{
3357        int i;
3358
3359        VM_BUG_ON_PAGE(PageCompound(page), page);
3360        VM_BUG_ON_PAGE(!page_count(page), page);
3361
3362        for (i = 1; i < (1 << order); i++)
3363                set_page_refcounted(page + i);
3364        split_page_owner(page, 1 << order);
3365        split_page_memcg(page, 1 << order);
3366}
3367EXPORT_SYMBOL_GPL(split_page);
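/*
 * Usage sketch (illustrative only, not taken from an in-tree caller): a
 * driver that wants four individually freeable pages could do
 *
 *      page = alloc_pages(GFP_KERNEL, 2);
 *      split_page(page, 2);
 *      ...
 *      for (i = 0; i < 4; i++)
 *              __free_page(page + i);
 *
 * after which each order-0 sub-page carries its own reference count.
 */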
3368
3369int __isolate_free_page(struct page *page, unsigned int order)
3370{
3371        unsigned long watermark;
3372        struct zone *zone;
3373        int mt;
3374
3375        BUG_ON(!PageBuddy(page));
3376
3377        zone = page_zone(page);
3378        mt = get_pageblock_migratetype(page);
3379
3380        if (!is_migrate_isolate(mt)) {
3381                /*
3382                 * Obey watermarks as if the page was being allocated. We can
3383                 * emulate a high-order watermark check with a raised order-0
3384                 * watermark, because we already know our high-order page
3385                 * exists.
3386                 */
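                /*
                 * Worked example (illustrative): for an order-3 page the
                 * check below demands min_wmark + 8 free base pages, so
                 * removing the 8 pages cannot drop the zone below its min
                 * watermark.
                 */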
3387                watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
3388                if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
3389                        return 0;
3390
3391                __mod_zone_freepage_state(zone, -(1UL << order), mt);
3392        }
3393
3394        /* Remove page from free list */
3396        del_page_from_free_list(page, zone, order);
3397
3398        /*
3399         * Set the pageblock's migratetype if the isolated page covers at
3400         * least half of a pageblock.
3401         */
3402        if (order >= pageblock_order - 1) {
3403                struct page *endpage = page + (1 << order) - 1;
3404                for (; page < endpage; page += pageblock_nr_pages) {
3405                        int mt = get_pageblock_migratetype(page);
3406                        if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
3407                            && !is_migrate_highatomic(mt))
3408                                set_pageblock_migratetype(page,
3409                                                          MIGRATE_MOVABLE);
3410                }
3411        }
3412
3414        return 1UL << order;
3415}
3416
3417/**
3418 * __putback_isolated_page - Return a now-isolated page back where we got it
3419 * @page: Page that was isolated
3420 * @order: Order of the isolated page
3421 * @mt: The page's pageblock's migratetype
3422 *
3423 * This function is meant to return a page pulled from the free lists via
3424 * __isolate_free_page() back to the free list it was pulled from.
3425 */
3426void __putback_isolated_page(struct page *page, unsigned int order, int mt)
3427{
3428        struct zone *zone = page_zone(page);
3429
3430        /* zone lock should be held when this function is called */
3431        lockdep_assert_held(&zone->lock);
3432
3433        /* Return isolated page to tail of freelist. */
3434        __free_one_page(page, page_to_pfn(page), zone, order, mt,
3435                        FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
3436}
3437
3438/*
3439 * Update NUMA hit/miss statistics
3440 *
3441 * Must be called with interrupts disabled.
3442 */
3443static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
3444{
3445#ifdef CONFIG_NUMA
3446        enum numa_stat_item local_stat = NUMA_LOCAL;
3447
3448        /* skip numa counters update if numa stats is disabled */
3449        if (!static_branch_likely(&vm_numa_stat_key))
3450                return;
3451
3452        if (zone_to_nid(z) != numa_node_id())
3453                local_stat = NUMA_OTHER;
3454
3455        if (zone_to_nid(z) == zone_to_nid(preferred_zone))
3456                __inc_numa_state(z, NUMA_HIT);
3457        else {
3458                __inc_numa_state(z, NUMA_MISS);
3459                __inc_numa_state(preferred_zone, NUMA_FOREIGN);
3460        }
3461        __inc_numa_state(z, local_stat);
3462#endif
3463}
3464
3465/* Remove page from the per-cpu list, caller must protect the list */
3466static inline
3467struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
3468                        unsigned int alloc_flags,
3469                        struct per_cpu_pages *pcp,
3470                        struct list_head *list)
3471{
3472        struct page *page;
3473
3474        do {
3475                if (list_empty(list)) {
3476                        pcp->count += rmqueue_bulk(zone, 0,
3477                                        READ_ONCE(pcp->batch), list,
3478                                        migratetype, alloc_flags);
3479                        if (unlikely(list_empty(list)))
3480                                return NULL;
3481                }
3482
3483                page = list_first_entry(list, struct page, lru);
3484                list_del(&page->lru);
3485                pcp->count--;
3486        } while (check_new_pcp(page));
3487
3488        return page;
3489}
3490
3491/* Lock and remove page from the per-cpu list */
3492static struct page *rmqueue_pcplist(struct zone *preferred_zone,
3493                        struct zone *zone, gfp_t gfp_flags,
3494                        int migratetype, unsigned int alloc_flags)
3495{
3496        struct per_cpu_pages *pcp;
3497        struct list_head *list;
3498        struct page *page;
3499        unsigned long flags;
3500
3501        local_irq_save(flags);
3502        pcp = &this_cpu_ptr(zone->pageset)->pcp;
3503        list = &pcp->lists[migratetype];
3504        page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
3505        if (page) {
3506                __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
3507                zone_statistics(preferred_zone, zone);
3508        }
3509        local_irq_restore(flags);
3510        return page;
3511}
3512
3513/*
3514 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
3515 */
3516static inline
3517struct page *rmqueue(struct zone *preferred_zone,
3518                        struct zone *zone, unsigned int order,
3519                        gfp_t gfp_flags, unsigned int alloc_flags,
3520                        int migratetype)
3521{
3522        unsigned long flags;
3523        struct page *page;
3524
3525        if (likely(order == 0)) {
3526                /*
3527                 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
3528                 * we need to skip it when CMA area isn't allowed.
3529                 */
3530                if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
3531                                migratetype != MIGRATE_MOVABLE) {
3532                        page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
3533                                        migratetype, alloc_flags);
3534                        goto out;
3535                }
3536        }
3537
3538        /*
3539         * We most definitely don't want callers attempting to
3540         * allocate greater than order-1 page units with __GFP_NOFAIL.
3541         */
3542        WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
3543        spin_lock_irqsave(&zone->lock, flags);
3544
3545        do {
3546                page = NULL;
3547                /*
3548                 * An order-0 request can reach here when the pcplist is skipped
3549                 * due to a non-CMA allocation context. The HIGHATOMIC area is
3550                 * reserved for high-order atomic allocations, so an order-0
3551                 * request should skip it.
3552                 */
3553                if (order > 0 && alloc_flags & ALLOC_HARDER) {
3554                        page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
3555                        if (page)
3556                                trace_mm_page_alloc_zone_locked(page, order, migratetype);
3557                }
3558                if (!page)
3559                        page = __rmqueue(zone, order, migratetype, alloc_flags);
3560        } while (page && check_new_pages(page, order));
3561        spin_unlock(&zone->lock);
3562        if (!page)
3563                goto failed;
3564        __mod_zone_freepage_state(zone, -(1 << order),
3565                                  get_pcppage_migratetype(page));
3566
3567        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
3568        zone_statistics(preferred_zone, zone);
3569        local_irq_restore(flags);
3570
3571out:
3572        /* Separate test+clear to avoid unnecessary atomics */
3573        if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
3574                clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
3575                wakeup_kswapd(zone, 0, 0, zone_idx(zone));
3576        }
3577
3578        VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
3579        return page;
3580
3581failed:
3582        local_irq_restore(flags);
3583        return NULL;
3584}
3585
3586#ifdef CONFIG_FAIL_PAGE_ALLOC
3587
3588static struct {
3589        struct fault_attr attr;
3590
3591        bool ignore_gfp_highmem;
3592        bool ignore_gfp_reclaim;
3593        u32 min_order;
3594} fail_page_alloc = {
3595        .attr = FAULT_ATTR_INITIALIZER,
3596        .ignore_gfp_reclaim = true,
3597        .ignore_gfp_highmem = true,
3598        .min_order = 1,
3599};
3600
3601static int __init setup_fail_page_alloc(char *str)
3602{
3603        return setup_fault_attr(&fail_page_alloc.attr, str);
3604}
3605__setup("fail_page_alloc=", setup_fail_page_alloc);
3606
3607static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3608{
3609        if (order < fail_page_alloc.min_order)
3610                return false;
3611        if (gfp_mask & __GFP_NOFAIL)
3612                return false;
3613        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
3614                return false;
3615        if (fail_page_alloc.ignore_gfp_reclaim &&
3616                        (gfp_mask & __GFP_DIRECT_RECLAIM))
3617                return false;
3618
3619        return should_fail(&fail_page_alloc.attr, 1 << order);
3620}
3621
3622#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
3623
3624static int __init fail_page_alloc_debugfs(void)
3625{
3626        umode_t mode = S_IFREG | 0600;
3627        struct dentry *dir;
3628
3629        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
3630                                        &fail_page_alloc.attr);
3631
3632        debugfs_create_bool("ignore-gfp-wait", mode, dir,
3633                            &fail_page_alloc.ignore_gfp_reclaim);
3634        debugfs_create_bool("ignore-gfp-highmem", mode, dir,
3635                            &fail_page_alloc.ignore_gfp_highmem);
3636        debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);
3637
3638        return 0;
3639}
3640
3641late_initcall(fail_page_alloc_debugfs);
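/*
 * Usage note (illustrative; see Documentation/fault-injection for the
 * authoritative reference): the attributes above appear under
 * <debugfs>/fail_page_alloc/ next to the generic fault_attr knobs, and the
 * facility can also be armed at boot with
 * fail_page_alloc=<interval>,<probability>,<space>,<times>.
 */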
3642
3643#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
3644
3645#else /* CONFIG_FAIL_PAGE_ALLOC */
3646
3647static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3648{
3649        return false;
3650}
3651
3652#endif /* CONFIG_FAIL_PAGE_ALLOC */
3653
3654noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3655{
3656        return __should_fail_alloc_page(gfp_mask, order);
3657}
3658ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3659
3660static inline long __zone_watermark_unusable_free(struct zone *z,
3661                                unsigned int order, unsigned int alloc_flags)
3662{
3663        const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3664        long unusable_free = (1 << order) - 1;
3665
3666        /*
3667         * If the caller does not have rights to ALLOC_HARDER then subtract
3668         * the high-atomic reserves. This will over-estimate the size of the
3669         * atomic reserve but it avoids a search.
3670         */
3671        if (likely(!alloc_harder))
3672                unusable_free += z->nr_reserved_highatomic;
3673
3674#ifdef CONFIG_CMA
3675        /* If allocation can't use CMA areas don't use free CMA pages */
3676        if (!(alloc_flags & ALLOC_CMA))
3677                unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3678#endif
3679
3680        return unusable_free;
3681}
3682
3683/*
3684 * Return true if free base pages are above 'mark'. For high-order checks it
3685 * will return true if the order-0 watermark is reached and there is at least
3686 * one free page of a suitable size. Checking now avoids taking the zone lock
3687 * to check in the allocation paths if no pages are free.
3688 */
3689bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3690                         int highest_zoneidx, unsigned int alloc_flags,
3691                         long free_pages)
3692{
3693        long min = mark;
3694        int o;
3695        const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3696
3697        /* free_pages may go negative - that's OK */
3698        free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3699
3700        if (alloc_flags & ALLOC_HIGH)
3701                min -= min / 2;
3702
3703        if (unlikely(alloc_harder)) {
3704                /*
3705                 * OOM victims can try even harder than normal ALLOC_HARDER
3706                 * users on the grounds that it's definitely going to be in
3707                 * the exit path shortly and free memory. Any allocation it
3708                 * makes during the free path will be small and short-lived.
3709                 */
3710                if (alloc_flags & ALLOC_OOM)
3711                        min -= min / 2;
3712                else
3713                        min -= min / 4;
3714        }
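        /*
         * Illustrative arithmetic for the adjustments above (mark = 1024 is a
         * made-up value): ALLOC_HIGH lowers min to 512; an ALLOC_OOM victim
         * then gets 256 while other ALLOC_HARDER callers get 384.
         */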
3715
3716        /*
3717         * Check watermarks for an order-0 allocation request. If these
3718         * are not met, then a high-order request also cannot go ahead
3719         * even if a suitable page happened to be free.
3720         */
3721        if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
3722                return false;
3723
3724        /* If this is an order-0 request then the watermark is fine */
3725        if (!order)
3726                return true;
3727
3728        /* For a high-order request, check at least one suitable page is free */
3729        for (o = order; o < MAX_ORDER; o++) {
3730                struct free_area *area = &z->free_area[o];
3731                int mt;
3732
3733                if (!area->nr_free)
3734                        continue;
3735
3736                for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
3737                        if (!free_area_empty(area, mt))
3738                                return true;
3739                }
3740
3741#ifdef CONFIG_CMA
3742                if ((alloc_flags & ALLOC_CMA) &&
3743                    !free_area_empty(area, MIGRATE_CMA)) {
3744                        return true;
3745                }
3746#endif
3747                if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
3748                        return true;
3749        }
3750        return false;
3751}
3752
3753bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3754                      int highest_zoneidx, unsigned int alloc_flags)
3755{
3756        return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3757                                        zone_page_state(z, NR_FREE_PAGES));
3758}
3759
3760static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
3761                                unsigned long mark, int highest_zoneidx,
3762                                unsigned int alloc_flags, gfp_t gfp_mask)
3763{
3764        long free_pages;
3765
3766        free_pages = zone_page_state(z, NR_FREE_PAGES);
3767
3768        /*
3769         * Fast check for order-0 only. If this fails then the reserves
3770         * need to be calculated.
3771         */
3772        if (!order) {
3773                long fast_free;
3774
3775                fast_free = free_pages;
3776                fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
3777                if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
3778                        return true;
3779        }
3780
3781        if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
3782                                        free_pages))
3783                return true;
3784        /*
3785         * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
3786         * when checking the min watermark. The min watermark is the
3787         * point where boosting is ignored so that kswapd is woken up
3788         * when below the low watermark.
3789         */
3790        if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
3791                && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
3792                mark = z->_watermark[WMARK_MIN];
3793                return __zone_watermark_ok(z, order, mark, highest_zoneidx,
3794                                        alloc_flags, free_pages);
3795        }
3796
3797        return false;
3798}
3799
3800bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
3801                        unsigned long mark, int highest_zoneidx)
3802{
3803        long free_pages = zone_page_state(z, NR_FREE_PAGES);
3804
3805        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
3806                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
3807
3808        return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
3809                                                                free_pages);
3810}
3811
3812#ifdef CONFIG_NUMA
3813static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3814{
3815        return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
3816                                node_reclaim_distance;
3817}
3818#else   /* CONFIG_NUMA */
3819static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
3820{
3821        return true;
3822}
3823#endif  /* CONFIG_NUMA */
3824
3825/*
3826 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3827 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3828 * premature use of a lower zone may cause lowmem pressure problems that
3829 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3830 * probably too small. It only makes sense to spread allocations to avoid
3831 * fragmentation between the Normal and DMA32 zones.
3832 */
3833static inline unsigned int
3834alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3835{
3836        unsigned int alloc_flags;
3837
3838        /*
3839         * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
3840         * to save a branch.
3841         */
3842        alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3843
3844#ifdef CONFIG_ZONE_DMA32
3845        if (!zone)
3846                return alloc_flags;
3847
3848        if (zone_idx(zone) != ZONE_NORMAL)
3849                return alloc_flags;
3850
3851        /*
3852         * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
3853         * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
3854         * on UMA that if Normal is populated then so is DMA32.
3855         */
3856        BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
3857        if (nr_online_nodes > 1 && !populated_zone(--zone))
3858                return alloc_flags;
3859
3860        alloc_flags |= ALLOC_NOFRAGMENT;
3861#endif /* CONFIG_ZONE_DMA32 */
3862        return alloc_flags;
3863}
3864
3865/* Must be called after current_gfp_context() which can change gfp_mask */
3866static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3867                                                  unsigned int alloc_flags)
3868{
3869#ifdef CONFIG_CMA
3870        if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3871                alloc_flags |= ALLOC_CMA;
3872#endif
3873        return alloc_flags;
3874}
3875
3876/*
3877 * get_page_from_freelist goes through the zonelist trying to allocate
3878 * a page.
3879 */
3880static struct page *
3881get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3882                                                const struct alloc_context *ac)
3883{
3884        struct zoneref *z;
3885        struct zone *zone;
3886        struct pglist_data *last_pgdat_dirty_limit = NULL;
3887        bool no_fallback;
3888
3889retry:
3890        /*
3891         * Scan zonelist, looking for a zone with enough free.
3892         * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
3893         */
3894        no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
3895        z = ac->preferred_zoneref;
3896        for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
3897                                        ac->nodemask) {
3898                struct page *page;
3899                unsigned long mark;
3900
3901                if (cpusets_enabled() &&
3902                        (alloc_flags & ALLOC_CPUSET) &&
3903                        !__cpuset_zone_allowed(zone, gfp_mask))
3904                                continue;
3905                /*
3906                 * When allocating a page cache page for writing, we
3907                 * want to get it from a node that is within its dirty
3908                 * limit, such that no single node holds more than its
3909                 * proportional share of globally allowed dirty pages.
3910                 * The dirty limits take into account the node's
3911                 * lowmem reserves and high watermark so that kswapd
3912                 * should be able to balance it without having to
3913                 * write pages from its LRU list.
3914                 *
3915                 * XXX: For now, allow allocations to potentially
3916                 * exceed the per-node dirty limit in the slowpath
3917                 * (spread_dirty_pages unset) before going into reclaim,
3918                 * which is important when on a NUMA setup the allowed
3919                 * nodes are together not big enough to reach the
3920                 * global limit.  The proper fix for these situations
3921                 * will require awareness of nodes in the
3922                 * dirty-throttling and the flusher threads.
3923                 */
3924                if (ac->spread_dirty_pages) {
3925                        if (last_pgdat_dirty_limit == zone->zone_pgdat)
3926                                continue;
3927
3928                        if (!node_dirty_ok(zone->zone_pgdat)) {
3929                                last_pgdat_dirty_limit = zone->zone_pgdat;
3930                                continue;
3931                        }
3932                }
3933
3934                if (no_fallback && nr_online_nodes > 1 &&
3935                    zone != ac->preferred_zoneref->zone) {
3936                        int local_nid;
3937
3938                        /*
3939                         * If moving to a remote node, retry but allow
3940                         * fragmenting fallbacks. Locality is more important
3941                         * than fragmentation avoidance.
3942                         */
3943                        local_nid = zone_to_nid(ac->preferred_zoneref->zone);
3944                        if (zone_to_nid(zone) != local_nid) {
3945                                alloc_flags &= ~ALLOC_NOFRAGMENT;
3946                                goto retry;
3947                        }
3948                }
3949
3950                mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
3951                if (!zone_watermark_fast(zone, order, mark,
3952                                       ac->highest_zoneidx, alloc_flags,
3953                                       gfp_mask)) {
3954                        int ret;
3955
3956#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
3957                        /*
3958                         * Watermark failed for this zone, but see if we can
3959                         * grow this zone if it contains deferred pages.
3960                         */
3961                        if (static_branch_unlikely(&deferred_pages)) {
3962                                if (_deferred_grow_zone(zone, order))
3963                                        goto try_this_zone;
3964                        }
3965#endif
3966                        /* Checked here to keep the fast path fast */
3967                        BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
3968                        if (alloc_flags & ALLOC_NO_WATERMARKS)
3969                                goto try_this_zone;
3970
3971                        if (!node_reclaim_enabled() ||
3972                            !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
3973                                continue;
3974
3975                        ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3976                        switch (ret) {
3977                        case NODE_RECLAIM_NOSCAN:
3978                                /* did not scan */
3979                                continue;
3980                        case NODE_RECLAIM_FULL:
3981                                /* scanned but unreclaimable */
3982                                continue;
3983                        default:
3984                                /* did we reclaim enough */
3985                                if (zone_watermark_ok(zone, order, mark,
3986                                        ac->highest_zoneidx, alloc_flags))
3987                                        goto try_this_zone;
3988
3989                                continue;
3990                        }
3991                }
3992
3993try_this_zone:
3994                page = rmqueue(ac->preferred_zoneref->zone, zone, order,
3995                                gfp_mask, alloc_flags, ac->migratetype);
3996                if (page) {
3997                        prep_new_page(page, order, gfp_mask, alloc_flags);
3998
3999                        /*
4000                         * If this is a high-order atomic allocation then check
4001                         * if the pageblock should be reserved for the future
4002                         */
4003                        if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
4004                                reserve_highatomic_pageblock(page, zone, order);
4005
4006                        return page;
4007                } else {
4008#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
4009                        /* Try again if zone has deferred pages */
4010                        if (static_branch_unlikely(&deferred_pages)) {
4011                                if (_deferred_grow_zone(zone, order))
4012                                        goto try_this_zone;
4013                        }
4014#endif
4015                }
4016        }
4017
4018        /*
4019         * It's possible on a UMA machine to get through all zones that are
4020         * fragmented. If avoiding fragmentation, reset and try again.
4021         */
4022        if (no_fallback) {
4023                alloc_flags &= ~ALLOC_NOFRAGMENT;
4024                goto retry;
4025        }
4026
4027        return NULL;
4028}
4029
4030static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
4031{
4032        unsigned int filter = SHOW_MEM_FILTER_NODES;
4033
4034        /*
4035         * This documents exceptions given to allocations in certain
4036         * contexts that are allowed to allocate outside current's set
4037         * of allowed nodes.
4038         */
4039        if (!(gfp_mask & __GFP_NOMEMALLOC))
4040                if (tsk_is_oom_victim(current) ||
4041                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
4042                        filter &= ~SHOW_MEM_FILTER_NODES;
4043        if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
4044                filter &= ~SHOW_MEM_FILTER_NODES;
4045
4046        show_mem(filter, nodemask);
4047}
4048
4049void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
4050{
4051        struct va_format vaf;
4052        va_list args;
4053        static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
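        /* The ratelimit above allows at most one report every 10 seconds. */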
4054
4055        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
4056                return;
4057
4058        va_start(args, fmt);
4059        vaf.fmt = fmt;
4060        vaf.va = &args;
4061        pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
4062                        current->comm, &vaf, gfp_mask, &gfp_mask,
4063                        nodemask_pr_args(nodemask));
4064        va_end(args);
4065
4066        cpuset_print_current_mems_allowed();
4067        pr_cont("\n");
4068        dump_stack();
4069        warn_alloc_show_mem(gfp_mask, nodemask);
4070}
4071
4072static inline struct page *
4073__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
4074                              unsigned int alloc_flags,
4075                              const struct alloc_context *ac)
4076{
4077        struct page *page;
4078
4079        page = get_page_from_freelist(gfp_mask, order,
4080                        alloc_flags|ALLOC_CPUSET, ac);
4081        /*
4082         * fallback to ignore cpuset restriction if our nodes
4083         * are depleted
4084         */
4085        if (!page)
4086                page = get_page_from_freelist(gfp_mask, order,
4087                                alloc_flags, ac);
4088
4089        return page;
4090}
4091
4092static inline struct page *
4093__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
4094        const struct alloc_context *ac, unsigned long *did_some_progress)
4095{
4096        struct oom_control oc = {
4097                .zonelist = ac->zonelist,
4098                .nodemask = ac->nodemask,
4099                .memcg = NULL,
4100                .gfp_mask = gfp_mask,
4101                .order = order,
4102        };
4103        struct page *page;
4104
4105        *did_some_progress = 0;
4106
4107        /*
4108         * Acquire the oom lock.  If that fails, somebody else is
4109         * making progress for us.
4110         */
4111        if (!mutex_trylock(&oom_lock)) {
4112                *did_some_progress = 1;
4113                schedule_timeout_uninterruptible(1);
4114                return NULL;
4115        }
4116
4117        /*
4118         * Go through the zonelist yet one more time, keeping a very high
4119         * watermark; this only catches a parallel oom killing, and we must
4120         * fail if we're still under heavy pressure. Also make sure this
4121         * reclaim attempt does not depend on a __GFP_DIRECT_RECLAIM &&
4122         * !__GFP_NORETRY allocation, which would never fail with oom_lock held.
4123         */
4124        page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
4125                                      ~__GFP_DIRECT_RECLAIM, order,
4126                                      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
4127        if (page)
4128                goto out;
4129
4130        /* Coredumps can quickly deplete all memory reserves */
4131        if (current->flags & PF_DUMPCORE)
4132                goto out;
4133        /* The OOM killer will not help higher order allocs */
4134        if (order > PAGE_ALLOC_COSTLY_ORDER)
4135                goto out;
4136        /*
4137         * We have already exhausted all our reclaim opportunities without any
4138         * success so it is time to admit defeat. We will skip the OOM killer
4139         * because it is very likely that the caller has a more reasonable
4140         * fallback than shooting a random task.
4141         *
4142         * The OOM killer may not free memory on a specific node.
4143         */
4144        if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4145                goto out;
4146        /* The OOM killer does not needlessly kill tasks for lowmem */
4147        if (ac->highest_zoneidx < ZONE_NORMAL)
4148                goto out;
4149        if (pm_suspended_storage())
4150                goto out;
4151        /*
4152         * XXX: GFP_NOFS allocations should rather fail than rely on
4153         * other requests to make forward progress.
4154         * We are in an unfortunate situation where out_of_memory cannot
4155         * do much for this context but let's try it to at least get
4156         * access to memory reserves if the current task is killed (see
4157         * out_of_memory). Once filesystems are ready to handle allocation
4158         * failures more gracefully we should just bail out here.
4159         */
4160
4161        /* Exhausted what can be done so it's blame time */
4162        if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
4163                *did_some_progress = 1;
4164
4165                /*
4166                 * Help non-failing allocations by giving them access to memory
4167                 * reserves
4168                 */
4169                if (gfp_mask & __GFP_NOFAIL)
4170                        page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4171                                        ALLOC_NO_WATERMARKS, ac);
4172        }
4173out:
4174        mutex_unlock(&oom_lock);
4175        return page;
4176}
4177
4178/*
4179 * Maximum number of compaction retries with progress before the OOM
4180 * killer is considered the only way to move forward.
4181 */
4182#define MAX_COMPACT_RETRIES 16
4183
4184#ifdef CONFIG_COMPACTION
4185/* Try memory compaction for high-order allocations before reclaim */
4186static struct page *
4187__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4188                unsigned int alloc_flags, const struct alloc_context *ac,
4189                enum compact_priority prio, enum compact_result *compact_result)
4190{
4191        struct page *page = NULL;
4192        unsigned long pflags;
4193        unsigned int noreclaim_flag;
4194
4195        if (!order)
4196                return NULL;
4197
4198        psi_memstall_enter(&pflags);
4199        noreclaim_flag = memalloc_noreclaim_save();
4200
4201        *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4202                                                                prio, &page);
4203
4204        memalloc_noreclaim_restore(noreclaim_flag);
4205        psi_memstall_leave(&pflags);
4206
4207        if (*compact_result == COMPACT_SKIPPED)
4208                return NULL;
4209        /*
4210         * At least in one zone compaction wasn't deferred or skipped, so let's
4211         * count a compaction stall
4212         */
4213        count_vm_event(COMPACTSTALL);
4214
4215        /* Prep a captured page if available */
4216        if (page)
4217                prep_new_page(page, order, gfp_mask, alloc_flags);
4218
4219        /* Try get a page from the freelist if available */
4220        if (!page)
4221                page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4222
4223        if (page) {
4224                struct zone *zone = page_zone(page);
4225
4226                zone->compact_blockskip_flush = false;
4227                compaction_defer_reset(zone, order, true);
4228                count_vm_event(COMPACTSUCCESS);
4229                return page;
4230        }
4231
4232        /*
4233         * It's bad if a compaction run occurs and fails. The most likely reason
4234         * is that pages exist, but not enough to satisfy watermarks.
4235         */
4236        count_vm_event(COMPACTFAIL);
4237
4238        cond_resched();
4239
4240        return NULL;
4241}
4242
4243static inline bool
4244should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
4245                     enum compact_result compact_result,
4246                     enum compact_priority *compact_priority,
4247                     int *compaction_retries)
4248{
4249        int max_retries = MAX_COMPACT_RETRIES;
4250        int min_priority;
4251        bool ret = false;
4252        int retries = *compaction_retries;
4253        enum compact_priority priority = *compact_priority;
4254
4255        if (!order)
4256                return false;
4257
4258        if (compaction_made_progress(compact_result))
4259                (*compaction_retries)++;
4260
4261        /*
4262         * compaction considers all the zones as desperately out of memory
4263         * so it doesn't really make much sense to retry except when the
4264         * failure could be caused by insufficient priority
4265         */
4266        if (compaction_failed(compact_result))
4267                goto check_priority;
4268
4269        /*
4270         * compaction was skipped because there are not enough order-0 pages
4271         * to work with, so we retry only if it looks like reclaim can help.
4272         */
4273        if (compaction_needs_reclaim(compact_result)) {
4274                ret = compaction_zonelist_suitable(ac, order, alloc_flags);
4275                goto out;
4276        }
4277
4278        /*
4279         * make sure the compaction wasn't deferred or didn't bail out early
4280         * due to lock contention before we declare that we should give up.
4281         * But the next retry should use a higher priority if allowed, so
4282         * we don't just keep bailing out endlessly.
4283         */
4284        if (compaction_withdrawn(compact_result))
4285                goto check_priority;
4287
4288        /*
4289         * !costly requests are much more important than __GFP_RETRY_MAYFAIL
4290         * costly ones because they are de facto nofail and invoke the OOM
4291         * killer to move on, while costly requests can fail and their users
4292         * are ready to cope with that. 1/4 retries is rather arbitrary but we
4293         * would need much more detailed feedback from compaction to
4294         * make a better decision.
4295         */
4296        if (order > PAGE_ALLOC_COSTLY_ORDER)
4297                max_retries /= 4;
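        /*
         * With MAX_COMPACT_RETRIES at 16, costly orders therefore get only 4
         * retries at a given priority before escalating below.
         */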
4298        if (*compaction_retries <= max_retries) {
4299                ret = true;
4300                goto out;
4301        }
4302
4303        /*
4304         * Make sure there are attempts at the highest priority if we exhausted
4305         * all retries or failed at the lower priorities.
4306         */
4307check_priority:
4308        min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
4309                        MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
4310
4311        if (*compact_priority > min_priority) {
4312                (*compact_priority)--;
4313                *compaction_retries = 0;
4314                ret = true;
4315        }
4316out:
4317        trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
4318        return ret;
4319}
4320#else
4321static inline struct page *
4322__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4323                unsigned int alloc_flags, const struct alloc_context *ac,
4324                enum compact_priority prio, enum compact_result *compact_result)
4325{
4326        *compact_result = COMPACT_SKIPPED;
4327        return NULL;
4328}
4329
4330static inline bool
4331should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
4332                     enum compact_result compact_result,
4333                     enum compact_priority *compact_priority,
4334                     int *compaction_retries)
4335{
4336        struct zone *zone;
4337        struct zoneref *z;
4338
4339        if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
4340                return false;
4341
4342        /*
4343         * There are setups with compaction disabled which would prefer to loop
4344         * inside the allocator rather than hit the oom killer prematurely.
4345         * Let's give them a good hope and keep retrying while the order-0
4346         * watermarks are OK.
4347         */
4348        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4349                                ac->highest_zoneidx, ac->nodemask) {
4350                if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
4351                                        ac->highest_zoneidx, alloc_flags))
4352                        return true;
4353        }
4354        return false;
4355}
4356#endif /* CONFIG_COMPACTION */
4357
4358#ifdef CONFIG_LOCKDEP
4359static struct lockdep_map __fs_reclaim_map =
4360        STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
4361
4362static bool __need_reclaim(gfp_t gfp_mask)
4363{
4364        /* no reclaim without waiting on it */
4365        if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
4366                return false;
4367
4368        /* this task won't enter reclaim */
4369        if (current->flags & PF_MEMALLOC)
4370                return false;
4371
4372        if (gfp_mask & __GFP_NOLOCKDEP)
4373                return false;
4374
4375        return true;
4376}
4377
4378void __fs_reclaim_acquire(void)
4379{
4380        lock_map_acquire(&__fs_reclaim_map);
4381}
4382
4383void __fs_reclaim_release(void)
4384{
4385        lock_map_release(&__fs_reclaim_map);
4386}
4387
4388void fs_reclaim_acquire(gfp_t gfp_mask)
4389{
4390        gfp_mask = current_gfp_context(gfp_mask);
4391
4392        if (__need_reclaim(gfp_mask)) {
4393                if (gfp_mask & __GFP_FS)
4394                        __fs_reclaim_acquire();
4395
4396#ifdef CONFIG_MMU_NOTIFIER
4397                lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
4398                lock_map_release(&__mmu_notifier_invalidate_range_start_map);
4399#endif
4400
4401        }
4402}
4403EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
4404
4405void fs_reclaim_release(gfp_t gfp_mask)
4406{
4407        gfp_mask = current_gfp_context(gfp_mask);
4408
4409        if (__need_reclaim(gfp_mask)) {
4410                if (gfp_mask & __GFP_FS)
4411                        __fs_reclaim_release();
4412        }
4413}
4414EXPORT_SYMBOL_GPL(fs_reclaim_release);
4415#endif
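
/*
 * Editorial sketch, not part of the original file: the fs_reclaim lockdep
 * tracking above honours scoped GFP constraints via current_gfp_context().
 * A caller that must not recurse into filesystem reclaim would typically
 * bracket its allocations with memalloc_nofs_save()/restore(); the helper
 * name below is hypothetical and only illustrates that pattern.
 */
static inline void *example_nofs_scoped_alloc(size_t size)
{
        unsigned int nofs_flags = memalloc_nofs_save();
        void *p;

        /* GFP_KERNEL is implicitly treated as GFP_NOFS within this scope. */
        p = kmalloc(size, GFP_KERNEL);

        memalloc_nofs_restore(nofs_flags);
        return p;
}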
4416
4417/* Perform direct synchronous page reclaim */
4418static unsigned long
4419__perform_reclaim(gfp_t gfp_mask, unsigned int order,
4420                                        const struct alloc_context *ac)
4421{
4422        unsigned int noreclaim_flag;
4423        unsigned long pflags, progress;
4424
4425        cond_resched();
4426
4427        /* We now go into synchronous reclaim */
4428        cpuset_memory_pressure_bump();
4429        psi_memstall_enter(&pflags);
4430        fs_reclaim_acquire(gfp_mask);
4431        noreclaim_flag = memalloc_noreclaim_save();
4432
4433        progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
4434                                                                ac->nodemask);
4435
4436        memalloc_noreclaim_restore(noreclaim_flag);
4437        fs_reclaim_release(gfp_mask);
4438        psi_memstall_leave(&pflags);
4439
4440        cond_resched();
4441
4442        return progress;
4443}
4444
4445/* The really slow allocator path where we enter direct reclaim */
4446static inline struct page *
4447__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
4448                unsigned int alloc_flags, const struct alloc_context *ac,
4449                unsigned long *did_some_progress)
4450{
4451        struct page *page = NULL;
4452        bool drained = false;
4453
4454        *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
4455        if (unlikely(!(*did_some_progress)))
4456                return NULL;
4457
4458retry:
4459        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4460
4461        /*
4462         * If an allocation failed after direct reclaim, it could be because
4463         * pages are pinned on the per-cpu lists or in the highatomic reserves.
4464         * Shrink them and try again.
4465         */
4466        if (!page && !drained) {
4467                unreserve_highatomic_pageblock(ac, false);
4468                drain_all_pages(NULL);
4469                drained = true;
4470                goto retry;
4471        }
4472
4473        return page;
4474}
4475
4476static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
4477                             const struct alloc_context *ac)
4478{
4479        struct zoneref *z;
4480        struct zone *zone;
4481        pg_data_t *last_pgdat = NULL;
4482        enum zone_type highest_zoneidx = ac->highest_zoneidx;
4483
4484        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
4485                                        ac->nodemask) {
4486                if (last_pgdat != zone->zone_pgdat)
4487                        wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
4488                last_pgdat = zone->zone_pgdat;
4489        }
4490}
4491
4492static inline unsigned int
4493gfp_to_alloc_flags(gfp_t gfp_mask)
4494{
4495        unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
4496
4497        /*
4498         * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
4499         * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
4500         * to save two branches.
4501         */
4502        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
4503        BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);
4504
4505        /*
4506         * The caller may dip into page reserves a bit more if it cannot run
4507         * direct reclaim, has a realtime scheduling policy, or is asking for
4508         * __GFP_HIGH memory.  GFP_ATOMIC requests will
4509         * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
4510         */
4511        alloc_flags |= (__force int)
4512                (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
4513
4514        if (gfp_mask & __GFP_ATOMIC) {
4515                /*
4516                 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
4517                 * if it can't schedule.
4518                 */
4519                if (!(gfp_mask & __GFP_NOMEMALLOC))
4520                        alloc_flags |= ALLOC_HARDER;
4521                /*
4522                 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
4523                 * comment for __cpuset_node_allowed().
4524                 */
4525                alloc_flags &= ~ALLOC_CPUSET;
4526        } else if (unlikely(rt_task(current)) && !in_interrupt())
4527                alloc_flags |= ALLOC_HARDER;
4528
4529        alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
4530
4531        return alloc_flags;
4532}
4533
4534static bool oom_reserves_allowed(struct task_struct *tsk)
4535{
4536        if (!tsk_is_oom_victim(tsk))
4537                return false;
4538
4539        /*
4540         * !MMU doesn't have an oom reaper, so give access to memory reserves
4541         * only to the thread with TIF_MEMDIE set.
4542         */
4543        if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
4544                return false;
4545
4546        return true;
4547}
4548
4549/*
4550 * Distinguish requests which really need access to full memory
4551 * reserves from oom victims which can live with a portion of it
4552 */
4553static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
4554{
4555        if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
4556                return 0;
4557        if (gfp_mask & __GFP_MEMALLOC)
4558                return ALLOC_NO_WATERMARKS;
4559        if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
4560                return ALLOC_NO_WATERMARKS;
4561        if (!in_interrupt()) {
4562                if (current->flags & PF_MEMALLOC)
4563                        return ALLOC_NO_WATERMARKS;
4564                else if (oom_reserves_allowed(current))
4565                        return ALLOC_OOM;
4566        }
4567
4568        return 0;
4569}
4570
4571bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
4572{
4573        return !!__gfp_pfmemalloc_flags(gfp_mask);
4574}
4575
4576/*
4577 * Checks whether it makes sense to retry the reclaim to make forward progress
4578 * for the given allocation request.
4579 *
4580 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
4581 * without success, or when we couldn't even meet the watermark if we
4582 * reclaimed all remaining pages on the LRU lists.
4583 *
4584 * Returns true if a retry is viable or false to enter the oom path.
4585 */
4586static inline bool
4587should_reclaim_retry(gfp_t gfp_mask, unsigned order,
4588                     struct alloc_context *ac, int alloc_flags,
4589                     bool did_some_progress, int *no_progress_loops)
4590{
4591        struct zone *zone;
4592        struct zoneref *z;
4593        bool ret = false;
4594
4595        /*
4596         * Costly allocations might have made some progress, but due to high
4597         * fragmentation this doesn't mean their order will become available,
4598         * so always increment the no-progress counter for them.
4599         */
4600        if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
4601                *no_progress_loops = 0;
4602        else
4603                (*no_progress_loops)++;
4604
4605        /*
4606         * Make sure we converge to OOM if we cannot make any progress
4607         * several times in a row.
4608         */
4609        if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
4610                /* Before OOM, exhaust highatomic_reserve */
4611                return unreserve_highatomic_pageblock(ac, true);
4612        }
4613
4614        /*
4615         * Keep reclaiming pages while there is a chance this will lead
4616         * somewhere.  If none of the target zones can satisfy our allocation
4617         * request even if all reclaimable pages are considered, then we have
4618         * no choice but to go OOM.
4619         */
4620        for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
4621                                ac->highest_zoneidx, ac->nodemask) {
4622                unsigned long available;
4623                unsigned long reclaimable;
4624                unsigned long min_wmark = min_wmark_pages(zone);
4625                bool wmark;
4626
4627                available = reclaimable = zone_reclaimable_pages(zone);
4628                available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
4629
4630                /*
4631                 * Would the allocation succeed if we reclaimed all
4632                 * reclaimable pages?
4633                 */
4634                wmark = __zone_watermark_ok(zone, order, min_wmark,
4635                                ac->highest_zoneidx, alloc_flags, available);
4636                trace_reclaim_retry_zone(z, order, reclaimable,
4637                                available, min_wmark, *no_progress_loops, wmark);
4638                if (wmark) {
4639                        /*
4640                         * If we didn't make any progress and have a lot of
4641                         * dirty + writeback pages then we should wait for
4642                         * IO to complete to slow down the reclaim and
4643                         * prevent a premature OOM.
4644                         */
4645                        if (!did_some_progress) {
4646                                unsigned long write_pending;
4647
4648                                write_pending = zone_page_state_snapshot(zone,
4649                                                        NR_ZONE_WRITE_PENDING);
4650
4651                                if (2 * write_pending > reclaimable) {
4652                                        congestion_wait(BLK_RW_ASYNC, HZ/10);
4653                                        return true;
4654                                }
4655                        }
4656
4657                        ret = true;
4658                        goto out;
4659                }
4660        }
4661
4662out:
4663        /*
4664         * Memory allocation/reclaim might be called from a WQ context and the
4665         * current implementation of the WQ concurrency control doesn't
4666         * recognize that a particular WQ is congested if the worker thread is
4667         * looping without ever sleeping. Therefore we have to do a short sleep
4668         * here rather than calling cond_resched().
4669         */
4670        if (current->flags & PF_WQ_WORKER)
4671                schedule_timeout_uninterruptible(1);
4672        else
4673                cond_resched();
4674        return ret;
4675}
4676
4677static inline bool
4678check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
4679{
4680        /*
4681         * It's possible that cpuset's mems_allowed and the nodemask from
4682         * mempolicy don't intersect. This should normally be dealt with by
4683         * policy_nodemask(), but it's possible to race with a cpuset update in
4684         * such a way that the check therein was true, and then it became false
4685         * before we got our cpuset_mems_cookie here.
4686         * This assumes that for all allocations, ac->nodemask can come only
4687         * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
4688         * when it does not intersect with the cpuset restrictions) or the
4689         * caller can deal with a violated nodemask.
4690         */
4691        if (cpusets_enabled() && ac->nodemask &&
4692                        !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
4693                ac->nodemask = NULL;
4694                return true;
4695        }
4696
4697        /*
4698         * When updating a task's mems_allowed or mempolicy nodemask, it is
4699         * possible to race with parallel threads in such a way that our
4700         * allocation can fail while the mask is being updated. If we are about
4701         * to fail, check if the cpuset changed during allocation and if so,
4702         * retry.
4703         */
4704        if (read_mems_allowed_retry(cpuset_mems_cookie))
4705                return true;
4706
4707        return false;
4708}
4709
4710static inline struct page *
4711__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4712                                                struct alloc_context *ac)
4713{
4714        bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4715        const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4716        struct page *page = NULL;
4717        unsigned int alloc_flags;
4718        unsigned long did_some_progress;
4719        enum compact_priority compact_priority;
4720        enum compact_result compact_result;
4721        int compaction_retries;
4722        int no_progress_loops;
4723        unsigned int cpuset_mems_cookie;
4724        int reserve_flags;
4725
4726        /*
4727         * We also sanity check to catch abuse of atomic reserves being used by
4728         * callers that are not in atomic context.
4729         */
4730        if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
4731                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
4732                gfp_mask &= ~__GFP_ATOMIC;
4733
4734retry_cpuset:
4735        compaction_retries = 0;
4736        no_progress_loops = 0;
4737        compact_priority = DEF_COMPACT_PRIORITY;
4738        cpuset_mems_cookie = read_mems_allowed_begin();
4739
4740        /*
4741         * The fast path uses conservative alloc_flags to succeed only until
4742         * kswapd needs to be woken up, and to avoid the cost of setting up
4743         * alloc_flags precisely. So we do that now.
4744         */
4745        alloc_flags = gfp_to_alloc_flags(gfp_mask);
4746
4747        /*
4748         * We need to recalculate the starting point for the zonelist iterator
4749         * because we might have used different nodemask in the fast path, or
4750         * there was a cpuset modification and we are retrying - otherwise we
4751         * could end up iterating over non-eligible zones endlessly.
4752         */
4753        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4754                                        ac->highest_zoneidx, ac->nodemask);
4755        if (!ac->preferred_zoneref->zone)
4756                goto nopage;
4757
4758        if (alloc_flags & ALLOC_KSWAPD)
4759                wake_all_kswapds(order, gfp_mask, ac);
4760
4761        /*
4762         * The adjusted alloc_flags might result in immediate success, so try
4763         * that first
4764         */
4765        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4766        if (page)
4767                goto got_pg;
4768
4769        /*
4770         * For costly allocations, try direct compaction first, as it's likely
4771         * that we have enough base pages and don't need to reclaim. For non-
4772         * movable high-order allocations, do that as well, as compaction will
4773         * try to prevent permanent fragmentation by migrating from blocks of the
4774         * same migratetype.
4775         * Don't try this for allocations that are allowed to ignore
4776         * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4777         */
4778        if (can_direct_reclaim &&
4779                        (costly_order ||
4780                           (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4781                        && !gfp_pfmemalloc_allowed(gfp_mask)) {
4782                page = __alloc_pages_direct_compact(gfp_mask, order,
4783                                                alloc_flags, ac,
4784                                                INIT_COMPACT_PRIORITY,
4785                                                &compact_result);
4786                if (page)
4787                        goto got_pg;
4788
4789                /*
4790                 * Checks for costly allocations with __GFP_NORETRY, which
4791                 * includes some THP page fault allocations
4792                 */
4793                if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4794                        /*
4795                         * If allocating entire pageblock(s) and compaction
4796                         * failed because all zones are below low watermarks
4797                         * or is prohibited because it recently failed at this
4798                         * order, fail immediately unless the allocator has
4799                         * requested compaction and reclaim retry.
4800                         *
4801                         * Reclaim is
4802                         *  - potentially very expensive because zones are far
4803                         *    below their low watermarks or this is part of very
4804                         *    bursty high order allocations,
4805                         *  - not guaranteed to help because isolate_freepages()
4806                         *    may not iterate over freed pages as part of its
4807                         *    linear scan, and
4808                         *  - unlikely to make entire pageblocks free on its
4809                         *    own.
4810                         */
4811                        if (compact_result == COMPACT_SKIPPED ||
4812                            compact_result == COMPACT_DEFERRED)
4813                                goto nopage;
4814
4815                        /*
4816                         * Looks like reclaim/compaction is worth trying, but
4817                         * sync compaction could be very expensive, so keep
4818                         * using async compaction.
4819                         */
4820                        compact_priority = INIT_COMPACT_PRIORITY;
4821                }
4822        }
4823
4824retry:
4825        /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
4826        if (alloc_flags & ALLOC_KSWAPD)
4827                wake_all_kswapds(order, gfp_mask, ac);
4828
4829        reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4830        if (reserve_flags)
4831                alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags);
4832
4833        /*
4834         * Reset the nodemask and zonelist iterators if memory policies can be
4835         * ignored. These allocations are high priority and system rather than
4836         * user oriented.
4837         */
4838        if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4839                ac->nodemask = NULL;
4840                ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4841                                        ac->highest_zoneidx, ac->nodemask);
4842        }
4843
4844        /* Attempt with potentially adjusted zonelist and alloc_flags */
4845        page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4846        if (page)
4847                goto got_pg;
4848
4849        /* Caller is not willing to reclaim, we can't balance anything */
4850        if (!can_direct_reclaim)
4851                goto nopage;
4852
4853        /* Avoid recursion of direct reclaim */
4854        if (current->flags & PF_MEMALLOC)
4855                goto nopage;
4856
4857        /* Try direct reclaim and then allocating */
4858        page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4859                                                        &did_some_progress);
4860        if (page)
4861                goto got_pg;
4862
4863        /* Try direct compaction and then allocating */
4864        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4865                                        compact_priority, &compact_result);
4866        if (page)
4867                goto got_pg;
4868
4869        /* Do not loop if specifically requested */
4870        if (gfp_mask & __GFP_NORETRY)
4871                goto nopage;
4872
4873        /*
4874         * Do not retry costly high order allocations unless they are
4875         * __GFP_RETRY_MAYFAIL
4876         */
4877        if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4878                goto nopage;
4879
4880        if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4881                                 did_some_progress > 0, &no_progress_loops))
4882                goto retry;
4883
4884        /*
4885         * It doesn't make any sense to retry the compaction if the order-0
4886         * reclaim is not able to make any progress, because the current
4887         * implementation of compaction depends on a sufficient amount
4888         * of free memory (see __compaction_suitable).
4889         */
4890        if (did_some_progress > 0 &&
4891                        should_compact_retry(ac, order, alloc_flags,
4892                                compact_result, &compact_priority,
4893                                &compaction_retries))
4894                goto retry;
4895
4896
4897        /* Deal with possible cpuset update races before we start OOM killing */
4898        if (check_retry_cpuset(cpuset_mems_cookie, ac))
4899                goto retry_cpuset;
4900
4901        /* Reclaim has failed us, start killing things */
4902        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4903        if (page)
4904                goto got_pg;
4905
4906        /* Avoid allocations with no watermarks from looping endlessly */
4907        if (tsk_is_oom_victim(current) &&
4908            (alloc_flags & ALLOC_OOM ||
4909             (gfp_mask & __GFP_NOMEMALLOC)))
4910                goto nopage;
4911
4912        /* Retry as long as the OOM killer is making progress */
4913        if (did_some_progress) {
4914                no_progress_loops = 0;
4915                goto retry;
4916        }
4917
4918nopage:
4919        /* Deal with possible cpuset update races before we fail */
4920        if (check_retry_cpuset(cpuset_mems_cookie, ac))
4921                goto retry_cpuset;
4922
4923        /*
4924         * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4925         * we always retry
4926         */
4927        if (gfp_mask & __GFP_NOFAIL) {
4928                /*
4929                 * All existing users of __GFP_NOFAIL are blockable, so warn
4930                 * about any new users that actually require GFP_NOWAIT.
4931                 */
4932                if (WARN_ON_ONCE(!can_direct_reclaim))
4933                        goto fail;
4934
4935                /*
4936                 * A PF_MEMALLOC request from this context is rather bizarre
4937                 * because we cannot reclaim anything and can only loop waiting
4938                 * for somebody to do the work for us.
4939                 */
4940                WARN_ON_ONCE(current->flags & PF_MEMALLOC);
4941
4942                /*
4943                 * Non-failing costly orders are a hard requirement which we
4944                 * are not well prepared for. Warn about these users
4945                 * so that we can identify them and convert them to something
4946                 * else.
4947                 */
4948                WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
4949
4950                /*
4951                 * Help non-failing allocations by giving them access to memory
4952                 * reserves but do not use ALLOC_NO_WATERMARKS because this
4953                 * could deplete the memory reserves entirely, which would just
4954                 * make the situation worse.
4955                 */
4956                page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
4957                if (page)
4958                        goto got_pg;
4959
4960                cond_resched();
4961                goto retry;
4962        }
4963fail:
4964        warn_alloc(gfp_mask, ac->nodemask,
4965                        "page allocation failure: order:%u", order);
4966got_pg:
4967        return page;
4968}
4969
4970static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4971                int preferred_nid, nodemask_t *nodemask,
4972                struct alloc_context *ac, gfp_t *alloc_gfp,
4973                unsigned int *alloc_flags)
4974{
4975        ac->highest_zoneidx = gfp_zone(gfp_mask);
4976        ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4977        ac->nodemask = nodemask;
4978        ac->migratetype = gfp_migratetype(gfp_mask);
4979
4980        if (cpusets_enabled()) {
4981                *alloc_gfp |= __GFP_HARDWALL;
4982                /*
4983                 * When we are in interrupt context, the cpuset of the current
4984                 * task is irrelevant; any node is OK.
4985                 */
4986                if (!in_interrupt() && !ac->nodemask)
4987                        ac->nodemask = &cpuset_current_mems_allowed;
4988                else
4989                        *alloc_flags |= ALLOC_CPUSET;
4990        }
4991
4992        fs_reclaim_acquire(gfp_mask);
4993        fs_reclaim_release(gfp_mask);
4994
4995        might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
4996
4997        if (should_fail_alloc_page(gfp_mask, order))
4998                return false;
4999
5000        *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5001
5002        /* Dirty zone balancing only done in the fast path */
5003        ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5004
5005        /*
5006         * The preferred zone is used for statistics but crucially it is
5007         * also used as the starting point for the zonelist iterator. It
5008         * may get reset for allocations that ignore memory policies.
5009         */
5010        ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5011                                        ac->highest_zoneidx, ac->nodemask);
5012
5013        return true;
5014}
5015
5016/*
5017 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5018 * @gfp: GFP flags for the allocation
5019 * @preferred_nid: The preferred NUMA node ID to allocate from
5020 * @nodemask: Set of nodes to allocate from, may be NULL
5021 * @nr_pages: The number of pages desired on the list or array
5022 * @page_list: Optional list to store the allocated pages
5023 * @page_array: Optional array to store the pages
5024 *
5025 * This is a batched version of the page allocator that attempts to
5026 * allocate nr_pages quickly. Pages are added to page_list if page_list
5027 * is not NULL, otherwise it is assumed that the page_array is valid.
5028 *
5029 * For lists, nr_pages is the number of pages that should be allocated.
5030 *
5031 * For arrays, only NULL elements are populated with pages and nr_pages
5032 * is the maximum number of pages that will be stored in the array.
5033 *
5034 * Returns the number of pages on the list or array.
5035 */
5036unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
5037                        nodemask_t *nodemask, int nr_pages,
5038                        struct list_head *page_list,
5039                        struct page **page_array)
5040{
5041        struct page *page;
5042        unsigned long flags;
5043        struct zone *zone;
5044        struct zoneref *z;
5045        struct per_cpu_pages *pcp;
5046        struct list_head *pcp_list;
5047        struct alloc_context ac;
5048        gfp_t alloc_gfp;
5049        unsigned int alloc_flags = ALLOC_WMARK_LOW;
5050        int nr_populated = 0;
5051
5052        if (unlikely(nr_pages <= 0))
5053                return 0;
5054
5055        /*
5056         * Skip populated array elements to determine if any pages need
5057         * to be allocated before disabling IRQs.
5058         */
5059        while (page_array && nr_populated < nr_pages && page_array[nr_populated])
5060                nr_populated++;
5061
5062        /* Already populated array? */
5063        if (unlikely(page_array && nr_pages - nr_populated == 0))
5064                return nr_populated;
5065
5066        /* Use the single page allocator for one page. */
5067        if (nr_pages - nr_populated == 1)
5068                goto failed;
5069
5070        /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
5071        gfp &= gfp_allowed_mask;
5072        alloc_gfp = gfp;
5073        if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
5074                return 0;
5075        gfp = alloc_gfp;
5076
5077        /* Find an allowed local zone that meets the low watermark. */
5078        for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
5079                unsigned long mark;
5080
5081                if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
5082                    !__cpuset_zone_allowed(zone, gfp)) {
5083                        continue;
5084                }
5085
5086                if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
5087                    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
5088                        goto failed;
5089                }
5090
5091                mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
5092                if (zone_watermark_fast(zone, 0,  mark,
5093                                zonelist_zone_idx(ac.preferred_zoneref),
5094                                alloc_flags, gfp)) {
5095                        break;
5096                }
5097        }
5098
5099        /*
5100         * If there are no allowed local zones that meet the watermarks then
5101         * try to allocate a single page and reclaim if necessary.
5102         */
5103        if (unlikely(!zone))
5104                goto failed;
5105
5106        /* Attempt the batch allocation */
5107        local_irq_save(flags);
5108        pcp = &this_cpu_ptr(zone->pageset)->pcp;
5109        pcp_list = &pcp->lists[ac.migratetype];
5110
5111        while (nr_populated < nr_pages) {
5112
5113                /* Skip existing pages */
5114                if (page_array && page_array[nr_populated]) {
5115                        nr_populated++;
5116                        continue;
5117                }
5118
5119                page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags,
5120                                                                pcp, pcp_list);
5121                if (unlikely(!page)) {
5122                        /* Try and get at least one page */
5123                        if (!nr_populated)
5124                                goto failed_irq;
5125                        break;
5126                }
5127
5128                /*
5129                 * Ideally this would be batched, but the best way to do
5130                 * that cheaply is to first convert zone_statistics to
5131                 * an inaccurate per-cpu counter like vm_events (to avoid
5132                 * an RMW cycle) and then do the accounting with IRQs enabled.
5133                 */
5134                __count_zid_vm_events(PGALLOC, zone_idx(zone), 1);
5135                zone_statistics(ac.preferred_zoneref->zone, zone);
5136
5137                prep_new_page(page, 0, gfp, 0);
5138                if (page_list)
5139                        list_add(&page->lru, page_list);
5140                else
5141                        page_array[nr_populated] = page;
5142                nr_populated++;
5143        }
5144
5145        local_irq_restore(flags);
5146
5147        return nr_populated;
5148
5149failed_irq:
5150        local_irq_restore(flags);
5151
5152failed:
5153        page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
5154        if (page) {
5155                if (page_list)
5156                        list_add(&page->lru, page_list);
5157                else
5158                        page_array[nr_populated] = page;
5159                nr_populated++;
5160        }
5161
5162        return nr_populated;
5163}
5164EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
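
/*
 * Editorial sketch, not part of the original file: a minimal caller of the
 * array variant of the bulk allocator, assuming process context and
 * GFP_KERNEL. Error handling is reduced to freeing whatever was populated;
 * the function name is hypothetical.
 */
static inline void example_bulk_alloc_array(void)
{
        struct page *pages[8] = { NULL };
        unsigned long nr, i;

        /* Only NULL slots are filled; nr counts the populated elements. */
        nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL,
                                ARRAY_SIZE(pages), NULL, pages);

        for (i = 0; i < nr; i++)
                __free_pages(pages[i], 0);
}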
5165
5166/*
5167 * This is the 'heart' of the zoned buddy allocator.
5168 */
5169struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
5170                                                        nodemask_t *nodemask)
5171{
5172        struct page *page;
5173        unsigned int alloc_flags = ALLOC_WMARK_LOW;
5174        gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
5175        struct alloc_context ac = { };
5176
5177        /*
5178         * There are several places where we assume that the order value is sane
5179         * so bail out early if the request is out of bounds.
5180         */
5181        if (unlikely(order >= MAX_ORDER)) {
5182                WARN_ON_ONCE(!(gfp & __GFP_NOWARN));
5183                return NULL;
5184        }
5185
5186        gfp &= gfp_allowed_mask;
5187        /*
5188         * Apply scoped allocation constraints. This is mainly about GFP_NOFS
5189         * and GFP_NOIO, which have to be inherited for all allocation requests
5190         * from a particular context which has been marked by
5191         * memalloc_no{fs,io}_{save,restore}, and PF_MEMALLOC_PIN, which ensures
5192         * movable zones are not used during allocation.
5193         */
5194        gfp = current_gfp_context(gfp);
5195        alloc_gfp = gfp;
5196        if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac,
5197                        &alloc_gfp, &alloc_flags))
5198                return NULL;
5199
5200        /*
5201         * Forbid the first pass from falling back to types that fragment
5202         * memory until all local zones are considered.
5203         */
5204        alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
5205
5206        /* First allocation attempt */
5207        page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
5208        if (likely(page))
5209                goto out;
5210
5211        alloc_gfp = gfp;
5212        ac.spread_dirty_pages = false;
5213
5214        /*
5215         * Restore the original nodemask if it was potentially replaced with
5216         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
5217         */
5218        ac.nodemask = nodemask;
5219
5220        page = __alloc_pages_slowpath(alloc_gfp, order, &ac);
5221
5222out:
5223        if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page &&
5224            unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) {
5225                __free_pages(page, order);
5226                page = NULL;
5227        }
5228
5229        trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype);
5230
5231        return page;
5232}
5233EXPORT_SYMBOL(__alloc_pages);
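
/*
 * Editorial sketch, not part of the original file: most callers reach
 * __alloc_pages() through the alloc_pages()/alloc_pages_node() wrappers in
 * gfp.h. A minimal order-2 (four page) allocation might look as follows;
 * the function name is hypothetical.
 */
static inline void *example_alloc_four_pages(void)
{
        struct page *page;

        page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
        if (!page)
                return NULL;

        /* The caller later releases the block with __free_pages(page, 2). */
        return page_address(page);
}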
5234
5235/*
5236 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
5237 * address cannot represent highmem pages. Use alloc_pages and then kmap if
5238 * you need to access high mem.
5239 */
5240unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
5241{
5242        struct page *page;
5243
5244        page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
5245        if (!page)
5246                return 0;
5247        return (unsigned long) page_address(page);
5248}
5249EXPORT_SYMBOL(__get_free_pages);
5250
5251unsigned long get_zeroed_page(gfp_t gfp_mask)
5252{
5253        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
5254}
5255EXPORT_SYMBOL(get_zeroed_page);
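
/*
 * Editorial sketch, not part of the original file: a typical caller pairs
 * get_zeroed_page() with free_page() on the returned virtual address; the
 * function name is hypothetical.
 */
static inline int example_use_zeroed_page(void)
{
        unsigned long addr = get_zeroed_page(GFP_KERNEL);

        if (!addr)
                return -ENOMEM;

        /* ... use the PAGE_SIZE buffer at (void *)addr ... */

        free_page(addr);
        return 0;
}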
5256
5257static inline void free_the_page(struct page *page, unsigned int order)
5258{
5259        if (order == 0)         /* Via pcp? */
5260                free_unref_page(page);
5261        else
5262                __free_pages_ok(page, order, FPI_NONE);
5263}
5264
5265/**
5266 * __free_pages - Free pages allocated with alloc_pages().
5267 * @page: The page pointer returned from alloc_pages().
5268 * @order: The order of the allocation.
5269 *
5270 * This function can free multi-page allocations that are not compound
5271 * pages.  It does not check that the @order passed in matches that of
5272 * the allocation, so it is easy to leak memory.  Freeing more memory
5273 * than was allocated will probably emit a warning.
5274 *
5275 * If the last reference to this page is speculative, it will be released
5276 * by put_page() which only frees the first page of a non-compound
5277 * allocation.  To prevent the remaining pages from being leaked, we free
5278 * the subsequent pages here.  If you want to use the page's reference
5279 * count to decide when to free the allocation, you should allocate a
5280 * compound page, and use put_page() instead of __free_pages().
5281 *
5282 * Context: May be called in interrupt context or while holding a normal
5283 * spinlock, but not in NMI context or while holding a raw spinlock.
5284 */
5285void __free_pages(struct page *page, unsigned int order)
5286{
5287        if (put_page_testzero(page))
5288                free_the_page(page, order);
5289        else if (!PageHead(page))
5290                while (order-- > 0)
5291                        free_the_page(page + (1 << order), order);
5292}
5293EXPORT_SYMBOL(__free_pages);
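
/*
 * Editorial sketch, not part of the original file: when the lifetime of a
 * multi-page allocation is driven by its reference count, the kerneldoc
 * above suggests allocating a compound page and dropping it with put_page()
 * rather than __free_pages(); the function name is hypothetical.
 */
static inline struct page *example_refcounted_buffer(unsigned int order)
{
        /* Each user takes get_page(); the final put_page() frees all 2^order pages. */
        return alloc_pages(GFP_KERNEL | __GFP_COMP, order);
}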
5294
5295void free_pages(unsigned long addr, unsigned int order)
5296{
5297        if (addr != 0) {
5298                VM_BUG_ON(!virt_addr_valid((void *)addr));
5299                __free_pages(virt_to_page((void *)addr), order);
5300        }
5301}
5302
5303EXPORT_SYMBOL(free_pages);
5304
5305/*
5306 * Page Fragment:
5307 *  An arbitrary-length arbitrary-offset area of memory which resides
5308 *  within a 0 or higher order page.  Multiple fragments within that page
5309 *  are individually refcounted, in the page's reference counter.
5310 *
5311 * The page_frag functions below provide a simple allocation framework for
5312 * page fragments.  This is used by the network stack and network device
5313 * drivers to provide a backing region of memory for use as either an
5314 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
5315 */
5316static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
5317                                             gfp_t gfp_mask)
5318{
5319        struct page *page = NULL;
5320        gfp_t gfp = gfp_mask;
5321
5322#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5323        gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
5324                    __GFP_NOMEMALLOC;
5325        page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
5326                                PAGE_FRAG_CACHE_MAX_ORDER);
5327        nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
5328#endif
5329        if (unlikely(!page))
5330                page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
5331
5332        nc->va = page ? page_address(page) : NULL;
5333
5334        return page;
5335}
5336
5337void __page_frag_cache_drain(struct page *page, unsigned int count)
5338{
5339        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
5340
5341        if (page_ref_sub_and_test(page, count))
5342                free_the_page(page, compound_order(page));
5343}
5344EXPORT_SYMBOL(__page_frag_cache_drain);
5345
5346void *page_frag_alloc_align(struct page_frag_cache *nc,
5347                      unsigned int fragsz, gfp_t gfp_mask,
5348                      unsigned int align_mask)
5349{
5350        unsigned int size = PAGE_SIZE;
5351        struct page *page;
5352        int offset;
5353
5354        if (unlikely(!nc->va)) {
5355refill:
5356                page = __page_frag_cache_refill(nc, gfp_mask);
5357                if (!page)
5358                        return NULL;
5359
5360#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5361                /* if size can vary use size else just use PAGE_SIZE */
5362                size = nc->size;
5363#endif
5364                /* Even if we own the page, we do not use atomic_set().
5365                 * This would break get_page_unless_zero() users.
5366                 */
5367                page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
5368
5369                /* reset page count bias and offset to start of new frag */
5370                nc->pfmemalloc = page_is_pfmemalloc(page);
5371                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5372                nc->offset = size;
5373        }
5374
5375        offset = nc->offset - fragsz;
5376        if (unlikely(offset < 0)) {
5377                page = virt_to_page(nc->va);
5378
5379                if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
5380                        goto refill;
5381
5382                if (unlikely(nc->pfmemalloc)) {
5383                        free_the_page(page, compound_order(page));
5384                        goto refill;
5385                }
5386
5387#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
5388                /* if size can vary use size else just use PAGE_SIZE */
5389                size = nc->size;
5390#endif
5391                /* OK, page count is 0, we can safely set it */
5392                set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
5393
5394                /* reset page count bias and offset to start of new frag */
5395                nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
5396                offset = size - fragsz;
5397        }
5398
5399        nc->pagecnt_bias--;
5400        offset &= align_mask;
5401        nc->offset = offset;
5402
5403        return nc->va + offset;
5404}
5405EXPORT_SYMBOL(page_frag_alloc_align);
5406
5407/*
5408 * Frees a page fragment allocated out of either a compound or order 0 page.
5409 */
5410void page_frag_free(void *addr)
5411{
5412        struct page *page = virt_to_head_page(addr);
5413
5414        if (unlikely(put_page_testzero(page)))
5415                free_the_page(page, compound_order(page));
5416}
5417EXPORT_SYMBOL(page_frag_free);
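
/*
 * Editorial sketch, not part of the original file: a caller (typically a
 * network driver) keeps a page_frag_cache, carves aligned fragments out of
 * it and releases them individually with page_frag_free(). The wrapper
 * below is hypothetical and assumes the cache is not used concurrently.
 */
static inline void *example_frag_alloc(struct page_frag_cache *nc,
                                       unsigned int fragsz)
{
        /* Align to SMP_CACHE_BYTES; ~0u would mean no alignment request. */
        return page_frag_alloc_align(nc, fragsz, GFP_ATOMIC,
                                     ~(SMP_CACHE_BYTES - 1));
}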
5418
5419static void *make_alloc_exact(unsigned long addr, unsigned int order,
5420                size_t size)
5421{
5422        if (addr) {
5423                unsigned long alloc_end = addr + (PAGE_SIZE << order);
5424                unsigned long used = addr + PAGE_ALIGN(size);
5425
5426                split_page(virt_to_page((void *)addr), order);
5427                while (used < alloc_end) {
5428                        free_page(used);
5429                        used += PAGE_SIZE;
5430                }
5431        }
5432        return (void *)addr;
5433}
5434
5435/**
5436 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5437 * @size: the number of bytes to allocate
5438 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5439 *
5440 * This function is similar to alloc_pages(), except that it allocates the
5441 * minimum number of pages to satisfy the request.  alloc_pages() can only
5442 * allocate memory in power-of-two pages.
5443 *
5444 * This function is also limited by MAX_ORDER.
5445 *
5446 * Memory allocated by this function must be released by free_pages_exact().
5447 *
5448 * Return: pointer to the allocated area or %NULL in case of error.
5449 */
5450void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
5451{
5452        unsigned int order = get_order(size);
5453        unsigned long addr;
5454
5455        if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5456                gfp_mask &= ~__GFP_COMP;
5457
5458        addr = __get_free_pages(gfp_mask, order);
5459        return make_alloc_exact(addr, order, size);
5460}
5461EXPORT_SYMBOL(alloc_pages_exact);
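
/*
 * Editorial sketch, not part of the original file: a request whose size is
 * not a power-of-two number of pages, released with free_pages_exact() and
 * the same size. The function name and the 40KB figure are only an example
 * (the page counts below assume 4KB pages).
 */
static inline void *example_alloc_exact_40k(void)
{
        /*
         * get_order(40KB) is 4, so 64KB is allocated internally and the
         * unused 24KB tail is split off and freed, leaving ten pages.
         */
        return alloc_pages_exact(40 * 1024, GFP_KERNEL | __GFP_ZERO);
}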
5462
5463/**
5464 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
5465 *                         pages on a node.
5466 * @nid: the preferred node ID where memory should be allocated
5467 * @size: the number of bytes to allocate
5468 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5469 *
5470 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
5471 * back.
5472 *
5473 * Return: pointer to the allocated area or %NULL in case of error.
5474 */
5475void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
5476{
5477        unsigned int order = get_order(size);
5478        struct page *p;
5479
5480        if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
5481                gfp_mask &= ~__GFP_COMP;
5482
5483        p = alloc_pages_node(nid, gfp_mask, order);
5484        if (!p)
5485                return NULL;
5486        return make_alloc_exact((unsigned long)page_address(p), order, size);
5487}
5488
5489/**
5490 * free_pages_exact - release memory allocated via alloc_pages_exact()
5491 * @virt: the value returned by alloc_pages_exact().
5492 * @size: size of allocation, same value as passed to alloc_pages_exact().
5493 *
5494 * Release the memory allocated by a previous call to alloc_pages_exact.
5495 */
5496void free_pages_exact(void *virt, size_t size)
5497{
5498        unsigned long addr = (unsigned long)virt;
5499        unsigned long end = addr + PAGE_ALIGN(size);
5500
5501        while (addr < end) {
5502                free_page(addr);
5503                addr += PAGE_SIZE;
5504        }
5505}
5506EXPORT_SYMBOL(free_pages_exact);
5507
5508/**
5509 * nr_free_zone_pages - count number of pages beyond high watermark
5510 * @offset: The zone index of the highest zone
5511 *
5512 * nr_free_zone_pages() counts the number of pages which are beyond the
5513 * high watermark within all zones at or below a given zone index.  For each
5514 * zone, the number of pages is calculated as:
5515 *
5516 *     nr_free_zone_pages = managed_pages - high_pages
5517 *
5518 * Return: number of pages beyond high watermark.
5519 */
5520static unsigned long nr_free_zone_pages(int offset)
5521{
5522        struct zoneref *z;
5523        struct zone *zone;
5524
5525        /* Just pick one node, since fallback list is circular */
5526        unsigned long sum = 0;
5527
5528        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
5529
5530        for_each_zone_zonelist(zone, z, zonelist, offset) {
5531                unsigned long size = zone_managed_pages(zone);
5532                unsigned long high = high_wmark_pages(zone);
5533                if (size > high)
5534                        sum += size - high;
5535        }
5536
5537        return sum;
5538}
5539
5540/**
5541 * nr_free_buffer_pages - count number of pages beyond high watermark
5542 *
5543 * nr_free_buffer_pages() counts the number of pages which are beyond the high
5544 * watermark within ZONE_DMA and ZONE_NORMAL.
5545 *
5546 * Return: number of pages beyond high watermark within ZONE_DMA and
5547 * ZONE_NORMAL.
5548 */
5549unsigned long nr_free_buffer_pages(void)
5550{
5551        return nr_free_zone_pages(gfp_zone(GFP_USER));
5552}
5553EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
5554
5555static inline void show_node(struct zone *zone)
5556{
5557        if (IS_ENABLED(CONFIG_NUMA))
5558                printk("Node %d ", zone_to_nid(zone));
5559}
5560
5561long si_mem_available(void)
5562{
5563        long available;
5564        unsigned long pagecache;
5565        unsigned long wmark_low = 0;
5566        unsigned long pages[NR_LRU_LISTS];
5567        unsigned long reclaimable;
5568        struct zone *zone;
5569        int lru;
5570
5571        for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
5572                pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
5573
5574        for_each_zone(zone)
5575                wmark_low += low_wmark_pages(zone);
5576
5577        /*
5578         * Estimate the amount of memory available for userspace allocations,
5579         * without causing swapping.
5580         */
5581        available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
5582
5583        /*
5584         * Not all the page cache can be freed, otherwise the system will
5585         * start swapping. Assume at least half of the page cache, or the
5586         * low watermark worth of cache, needs to stay.
5587         */
5588        pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
5589        pagecache -= min(pagecache / 2, wmark_low);
5590        available += pagecache;
5591
5592        /*
5593         * Part of the reclaimable slab and other kernel memory consists of
5594         * items that are in use, and cannot be freed. Cap this estimate at the
5595         * low watermark.
5596         */
5597        reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
5598                global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
5599        available += reclaimable - min(reclaimable / 2, wmark_low);
5600
5601        if (available < 0)
5602                available = 0;
5603        return available;
5604}
5605EXPORT_SYMBOL_GPL(si_mem_available);
5606
5607void si_meminfo(struct sysinfo *val)
5608{
5609        val->totalram = totalram_pages();
5610        val->sharedram = global_node_page_state(NR_SHMEM);
5611        val->freeram = global_zone_page_state(NR_FREE_PAGES);
5612        val->bufferram = nr_blockdev_pages();
5613        val->totalhigh = totalhigh_pages();
5614        val->freehigh = nr_free_highpages();
5615        val->mem_unit = PAGE_SIZE;
5616}
5617
5618EXPORT_SYMBOL(si_meminfo);
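
/*
 * Editorial sketch, not part of the original file: /proc/meminfo-style
 * consumers combine si_meminfo() with si_mem_available(); both report in
 * pages (struct sysinfo uses mem_unit, which is PAGE_SIZE here). The
 * function name is hypothetical.
 */
static inline void example_report_memory(void)
{
        struct sysinfo si;
        long avail = si_mem_available();        /* in pages, clamped to >= 0 */

        si_meminfo(&si);
        pr_info("total %lu kB, available %ld kB\n",
                (unsigned long)si.totalram << (PAGE_SHIFT - 10),
                avail << (PAGE_SHIFT - 10));
}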
5619
5620#ifdef CONFIG_NUMA
5621void si_meminfo_node(struct sysinfo *val, int nid)
5622{
5623        int zone_type;          /* needs to be signed */
5624        unsigned long managed_pages = 0;
5625        unsigned long managed_highpages = 0;
5626        unsigned long free_highpages = 0;
5627        pg_data_t *pgdat = NODE_DATA(nid);
5628
5629        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
5630                managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
5631        val->totalram = managed_pages;
5632        val->sharedram = node_page_state(pgdat, NR_SHMEM);
5633        val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
5634#ifdef CONFIG_HIGHMEM
5635        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
5636                struct zone *zone = &pgdat->node_zones[zone_type];
5637
5638                if (is_highmem(zone)) {
5639                        managed_highpages += zone_managed_pages(zone);
5640                        free_highpages += zone_page_state(zone, NR_FREE_PAGES);
5641                }
5642        }
5643        val->totalhigh = managed_highpages;
5644        val->freehigh = free_highpages;
5645#else
5646        val->totalhigh = managed_highpages;
5647        val->freehigh = free_highpages;
5648#endif
5649        val->mem_unit = PAGE_SIZE;
5650}
5651#endif
5652
5653/*
5654 * Determine whether the node should be displayed or not, depending on whether
5655 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
5656 */
5657static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
5658{
5659        if (!(flags & SHOW_MEM_FILTER_NODES))
5660                return false;
5661
5662        /*
5663         * no node mask - aka implicit memory numa policy. Do not bother with
5664         * the synchronization - read_mems_allowed_begin - because we do not
5665         * have to be precise here.
5666         */
5667        if (!nodemask)
5668                nodemask = &cpuset_current_mems_allowed;
5669
5670        return !node_isset(nid, *nodemask);
5671}
5672
5673#define K(x) ((x) << (PAGE_SHIFT-10))
5674
5675static void show_migration_types(unsigned char type)
5676{
5677        static const char types[MIGRATE_TYPES] = {
5678                [MIGRATE_UNMOVABLE]     = 'U',
5679                [MIGRATE_MOVABLE]       = 'M',
5680                [MIGRATE_RECLAIMABLE]   = 'E',
5681                [MIGRATE_HIGHATOMIC]    = 'H',
5682#ifdef CONFIG_CMA
5683                [MIGRATE_CMA]           = 'C',
5684#endif
5685#ifdef CONFIG_MEMORY_ISOLATION
5686                [MIGRATE_ISOLATE]       = 'I',
5687#endif
5688        };
5689        char tmp[MIGRATE_TYPES + 1];
5690        char *p = tmp;
5691        int i;
5692
5693        for (i = 0; i < MIGRATE_TYPES; i++) {
5694                if (type & (1 << i))
5695                        *p++ = types[i];
5696        }
5697
5698        *p = '\0';
5699        printk(KERN_CONT "(%s) ", tmp);
5700}
5701
5702/*
5703 * Show free area list (used inside shift_scroll-lock stuff)
5704 * We also calculate the percentage fragmentation. We do this by counting the
5705 * memory on each free list with the exception of the first item on the list.
5706 *
5707 * Bits in @filter:
5708 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
5709 *   cpuset.
5710 */
5711void show_free_areas(unsigned int filter, nodemask_t *nodemask)
5712{
5713        unsigned long free_pcp = 0;
5714        int cpu;
5715        struct zone *zone;
5716        pg_data_t *pgdat;
5717
5718        for_each_populated_zone(zone) {
5719                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5720                        continue;
5721
5722                for_each_online_cpu(cpu)
5723                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5724        }
5725
5726        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
5727                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
5728                " unevictable:%lu dirty:%lu writeback:%lu\n"
5729                " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
5730                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
5731                " free:%lu free_pcp:%lu free_cma:%lu\n",
5732                global_node_page_state(NR_ACTIVE_ANON),
5733                global_node_page_state(NR_INACTIVE_ANON),
5734                global_node_page_state(NR_ISOLATED_ANON),
5735                global_node_page_state(NR_ACTIVE_FILE),
5736                global_node_page_state(NR_INACTIVE_FILE),
5737                global_node_page_state(NR_ISOLATED_FILE),
5738                global_node_page_state(NR_UNEVICTABLE),
5739                global_node_page_state(NR_FILE_DIRTY),
5740                global_node_page_state(NR_WRITEBACK),
5741                global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
5742                global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
5743                global_node_page_state(NR_FILE_MAPPED),
5744                global_node_page_state(NR_SHMEM),
5745                global_node_page_state(NR_PAGETABLE),
5746                global_zone_page_state(NR_BOUNCE),
5747                global_zone_page_state(NR_FREE_PAGES),
5748                free_pcp,
5749                global_zone_page_state(NR_FREE_CMA_PAGES));
5750
5751        for_each_online_pgdat(pgdat) {
5752                if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
5753                        continue;
5754
5755                printk("Node %d"
5756                        " active_anon:%lukB"
5757                        " inactive_anon:%lukB"
5758                        " active_file:%lukB"
5759                        " inactive_file:%lukB"
5760                        " unevictable:%lukB"
5761                        " isolated(anon):%lukB"
5762                        " isolated(file):%lukB"
5763                        " mapped:%lukB"
5764                        " dirty:%lukB"
5765                        " writeback:%lukB"
5766                        " shmem:%lukB"
5767#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5768                        " shmem_thp: %lukB"
5769                        " shmem_pmdmapped: %lukB"
5770                        " anon_thp: %lukB"
5771#endif
5772                        " writeback_tmp:%lukB"
5773                        " kernel_stack:%lukB"
5774#ifdef CONFIG_SHADOW_CALL_STACK
5775                        " shadow_call_stack:%lukB"
5776#endif
5777                        " pagetables:%lukB"
5778                        " all_unreclaimable? %s"
5779                        "\n",
5780                        pgdat->node_id,
5781                        K(node_page_state(pgdat, NR_ACTIVE_ANON)),
5782                        K(node_page_state(pgdat, NR_INACTIVE_ANON)),
5783                        K(node_page_state(pgdat, NR_ACTIVE_FILE)),
5784                        K(node_page_state(pgdat, NR_INACTIVE_FILE)),
5785                        K(node_page_state(pgdat, NR_UNEVICTABLE)),
5786                        K(node_page_state(pgdat, NR_ISOLATED_ANON)),
5787                        K(node_page_state(pgdat, NR_ISOLATED_FILE)),
5788                        K(node_page_state(pgdat, NR_FILE_MAPPED)),
5789                        K(node_page_state(pgdat, NR_FILE_DIRTY)),
5790                        K(node_page_state(pgdat, NR_WRITEBACK)),
5791                        K(node_page_state(pgdat, NR_SHMEM)),
5792#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5793                        K(node_page_state(pgdat, NR_SHMEM_THPS)),
5794                        K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
5795                        K(node_page_state(pgdat, NR_ANON_THPS)),
5796#endif
5797                        K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
5798                        node_page_state(pgdat, NR_KERNEL_STACK_KB),
5799#ifdef CONFIG_SHADOW_CALL_STACK
5800                        node_page_state(pgdat, NR_KERNEL_SCS_KB),
5801#endif
5802                        K(node_page_state(pgdat, NR_PAGETABLE)),
5803                        pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
5804                                "yes" : "no");
5805        }
5806
5807        for_each_populated_zone(zone) {
5808                int i;
5809
5810                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5811                        continue;
5812
5813                free_pcp = 0;
5814                for_each_online_cpu(cpu)
5815                        free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
5816
5817                show_node(zone);
5818                printk(KERN_CONT
5819                        "%s"
5820                        " free:%lukB"
5821                        " min:%lukB"
5822                        " low:%lukB"
5823                        " high:%lukB"
5824                        " reserved_highatomic:%lukB"
5825                        " active_anon:%lukB"
5826                        " inactive_anon:%lukB"
5827                        " active_file:%lukB"
5828                        " inactive_file:%lukB"
5829                        " unevictable:%lukB"
5830                        " writepending:%lukB"
5831                        " present:%lukB"
5832                        " managed:%lukB"
5833                        " mlocked:%lukB"
5834                        " bounce:%lukB"
5835                        " free_pcp:%lukB"
5836                        " local_pcp:%ukB"
5837                        " free_cma:%lukB"
5838                        "\n",
5839                        zone->name,
5840                        K(zone_page_state(zone, NR_FREE_PAGES)),
5841                        K(min_wmark_pages(zone)),
5842                        K(low_wmark_pages(zone)),
5843                        K(high_wmark_pages(zone)),
5844                        K(zone->nr_reserved_highatomic),
5845                        K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
5846                        K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
5847                        K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
5848                        K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
5849                        K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
5850                        K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
5851                        K(zone->present_pages),
5852                        K(zone_managed_pages(zone)),
5853                        K(zone_page_state(zone, NR_MLOCK)),
5854                        K(zone_page_state(zone, NR_BOUNCE)),
5855                        K(free_pcp),
5856                        K(this_cpu_read(zone->pageset->pcp.count)),
5857                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
5858                printk("lowmem_reserve[]:");
5859                for (i = 0; i < MAX_NR_ZONES; i++)
5860                        printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
5861                printk(KERN_CONT "\n");
5862        }
5863
5864        for_each_populated_zone(zone) {
5865                unsigned int order;
5866                unsigned long nr[MAX_ORDER], flags, total = 0;
5867                unsigned char types[MAX_ORDER];
5868
5869                if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
5870                        continue;
5871                show_node(zone);
5872                printk(KERN_CONT "%s: ", zone->name);
5873
5874                spin_lock_irqsave(&zone->lock, flags);
5875                for (order = 0; order < MAX_ORDER; order++) {
5876                        struct free_area *area = &zone->free_area[order];
5877                        int type;
5878
5879                        nr[order] = area->nr_free;
5880                        total += nr[order] << order;
5881
5882                        types[order] = 0;
5883                        for (type = 0; type < MIGRATE_TYPES; type++) {
5884                                if (!free_area_empty(area, type))
5885                                        types[order] |= 1 << type;
5886                        }
5887                }
5888                spin_unlock_irqrestore(&zone->lock, flags);
5889                for (order = 0; order < MAX_ORDER; order++) {
5890                        printk(KERN_CONT "%lu*%lukB ",
5891                               nr[order], K(1UL) << order);
5892                        if (nr[order])
5893                                show_migration_types(types[order]);
5894                }
5895                printk(KERN_CONT "= %lukB\n", K(total));
5896        }
5897
5898        hugetlb_show_meminfo();
5899
5900        printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
5901
5902        show_swap_cache_info();
5903}
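/*
 * Illustrative sketch of the per-order output produced above (the counts,
 * the total and the migratetype letters are hypothetical and configuration
 * dependent; the letters come from show_migration_types()):
 *
 *   Normal: 117*4kB (UME) 43*8kB (UE) 12*16kB (M) ... 3*4096kB (M) = 15660kB
 *
 * Each term is free_area[order].nr_free multiplied by the block size in
 * KiB, and the trailing figure is the K(total) sum over all orders.
 */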
5904
5905static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
5906{
5907        zoneref->zone = zone;
5908        zoneref->zone_idx = zone_idx(zone);
5909}
5910
5911/*
5912 * Builds allocation fallback zone lists.
5913 *
5914 * Add all populated zones of a node to the zonelist.
5915 */
5916static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
5917{
5918        struct zone *zone;
5919        enum zone_type zone_type = MAX_NR_ZONES;
5920        int nr_zones = 0;
5921
5922        do {
5923                zone_type--;
5924                zone = pgdat->node_zones + zone_type;
5925                if (managed_zone(zone)) {
5926                        zoneref_set_zone(zone, &zonerefs[nr_zones++]);
5927                        check_highest_zone(zone_type);
5928                }
5929        } while (zone_type);
5930
5931        return nr_zones;
5932}
5933
5934#ifdef CONFIG_NUMA
5935
5936static int __parse_numa_zonelist_order(char *s)
5937{
5938        /*
5939         * We used to support different zonelist modes, but they turned
5940         * out to be just not useful. Let's keep the warning in place
5941         * if somebody still uses the command line parameter, so that
5942         * we do not fail it silently.
5943         */
5944        if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
5945                pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
5946                return -EINVAL;
5947        }
5948        return 0;
5949}
5950
5951char numa_zonelist_order[] = "Node";
5952
5953/*
5954 * sysctl handler for numa_zonelist_order
5955 */
5956int numa_zonelist_order_handler(struct ctl_table *table, int write,
5957                void *buffer, size_t *length, loff_t *ppos)
5958{
5959        if (write)
5960                return __parse_numa_zonelist_order(buffer);
5961        return proc_dostring(table, write, buffer, length, ppos);
5962}
5963
5964
5965#define MAX_NODE_LOAD (nr_online_nodes)
5966static int node_load[MAX_NUMNODES];
5967
5968/**
5969 * find_next_best_node - find the next node that should appear in a given node's fallback list
5970 * @node: node whose fallback list we're appending
5971 * @used_node_mask: nodemask_t of already used nodes
5972 *
5973 * We use a number of factors to determine which is the next node that should
5974 * appear on a given node's fallback list.  The node should not have appeared
5975 * already in @node's fallback list, and it should be the next closest node
5976 * according to the distance array (which contains arbitrary distance values
5977 * from each node to each node in the system), and should also prefer nodes
5978 * with no CPUs, since presumably they'll have very little allocation pressure
5979 * on them otherwise.
5980 *
5981 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5982 */
5983static int find_next_best_node(int node, nodemask_t *used_node_mask)
5984{
5985        int n, val;
5986        int min_val = INT_MAX;
5987        int best_node = NUMA_NO_NODE;
5988
5989        /* Use the local node if we haven't already */
5990        if (!node_isset(node, *used_node_mask)) {
5991                node_set(node, *used_node_mask);
5992                return node;
5993        }
5994
5995        for_each_node_state(n, N_MEMORY) {
5996
5997                /* Don't want a node to appear more than once */
5998                if (node_isset(n, *used_node_mask))
5999                        continue;
6000
6001                /* Use the distance array to find the distance */
6002                val = node_distance(node, n);
6003
6004                /* Penalize nodes under us ("prefer the next node") */
6005                val += (n < node);
6006
6007                /* Give preference to headless and unused nodes */
6008                if (!cpumask_empty(cpumask_of_node(n)))
6009                        val += PENALTY_FOR_NODE_WITH_CPUS;
6010
6011                /* Slight preference for less loaded node */
6012                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
6013                val += node_load[n];
6014
6015                if (val < min_val) {
6016                        min_val = val;
6017                        best_node = n;
6018                }
6019        }
6020
6021        if (best_node >= 0)
6022                node_set(best_node, *used_node_mask);
6023
6024        return best_node;
6025}
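/*
 * Worked example for the scoring above (hypothetical distances, a sketch
 * only): building node 0's fallback list on a four-node box where
 * node_distance(0, n) is 10, 20, 20, 30 for n = 0..3 and node 2 is
 * headless.  Candidate 1's val is 20 + PENALTY_FOR_NODE_WITH_CPUS (then
 * scaled), candidate 2's is just 20, so the headless node 2 wins the
 * distance tie; node 1 is taken next and node 3 (distance 30) comes last,
 * giving the order 0, 2, 1, 3.
 */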
6026
6027
6028/*
6029 * Build zonelists ordered by node and zones within node.
6030 * This results in maximum locality--normal zone overflows into local
6031 * DMA zone, if any--but risks exhausting DMA zone.
6032 */
6033static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
6034                unsigned nr_nodes)
6035{
6036        struct zoneref *zonerefs;
6037        int i;
6038
6039        zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6040
6041        for (i = 0; i < nr_nodes; i++) {
6042                int nr_zones;
6043
6044                pg_data_t *node = NODE_DATA(node_order[i]);
6045
6046                nr_zones = build_zonerefs_node(node, zonerefs);
6047                zonerefs += nr_zones;
6048        }
6049        zonerefs->zone = NULL;
6050        zonerefs->zone_idx = 0;
6051}
6052
6053/*
6054 * Build gfp_thisnode zonelists
6055 */
6056static void build_thisnode_zonelists(pg_data_t *pgdat)
6057{
6058        struct zoneref *zonerefs;
6059        int nr_zones;
6060
6061        zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
6062        nr_zones = build_zonerefs_node(pgdat, zonerefs);
6063        zonerefs += nr_zones;
6064        zonerefs->zone = NULL;
6065        zonerefs->zone_idx = 0;
6066}
6067
6068/*
6069 * Build the zonelists for a node, ordered by node distance: the local
6070 * node's zones come first, followed by the zones of the remaining nodes
6071 * in the order chosen by find_next_best_node(), so that allocations fall
6072 * back to the closest memory before more distant nodes.
6073 */
6074
6075static void build_zonelists(pg_data_t *pgdat)
6076{
6077        static int node_order[MAX_NUMNODES];
6078        int node, load, nr_nodes = 0;
6079        nodemask_t used_mask = NODE_MASK_NONE;
6080        int local_node, prev_node;
6081
6082        /* NUMA-aware ordering of nodes */
6083        local_node = pgdat->node_id;
6084        load = nr_online_nodes;
6085        prev_node = local_node;
6086
6087        memset(node_order, 0, sizeof(node_order));
6088        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
6089                /*
6090                 * We don't want to pressure a particular node.
6091                 * So we add a penalty to the first node in the same
6092                 * distance group to make the ordering round-robin.
6093                 */
6094                if (node_distance(local_node, node) !=
6095                    node_distance(local_node, prev_node))
6096                        node_load[node] = load;
6097
6098                node_order[nr_nodes++] = node;
6099                prev_node = node;
6100                load--;
6101        }
6102
6103        build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
6104        build_thisnode_zonelists(pgdat);
6105}
6106
6107#ifdef CONFIG_HAVE_MEMORYLESS_NODES
6108/*
6109 * Return node id of node used for "local" allocations.
6110 * I.e., first node id of first zone in arg node's generic zonelist.
6111 * Used for initializing percpu 'numa_mem', which is used primarily
6112 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
6113 */
6114int local_memory_node(int node)
6115{
6116        struct zoneref *z;
6117
6118        z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
6119                                   gfp_zone(GFP_KERNEL),
6120                                   NULL);
6121        return zone_to_nid(z->zone);
6122}
6123#endif
6124
6125static void setup_min_unmapped_ratio(void);
6126static void setup_min_slab_ratio(void);
6127#else   /* CONFIG_NUMA */
6128
6129static void build_zonelists(pg_data_t *pgdat)
6130{
6131        int node, local_node;
6132        struct zoneref *zonerefs;
6133        int nr_zones;
6134
6135        local_node = pgdat->node_id;
6136
6137        zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
6138        nr_zones = build_zonerefs_node(pgdat, zonerefs);
6139        zonerefs += nr_zones;
6140
6141        /*
6142         * Now we build the zonelist so that it contains the zones
6143         * of all the other nodes.
6144         * We don't want to pressure a particular node, so when
6145         * building the zones for node N, we make sure that the
6146         * zones coming right after the local ones are those from
6147         * node N+1, wrapping around to node 0 after the last node
6148         */
6149        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6150                if (!node_online(node))
6151                        continue;
6152                nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6153                zonerefs += nr_zones;
6154        }
6155        for (node = 0; node < local_node; node++) {
6156                if (!node_online(node))
6157                        continue;
6158                nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6159                zonerefs += nr_zones;
6160        }
6161
6162        zonerefs->zone = NULL;
6163        zonerefs->zone_idx = 0;
6164}
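/*
 * Example of the resulting order (a sketch; with CONFIG_NUMA=n there is
 * normally a single node, so the wrap-around is mostly of historical
 * interest): if nodes 0-3 were online and local_node were 2, the two
 * loops above would append the zones of node 3, then node 0, then node 1
 * after the local zones, i.e. the fallback order is 2, 3, 0, 1.
 */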
6165
6166#endif  /* CONFIG_NUMA */
6167
6168/*
6169 * Boot pageset table. One per cpu which is going to be used for all
6170 * zones and all nodes. The parameters will be set in such a way
6171 * that an item put on a list will immediately be handed over to
6172 * the buddy list. This is safe since pageset manipulation is done
6173 * with interrupts disabled.
6174 *
6175 * The boot_pagesets must be kept even after bootup is complete for
6176 * unused processors and/or zones. They do play a role for bootstrapping
6177 * hotplugged processors.
6178 *
6179 * zoneinfo_show() and maybe other functions do
6180 * not check if the processor is online before following the pageset pointer.
6181 * Other parts of the kernel may not check if the zone is available.
6182 */
6183static void pageset_init(struct per_cpu_pageset *p);
6184/* These effectively disable the pcplists in the boot pageset completely */
6185#define BOOT_PAGESET_HIGH       0
6186#define BOOT_PAGESET_BATCH      1
6187static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
6188static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
6189
6190static void __build_all_zonelists(void *data)
6191{
6192        int nid;
6193        int __maybe_unused cpu;
6194        pg_data_t *self = data;
6195        static DEFINE_SPINLOCK(lock);
6196
6197        spin_lock(&lock);
6198
6199#ifdef CONFIG_NUMA
6200        memset(node_load, 0, sizeof(node_load));
6201#endif
6202
6203        /*
6204         * This node is hotadded and no memory is yet present.   So just
6205         * building zonelists is fine - no need to touch other nodes.
6206         */
6207        if (self && !node_online(self->node_id)) {
6208                build_zonelists(self);
6209        } else {
6210                for_each_online_node(nid) {
6211                        pg_data_t *pgdat = NODE_DATA(nid);
6212
6213                        build_zonelists(pgdat);
6214                }
6215
6216#ifdef CONFIG_HAVE_MEMORYLESS_NODES
6217                /*
6218                 * We now know the "local memory node" for each node--
6219                 * i.e., the node of the first zone in the generic zonelist.
6220                 * Set up numa_mem percpu variable for on-line cpus.  During
6221                 * boot, only the boot cpu should be on-line;  we'll init the
6222                 * secondary cpus' numa_mem as they come on-line.  During
6223                 * node/memory hotplug, we'll fixup all on-line cpus.
6224                 */
6225                for_each_online_cpu(cpu)
6226                        set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
6227#endif
6228        }
6229
6230        spin_unlock(&lock);
6231}
6232
6233static noinline void __init
6234build_all_zonelists_init(void)
6235{
6236        int cpu;
6237
6238        __build_all_zonelists(NULL);
6239
6240        /*
6241         * Initialize the boot_pagesets that are going to be used
6242         * for bootstrapping processors. The real pagesets for
6243         * each zone will be allocated later when the per cpu
6244         * allocator is available.
6245         *
6246         * boot_pagesets are used also for bootstrapping offline
6247         * cpus if the system is already booted because the pagesets
6248         * are needed to initialize allocators on a specific cpu too.
6249         * F.e. the percpu allocator needs the page allocator which
6250         * needs the percpu allocator in order to allocate its pagesets
6251         * (a chicken-egg dilemma).
6252         */
6253        for_each_possible_cpu(cpu)
6254                pageset_init(&per_cpu(boot_pageset, cpu));
6255
6256        mminit_verify_zonelist();
6257        cpuset_init_current_mems_allowed();
6258}
6259
6260/*
6261 * Build or rebuild the zonelists; the __init boot-time path below is taken only while system_state == SYSTEM_BOOTING.
6262 *
6263 * __ref due to call of __init annotated helper build_all_zonelists_init
6264 * [protected by SYSTEM_BOOTING].
6265 */
6266void __ref build_all_zonelists(pg_data_t *pgdat)
6267{
6268        unsigned long vm_total_pages;
6269
6270        if (system_state == SYSTEM_BOOTING) {
6271                build_all_zonelists_init();
6272        } else {
6273                __build_all_zonelists(pgdat);
6274                /* cpuset refresh routine should be here */
6275        }
6276        /* Get the number of free pages beyond high watermark in all zones. */
6277        vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
6278        /*
6279         * Disable grouping by mobility if the number of pages in the
6280         * system is too low to allow the mechanism to work. It would be
6281         * more accurate, but expensive to check per-zone. This check is
6282         * made on memory-hotadd so a system can start with mobility
6283         * disabled and enable it later
6284         */
6285        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
6286                page_group_by_mobility_disabled = 1;
6287        else
6288                page_group_by_mobility_disabled = 0;
6289
6290        pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
6291                nr_online_nodes,
6292                page_group_by_mobility_disabled ? "off" : "on",
6293                vm_total_pages);
6294#ifdef CONFIG_NUMA
6295        pr_info("Policy zone: %s\n", zone_names[policy_zone]);
6296#endif
6297}
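/*
 * Worked example for the mobility-grouping cutoff above (a sketch; the
 * numbers assume a common x86-64 configuration and are not universal):
 * with 4 KiB pages and 2 MiB pageblocks, pageblock_nr_pages is 512, and
 * with six migrate types the threshold is 512 * 6 = 3072 pages, i.e.
 * roughly 12 MiB of memory beyond the high watermarks.  Smaller systems
 * run with page_group_by_mobility_disabled set.
 */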
6298
6299/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
6300static bool __meminit
6301overlap_memmap_init(unsigned long zone, unsigned long *pfn)
6302{
6303        static struct memblock_region *r;
6304
6305        if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
6306                if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
6307                        for_each_mem_region(r) {
6308                                if (*pfn < memblock_region_memory_end_pfn(r))
6309                                        break;
6310                        }
6311                }
6312                if (*pfn >= memblock_region_memory_base_pfn(r) &&
6313                    memblock_is_mirror(r)) {
6314                        *pfn = memblock_region_memory_end_pfn(r);
6315                        return true;
6316                }
6317        }
6318        return false;
6319}
6320
6321/*
6322 * Initially all pages are reserved - free ones are freed
6323 * up by memblock_free_all() once the early boot process is
6324 * done. Non-atomic initialization, single-pass.
6325 *
6326 * All aligned pageblocks are initialized to the specified migratetype
6327 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
6328 * zone stats (e.g., nr_isolate_pageblock) are touched.
6329 */
6330void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
6331                unsigned long start_pfn, unsigned long zone_end_pfn,
6332                enum meminit_context context,
6333                struct vmem_altmap *altmap, int migratetype)
6334{
6335        unsigned long pfn, end_pfn = start_pfn + size;
6336        struct page *page;
6337
6338        if (highest_memmap_pfn < end_pfn - 1)
6339                highest_memmap_pfn = end_pfn - 1;
6340
6341#ifdef CONFIG_ZONE_DEVICE
6342        /*
6343         * Honor reservation requested by the driver for this ZONE_DEVICE
6344         * memory. We limit the total number of pages to initialize to just
6345         * those that might contain the memory mapping. We will defer the
6346         * ZONE_DEVICE page initialization until after we have released
6347         * the hotplug lock.
6348         */
6349        if (zone == ZONE_DEVICE) {
6350                if (!altmap)
6351                        return;
6352
6353                if (start_pfn == altmap->base_pfn)
6354                        start_pfn += altmap->reserve;
6355                end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6356        }
6357#endif
6358
6359        for (pfn = start_pfn; pfn < end_pfn; ) {
6360                /*
6361                 * There can be holes in boot-time mem_map[]s handed to this
6362                 * function.  They do not exist on hotplugged memory.
6363                 */
6364                if (context == MEMINIT_EARLY) {
6365                        if (overlap_memmap_init(zone, &pfn))
6366                                continue;
6367                        if (defer_init(nid, pfn, zone_end_pfn))
6368                                break;
6369                }
6370
6371                page = pfn_to_page(pfn);
6372                __init_single_page(page, pfn, zone, nid);
6373                if (context == MEMINIT_HOTPLUG)
6374                        __SetPageReserved(page);
6375
6376                /*
6377                 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
6378                 * such that unmovable allocations won't be scattered all
6379                 * over the place during system boot.
6380                 */
6381                if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6382                        set_pageblock_migratetype(page, migratetype);
6383                        cond_resched();
6384                }
6385                pfn++;
6386        }
6387}
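/*
 * Note on granularity (illustrative, configuration dependent): with
 * 4 KiB pages and pageblock_order 9, pageblock_nr_pages is 512, so the
 * IS_ALIGNED() check above sets the migratetype and calls cond_resched()
 * once per 2 MiB of initialised memory map.
 */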
6388
6389#ifdef CONFIG_ZONE_DEVICE
6390void __ref memmap_init_zone_device(struct zone *zone,
6391                                   unsigned long start_pfn,
6392                                   unsigned long nr_pages,
6393                                   struct dev_pagemap *pgmap)
6394{
6395        unsigned long pfn, end_pfn = start_pfn + nr_pages;
6396        struct pglist_data *pgdat = zone->zone_pgdat;
6397        struct vmem_altmap *altmap = pgmap_altmap(pgmap);
6398        unsigned long zone_idx = zone_idx(zone);
6399        unsigned long start = jiffies;
6400        int nid = pgdat->node_id;
6401
6402        if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
6403                return;
6404
6405        /*
6406         * The call to memmap_init should have already taken care
6407         * of the pages reserved for the memmap, so we can just jump to
6408         * the end of that region and start processing the device pages.
6409         */
6410        if (altmap) {
6411                start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
6412                nr_pages = end_pfn - start_pfn;
6413        }
6414
6415        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
6416                struct page *page = pfn_to_page(pfn);
6417
6418                __init_single_page(page, pfn, zone_idx, nid);
6419
6420                /*
6421                 * Mark page reserved as it will need to wait for onlining
6422                 * phase for it to be fully associated with a zone.
6423                 *
6424                 * We can use the non-atomic __set_bit operation for setting
6425                 * the flag as we are still initializing the pages.
6426                 */
6427                __SetPageReserved(page);
6428
6429                /*
6430                 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
6431                 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
6432                 * ever freed or placed on a driver-private list.
6433                 */
6434                page->pgmap = pgmap;
6435                page->zone_device_data = NULL;
6436
6437                /*
6438                 * Mark the block movable so that blocks are reserved for
6439                 * movable at startup. This will force kernel allocations
6440                 * to reserve their blocks rather than leaking throughout
6441                 * the address space during boot when many long-lived
6442                 * kernel allocations are made.
6443                 *
6444                 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
6445                 * because this is done early in section_activate()
6446                 */
6447                if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
6448                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
6449                        cond_resched();
6450                }
6451        }
6452
6453        pr_info("%s initialised %lu pages in %ums\n", __func__,
6454                nr_pages, jiffies_to_msecs(jiffies - start));
6455}
6456
6457#endif
6458static void __meminit zone_init_free_lists(struct zone *zone)
6459{
6460        unsigned int order, t;
6461        for_each_migratetype_order(order, t) {
6462                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
6463                zone->free_area[order].nr_free = 0;
6464        }
6465}
6466
6467#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
6468/*
6469 * Only struct pages that correspond to ranges defined by memblock.memory
6470 * are zeroed and initialized by going through __init_single_page() during
6471 * memmap_init_zone_range().
6472 *
6473 * But, there could be struct pages that correspond to holes in
6474 * memblock.memory. This can happen for the following reasons:
6475 * - physical memory bank size is not necessarily the exact multiple of the
6476 *   arbitrary section size
6477 * - early reserved memory may not be listed in memblock.memory
6478 * - memory layouts defined with memmap= kernel parameter may not align
6479 *   nicely with memmap sections
6480 *
6481 * Explicitly initialize those struct pages so that:
6482 * - PG_Reserved is set
6483 * - zone and node links point to zone and node that span the page if the
6484 *   hole is in the middle of a zone
6485 * - zone and node links point to adjacent zone/node if the hole falls on
6486 *   the zone boundary; the pages in such holes will be prepended to the
6487 *   zone/node above the hole except for the trailing pages in the last
6488 *   section that will be appended to the zone/node below.
6489 */
6490static void __init init_unavailable_range(unsigned long spfn,
6491                                          unsigned long epfn,
6492                                          int zone, int node)
6493{
6494        unsigned long pfn;
6495        u64 pgcnt = 0;
6496
6497        for (pfn = spfn; pfn < epfn; pfn++) {
6498                if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
6499                        pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
6500                                + pageblock_nr_pages - 1;
6501                        continue;
6502                }
6503                __init_single_page(pfn_to_page(pfn), pfn, zone, node);
6504                __SetPageReserved(pfn_to_page(pfn));
6505                pgcnt++;
6506        }
6507
6508        if (pgcnt)
6509                pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6510                        node, zone_names[zone], pgcnt);
6511}
6512#else
6513static inline void init_unavailable_range(unsigned long spfn,
6514                                          unsigned long epfn,
6515                                          int zone, int node)
6516{
6517}
6518#endif
6519
6520static void __init memmap_init_zone_range(struct zone *zone,
6521                                          unsigned long start_pfn,
6522                                          unsigned long end_pfn,
6523                                          unsigned long *hole_pfn)
6524{
6525        unsigned long zone_start_pfn = zone->zone_start_pfn;
6526        unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
6527        int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
6528
6529        start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
6530        end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
6531
6532        if (start_pfn >= end_pfn)
6533                return;
6534
6535        memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
6536                          zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
6537
6538        if (*hole_pfn < start_pfn)
6539                init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
6540
6541        *hole_pfn = end_pfn;
6542}
6543
6544static void __init memmap_init(void)
6545{
6546        unsigned long start_pfn, end_pfn;
6547        unsigned long hole_pfn = 0;
6548        int i, j, zone_id, nid;
6549
6550        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
6551                struct pglist_data *node = NODE_DATA(nid);
6552
6553                for (j = 0; j < MAX_NR_ZONES; j++) {
6554                        struct zone *zone = node->node_zones + j;
6555
6556                        if (!populated_zone(zone))
6557                                continue;
6558
6559                        memmap_init_zone_range(zone, start_pfn, end_pfn,
6560                                               &hole_pfn);
6561                        zone_id = j;
6562                }
6563        }
6564
6565#ifdef CONFIG_SPARSEMEM
6566        /*
6567         * Initialize the memory map for the hole in the range
6568         * [memory_end, section_end].
6569         * Append the pages in this hole to the highest zone in the last
6570         * node.
6571         * The call to init_unavailable_range() is outside the ifdef to
6572         * silence the compiler warning about zone_id being set but not
6573         * used; for FLATMEM it is a nop anyway.
6574         */
6575        end_pfn = round_up(end_pfn, PAGES_PER_SECTION);
6576        if (hole_pfn < end_pfn)
6577#endif
6578                init_unavailable_range(hole_pfn, end_pfn, zone_id, nid);
6579}
6580
6581static int zone_batchsize(struct zone *zone)
6582{
6583#ifdef CONFIG_MMU
6584        int batch;
6585
6586        /*
6587         * The per-cpu-pages pools are set to around 1000th of the
6588         * size of the zone.
6589         */
6590        batch = zone_managed_pages(zone) / 1024;
6591        /* But no more than a meg. */
6592        if (batch * PAGE_SIZE > 1024 * 1024)
6593                batch = (1024 * 1024) / PAGE_SIZE;
6594        batch /= 4;             /* We effectively *= 4 below */
6595        if (batch < 1)
6596                batch = 1;
6597
6598        /*
6599         * Clamp the batch to a 2^n - 1 value. Having a power
6600         * of 2 value was found to be more likely to have
6601         * suboptimal cache aliasing properties in some cases.
6602         *
6603         * For example if 2 tasks are alternately allocating
6604         * batches of pages, one task can end up with a lot
6605         * of pages of one half of the possible page colors
6606         * and the other with pages of the other colors.
6607         */
6608        batch = rounddown_pow_of_two(batch + batch/2) - 1;
6609
6610        return batch;
6611
6612#else
6613        /* The deferral and batching of frees should be suppressed under NOMMU
6614         * conditions.
6615         *
6616         * The problem is that NOMMU needs to be able to allocate large chunks
6617         * of contiguous memory as there's no hardware page translation to
6618         * assemble apparent contiguous memory from discontiguous pages.
6619         *
6620         * Queueing large contiguous runs of pages for batching, however,
6621         * causes the pages to actually be freed in smaller chunks.  As there
6622         * can be a significant delay between the individual batches being
6623         * recycled, this leads to the once large chunks of space being
6624         * fragmented and becoming unavailable for high-order allocations.
6625         */
6626        return 0;
6627#endif
6628}
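/*
 * Worked example for the MMU case above (a sketch, assuming 4 KiB pages):
 * a zone with 1 GiB of managed memory has 262144 pages, so
 * batch = 262144 / 1024 = 256; 256 pages are exactly one meg, so the
 * clamp does not trigger; batch /= 4 gives 64; and
 * rounddown_pow_of_two(64 + 32) - 1 = 63.  Any zone of roughly 1 GiB or
 * more therefore ends up with the familiar "LIFO batch:63".
 */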
6629
6630/*
6631 * pcp->high and pcp->batch values are related and generally batch is lower
6632 * than high. They are also related to pcp->count such that count is lower
6633 * than high, and as soon as it reaches high, the pcplist is flushed.
6634 *
6635 * However, guaranteeing these relations at all times would require e.g. write
6636 * barriers here but also careful usage of read barriers at the read side, and
6637 * thus be prone to error and bad for performance. Thus the update only prevents
6638 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
6639 * can cope with those fields changing asynchronously, and fully trust only the
6640 * pcp->count field on the local CPU with interrupts disabled.
6641 *
6642 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
6643 * outside of boot time (or some other assurance that no concurrent updaters
6644 * exist).
6645 */
6646static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
6647                unsigned long batch)
6648{
6649        WRITE_ONCE(pcp->batch, batch);
6650        WRITE_ONCE(pcp->high, high);
6651}
6652
6653static void pageset_init(struct per_cpu_pageset *p)
6654{
6655        struct per_cpu_pages *pcp;
6656        int migratetype;
6657
6658        memset(p, 0, sizeof(*p));
6659
6660        pcp = &p->pcp;
6661        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
6662                INIT_LIST_HEAD(&pcp->lists[migratetype]);
6663
6664        /*
6665         * Set batch and high values safe for a boot pageset. A true percpu
6666         * pageset's initialization will update them subsequently. Here we don't
6667         * need to be as careful as pageset_update() as nobody can access the
6668         * pageset yet.
6669         */
6670        pcp->high = BOOT_PAGESET_HIGH;
6671        pcp->batch = BOOT_PAGESET_BATCH;
6672}
6673
6674static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
6675                unsigned long batch)
6676{
6677        struct per_cpu_pageset *p;
6678        int cpu;
6679
6680        for_each_possible_cpu(cpu) {
6681                p = per_cpu_ptr(zone->pageset, cpu);
6682                pageset_update(&p->pcp, high, batch);
6683        }
6684}
6685
6686/*
6687 * Calculate and set new high and batch values for all per-cpu pagesets of a
6688 * zone, based on the zone's size and the percpu_pagelist_fraction sysctl.
6689 */
6690static void zone_set_pageset_high_and_batch(struct zone *zone)
6691{
6692        unsigned long new_high, new_batch;
6693
6694        if (percpu_pagelist_fraction) {
6695                new_high = zone_managed_pages(zone) / percpu_pagelist_fraction;
6696                new_batch = max(1UL, new_high / 4);
6697                if ((new_high / 4) > (PAGE_SHIFT * 8))
6698                        new_batch = PAGE_SHIFT * 8;
6699        } else {
6700                new_batch = zone_batchsize(zone);
6701                new_high = 6 * new_batch;
6702                new_batch = max(1UL, 1 * new_batch);
6703        }
6704
6705        if (zone->pageset_high == new_high &&
6706            zone->pageset_batch == new_batch)
6707                return;
6708
6709        zone->pageset_high = new_high;
6710        zone->pageset_batch = new_batch;
6711
6712        __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
6713}
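/*
 * Worked examples for the two branches above (sketches, assuming 4 KiB
 * pages, i.e. PAGE_SHIFT == 12): with percpu_pagelist_fraction == 8 and
 * 1048576 managed pages (4 GiB), new_high = 131072 pages per CPU and
 * new_high / 4 = 32768 exceeds PAGE_SHIFT * 8 = 96, so new_batch is
 * clamped to 96.  With the sysctl unset, a large zone gets
 * new_batch = 63 from zone_batchsize() and new_high = 6 * 63 = 378
 * pages, i.e. roughly 1.5 MiB may sit on each CPU's pcplists.
 */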
6714
6715void __meminit setup_zone_pageset(struct zone *zone)
6716{
6717        struct per_cpu_pageset *p;
6718        int cpu;
6719
6720        zone->pageset = alloc_percpu(struct per_cpu_pageset);
6721        for_each_possible_cpu(cpu) {
6722                p = per_cpu_ptr(zone->pageset, cpu);
6723                pageset_init(p);
6724        }
6725
6726        zone_set_pageset_high_and_batch(zone);
6727}
6728
6729/*
6730 * Allocate per cpu pagesets and initialize them.
6731 * Before this call only boot pagesets were available.
6732 */
6733void __init setup_per_cpu_pageset(void)
6734{
6735        struct pglist_data *pgdat;
6736        struct zone *zone;
6737        int __maybe_unused cpu;
6738
6739        for_each_populated_zone(zone)
6740                setup_zone_pageset(zone);
6741
6742#ifdef CONFIG_NUMA
6743        /*
6744         * Unpopulated zones continue using the boot pagesets.
6745         * The numa stats for these pagesets need to be reset.
6746         * Otherwise, they will end up skewing the stats of
6747         * the nodes these zones are associated with.
6748         */
6749        for_each_possible_cpu(cpu) {
6750                struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
6751                memset(pcp->vm_numa_stat_diff, 0,
6752                       sizeof(pcp->vm_numa_stat_diff));
6753        }
6754#endif
6755
6756        for_each_online_pgdat(pgdat)
6757                pgdat->per_cpu_nodestats =
6758                        alloc_percpu(struct per_cpu_nodestat);
6759}
6760
6761static __meminit void zone_pcp_init(struct zone *zone)
6762{
6763        /*
6764         * per cpu subsystem is not up at this point. The following code
6765         * relies on the ability of the linker to provide the
6766         * offset of a (static) per cpu variable into the per cpu area.
6767         */
6768        zone->pageset = &boot_pageset;
6769        zone->pageset_high = BOOT_PAGESET_HIGH;
6770        zone->pageset_batch = BOOT_PAGESET_BATCH;
6771
6772        if (populated_zone(zone))
6773                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
6774                        zone->name, zone->present_pages,
6775                                         zone_batchsize(zone));
6776}
6777
6778void __meminit init_currently_empty_zone(struct zone *zone,
6779                                        unsigned long zone_start_pfn,
6780                                        unsigned long size)
6781{
6782        struct pglist_data *pgdat = zone->zone_pgdat;
6783        int zone_idx = zone_idx(zone) + 1;
6784
6785        if (zone_idx > pgdat->nr_zones)
6786                pgdat->nr_zones = zone_idx;
6787
6788        zone->zone_start_pfn = zone_start_pfn;
6789
6790        mminit_dprintk(MMINIT_TRACE, "memmap_init",
6791                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
6792                        pgdat->node_id,
6793                        (unsigned long)zone_idx(zone),
6794                        zone_start_pfn, (zone_start_pfn + size));
6795
6796        zone_init_free_lists(zone);
6797        zone->initialized = 1;
6798}
6799
6800/**
6801 * get_pfn_range_for_nid - Return the start and end page frames for a node
6802 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
6803 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6804 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
6805 *
6806 * It returns the start and end page frame of a node based on information
6807 * provided by memblock_set_node(). If called for a node
6808 * with no available memory, the start and end PFNs will simply
6809 * be 0.
6810 */
6811void __init get_pfn_range_for_nid(unsigned int nid,
6812                        unsigned long *start_pfn, unsigned long *end_pfn)
6813{
6814        unsigned long this_start_pfn, this_end_pfn;
6815        int i;
6816
6817        *start_pfn = -1UL;
6818        *end_pfn = 0;
6819
6820        for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
6821                *start_pfn = min(*start_pfn, this_start_pfn);
6822                *end_pfn = max(*end_pfn, this_end_pfn);
6823        }
6824
6825        if (*start_pfn == -1UL)
6826                *start_pfn = 0;
6827}
6828
6829/*
6830 * This finds a zone that can be used for ZONE_MOVABLE pages. The
6831 * assumption is made that zones within a node are ordered by monotonically
6832 * increasing memory addresses, so that the "highest" populated zone is used.
6833 */
6834static void __init find_usable_zone_for_movable(void)
6835{
6836        int zone_index;
6837        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
6838                if (zone_index == ZONE_MOVABLE)
6839                        continue;
6840
6841                if (arch_zone_highest_possible_pfn[zone_index] >
6842                                arch_zone_lowest_possible_pfn[zone_index])
6843                        break;
6844        }
6845
6846        VM_BUG_ON(zone_index == -1);
6847        movable_zone = zone_index;
6848}
6849
6850/*
6851 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
6852 * because it is sized independent of architecture. Unlike the other zones,
6853 * the starting point for ZONE_MOVABLE is not fixed. It may be different
6854 * in each node depending on the size of each node and how evenly kernelcore
6855 * is distributed. This helper function adjusts the zone ranges
6856 * provided by the architecture for a given node by using the end of the
6857 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
6858 * zones within a node are in order of monotonically increasing memory addresses.
6859 */
6860static void __init adjust_zone_range_for_zone_movable(int nid,
6861                                        unsigned long zone_type,
6862                                        unsigned long node_start_pfn,
6863                                        unsigned long node_end_pfn,
6864                                        unsigned long *zone_start_pfn,
6865                                        unsigned long *zone_end_pfn)
6866{
6867        /* Only adjust if ZONE_MOVABLE is on this node */
6868        if (zone_movable_pfn[nid]) {
6869                /* Size ZONE_MOVABLE */
6870                if (zone_type == ZONE_MOVABLE) {
6871                        *zone_start_pfn = zone_movable_pfn[nid];
6872                        *zone_end_pfn = min(node_end_pfn,
6873                                arch_zone_highest_possible_pfn[movable_zone]);
6874
6875                /* Adjust for ZONE_MOVABLE starting within this range */
6876                } else if (!mirrored_kernelcore &&
6877                        *zone_start_pfn < zone_movable_pfn[nid] &&
6878                        *zone_end_pfn > zone_movable_pfn[nid]) {
6879                        *zone_end_pfn = zone_movable_pfn[nid];
6880
6881                /* Check if this whole range is within ZONE_MOVABLE */
6882                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
6883                        *zone_start_pfn = *zone_end_pfn;
6884        }
6885}
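/*
 * Example (hypothetical PFNs, sketch only): a node spans PFNs
 * 0x40000-0x140000, zone_movable_pfn[nid] == 0x100000 and
 * mirrored_kernelcore is off.  A ZONE_NORMAL range covering
 * 0x40000-0x140000 is trimmed to 0x40000-0x100000 by the middle branch,
 * while ZONE_MOVABLE itself is sized by the first branch to
 * 0x100000-0x140000 (further bounded by the highest usable zone's end).
 */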
6886
6887/*
6888 * Return the number of pages a zone spans in a node, including holes
6889 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
6890 */
6891static unsigned long __init zone_spanned_pages_in_node(int nid,
6892                                        unsigned long zone_type,
6893                                        unsigned long node_start_pfn,
6894                                        unsigned long node_end_pfn,
6895                                        unsigned long *zone_start_pfn,
6896                                        unsigned long *zone_end_pfn)
6897{
6898        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6899        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6900        /* When hotadding a new node from cpu_up(), the node should be empty */
6901        if (!node_start_pfn && !node_end_pfn)
6902                return 0;
6903
6904        /* Get the start and end of the zone */
6905        *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6906        *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6907        adjust_zone_range_for_zone_movable(nid, zone_type,
6908                                node_start_pfn, node_end_pfn,
6909                                zone_start_pfn, zone_end_pfn);
6910
6911        /* Check that this node has pages within the zone's required range */
6912        if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
6913                return 0;
6914
6915        /* Move the zone boundaries inside the node if necessary */
6916        *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
6917        *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
6918
6919        /* Return the spanned pages */
6920        return *zone_end_pfn - *zone_start_pfn;
6921}
6922
6923/*
6924 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
6925 * then all holes in the requested range will be accounted for.
6926 */
6927unsigned long __init __absent_pages_in_range(int nid,
6928                                unsigned long range_start_pfn,
6929                                unsigned long range_end_pfn)
6930{
6931        unsigned long nr_absent = range_end_pfn - range_start_pfn;
6932        unsigned long start_pfn, end_pfn;
6933        int i;
6934
6935        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
6936                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
6937                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
6938                nr_absent -= end_pfn - start_pfn;
6939        }
6940        return nr_absent;
6941}
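/*
 * Worked example (hypothetical memblock layout): for the range
 * 0x10000-0x20000 (65536 pages), memory regions covering 0x10000-0x14000
 * and 0x18000-0x1c000 contribute 16384 pages each, so
 * nr_absent = 65536 - 16384 - 16384 = 32768 pages of holes.
 */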
6942
6943/**
6944 * absent_pages_in_range - Return number of page frames in holes within a range
6945 * @start_pfn: The start PFN to start searching for holes
6946 * @end_pfn: The end PFN to stop searching for holes
6947 *
6948 * Return: the number of page frames in memory holes within a range.
6949 */
6950unsigned long __init absent_pages_in_range(unsigned long start_pfn,
6951                                                        unsigned long end_pfn)
6952{
6953        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
6954}
6955
6956/* Return the number of page frames in holes in a zone on a node */
6957static unsigned long __init zone_absent_pages_in_node(int nid,
6958                                        unsigned long zone_type,
6959                                        unsigned long node_start_pfn,
6960                                        unsigned long node_end_pfn)
6961{
6962        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
6963        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
6964        unsigned long zone_start_pfn, zone_end_pfn;
6965        unsigned long nr_absent;
6966
6967        /* When hotadding a new node from cpu_up(), the node should be empty */
6968        if (!node_start_pfn && !node_end_pfn)
6969                return 0;
6970
6971        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
6972        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
6973
6974        adjust_zone_range_for_zone_movable(nid, zone_type,
6975                        node_start_pfn, node_end_pfn,
6976                        &zone_start_pfn, &zone_end_pfn);
6977        nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
6978
6979        /*
6980         * ZONE_MOVABLE handling.
6981         * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
6982         * and vice versa.
6983         */
6984        if (mirrored_kernelcore && zone_movable_pfn[nid]) {
6985                unsigned long start_pfn, end_pfn;
6986                struct memblock_region *r;
6987
6988                for_each_mem_region(r) {
6989                        start_pfn = clamp(memblock_region_memory_base_pfn(r),
6990                                          zone_start_pfn, zone_end_pfn);
6991                        end_pfn = clamp(memblock_region_memory_end_pfn(r),
6992                                        zone_start_pfn, zone_end_pfn);
6993
6994                        if (zone_type == ZONE_MOVABLE &&
6995                            memblock_is_mirror(r))
6996                                nr_absent += end_pfn - start_pfn;
6997
6998                        if (zone_type == ZONE_NORMAL &&
6999                            !memblock_is_mirror(r))
7000                                nr_absent += end_pfn - start_pfn;
7001                }
7002        }
7003
7004        return nr_absent;
7005}
7006
7007static void __init calculate_node_totalpages(struct pglist_data *pgdat,
7008                                                unsigned long node_start_pfn,
7009                                                unsigned long node_end_pfn)
7010{
7011        unsigned long realtotalpages = 0, totalpages = 0;
7012        enum zone_type i;
7013
7014        for (i = 0; i < MAX_NR_ZONES; i++) {
7015                struct zone *zone = pgdat->node_zones + i;
7016                unsigned long zone_start_pfn, zone_end_pfn;
7017                unsigned long spanned, absent;
7018                unsigned long size, real_size;
7019
7020                spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
7021                                                     node_start_pfn,
7022                                                     node_end_pfn,
7023                                                     &zone_start_pfn,
7024                                                     &zone_end_pfn);
7025                absent = zone_absent_pages_in_node(pgdat->node_id, i,
7026                                                   node_start_pfn,
7027                                                   node_end_pfn);
7028
7029                size = spanned;
7030                real_size = size - absent;
7031
7032                if (size)
7033                        zone->zone_start_pfn = zone_start_pfn;
7034                else
7035                        zone->zone_start_pfn = 0;
7036                zone->spanned_pages = size;
7037                zone->present_pages = real_size;
7038
7039                totalpages += size;
7040                realtotalpages += real_size;
7041        }
7042
7043        pgdat->node_spanned_pages = totalpages;
7044        pgdat->node_present_pages = realtotalpages;
7045        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
7046                                                        realtotalpages);
7047}
7048
7049#ifndef CONFIG_SPARSEMEM
7050/*
7051 * Calculate the size of zone->pageblock_flags rounded up to an unsigned long.
7052 * Start by making sure zonesize is a multiple of pageblock_order by rounding
7053 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
7054 * round what is now in bits up to the nearest long in bits, then return it
7055 * in bytes.
7056 */
7057static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7058{
7059        unsigned long usemapsize;
7060
7061        zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7062        usemapsize = roundup(zonesize, pageblock_nr_pages);
7063        usemapsize = usemapsize >> pageblock_order;
7064        usemapsize *= NR_PAGEBLOCK_BITS;
7065        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7066
7067        return usemapsize / 8;
7068}
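/*
 * Worked example (a sketch; assumes 4 KiB pages, pageblock_order 9 and
 * NR_PAGEBLOCK_BITS == 4): a pageblock-aligned zone spanning 1 GiB has
 * 262144 pages, i.e. 262144 / 512 = 512 pageblocks; 512 * 4 = 2048 bits
 * is already a multiple of the long size on 64-bit, so the usemap costs
 * 2048 / 8 = 256 bytes.
 */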
7069
7070static void __ref setup_usemap(struct zone *zone)
7071{
7072        unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7073                                               zone->spanned_pages);
7074        zone->pageblock_flags = NULL;
7075        if (usemapsize) {
7076                zone->pageblock_flags =
7077                        memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7078                                            zone_to_nid(zone));
7079                if (!zone->pageblock_flags)
7080                        panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7081                              usemapsize, zone->name, zone_to_nid(zone));
7082        }
7083}
7084#else
7085static inline void setup_usemap(struct zone *zone) {}
7086#endif /* CONFIG_SPARSEMEM */
7087
7088#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7089
7090/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7091void __init set_pageblock_order(void)
7092{
7093        unsigned int order;
7094
7095        /* Check that pageblock_nr_pages has not already been setup */
7096        if (pageblock_order)
7097                return;
7098
7099        if (HPAGE_SHIFT > PAGE_SHIFT)
7100                order = HUGETLB_PAGE_ORDER;
7101        else
7102                order = MAX_ORDER - 1;
7103
7104        /*
7105         * Assume the largest contiguous order of interest is a huge page.
7106         * This value may be variable depending on boot parameters on IA64 and
7107         * powerpc.
7108         */
7109        pageblock_order = order;
7110}
7111#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7112
7113/*
7114 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7115 * is unused as pageblock_order is set at compile-time. See
7116 * include/linux/pageblock-flags.h for the values of pageblock_order based on
7117 * the kernel config
7118 */
7119void __init set_pageblock_order(void)
7120{
7121}
7122
7123#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7124
7125static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7126                                                unsigned long present_pages)
7127{
7128        unsigned long pages = spanned_pages;
7129
7130        /*
7131         * Provide a more accurate estimation if there are holes within
7132         * the zone and SPARSEMEM is in use. If there are holes within the
7133         * zone, each populated memory region may cost us one or two extra
7134         * memmap pages due to alignment because memmap pages for each
7135         * populated regions may not be naturally aligned on page boundary.
7136         * So the (present_pages >> 4) heuristic is a tradeoff for that.
7137         */
7138        if (spanned_pages > present_pages + (present_pages >> 4) &&
7139            IS_ENABLED(CONFIG_SPARSEMEM))
7140                pages = present_pages;
7141
7142        return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
7143}
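/*
 * Worked example (a sketch; sizeof(struct page) is assumed to be 64 bytes
 * here, which is typical but config dependent): for a zone spanning
 * 1,048,576 pages of 4 KiB with no holes, the memmap costs
 * 1,048,576 * 64 bytes = 64 MiB, i.e. calc_memmap_size() returns 16,384
 * pages.
 */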
7144
7145#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7146static void pgdat_init_split_queue(struct pglist_data *pgdat)
7147{
7148        struct deferred_split *ds_queue = &pgdat->deferred_split_queue;
7149
7150        spin_lock_init(&ds_queue->split_queue_lock);
7151        INIT_LIST_HEAD(&ds_queue->split_queue);
7152        ds_queue->split_queue_len = 0;
7153}
7154#else
7155static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
7156#endif
7157
7158#ifdef CONFIG_COMPACTION
7159static void pgdat_init_kcompactd(struct pglist_data *pgdat)
7160{
7161        init_waitqueue_head(&pgdat->kcompactd_wait);
7162}
7163#else
7164static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
7165#endif
7166
7167static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
7168{
7169        pgdat_resize_init(pgdat);
7170
7171        pgdat_init_split_queue(pgdat);
7172        pgdat_init_kcompactd(pgdat);
7173
7174        init_waitqueue_head(&pgdat->kswapd_wait);
7175        init_waitqueue_head(&pgdat->pfmemalloc_wait);
7176
7177        pgdat_page_ext_init(pgdat);
7178        lruvec_init(&pgdat->__lruvec);
7179}
7180
7181static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
7182                                                        unsigned long remaining_pages)
7183{
7184        atomic_long_set(&zone->managed_pages, remaining_pages);
7185        zone_set_nid(zone, nid);
7186        zone->name = zone_names[idx];
7187        zone->zone_pgdat = NODE_DATA(nid);
7188        spin_lock_init(&zone->lock);
7189        zone_seqlock_init(zone);
7190        zone_pcp_init(zone);
7191}
7192
7193/*
7194 * Set up the zone data structures
7195 * - init pgdat internals
7196 * - init all zones belonging to this node
7197 *
7198 * NOTE: this function is only called during memory hotplug
7199 */
7200#ifdef CONFIG_MEMORY_HOTPLUG
7201void __ref free_area_init_core_hotplug(int nid)
7202{
7203        enum zone_type z;
7204        pg_data_t *pgdat = NODE_DATA(nid);
7205
7206        pgdat_init_internals(pgdat);
7207        for (z = 0; z < MAX_NR_ZONES; z++)
7208                zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
7209}
7210#endif
7211
7212/*
7213 * Set up the zone data structures:
7214 *   - mark all pages reserved
7215 *   - mark all memory queues empty
7216 *   - clear the memory bitmaps
7217 *
7218 * NOTE: pgdat should get zeroed by caller.
7219 * NOTE: this function is only called during early init.
7220 */
7221static void __init free_area_init_core(struct pglist_data *pgdat)
7222{
7223        enum zone_type j;
7224        int nid = pgdat->node_id;
7225
7226        pgdat_init_internals(pgdat);
7227        pgdat->per_cpu_nodestats = &boot_nodestats;
7228
7229        for (j = 0; j < MAX_NR_ZONES; j++) {
7230                struct zone *zone = pgdat->node_zones + j;
7231                unsigned long size, freesize, memmap_pages;
7232
7233                size = zone->spanned_pages;
7234                freesize = zone->present_pages;
7235
7236                /*
7237                 * Adjust freesize so that it accounts for how much memory
7238                 * is used by this zone for memmap. This affects the watermark
7239                 * and per-cpu initialisations
7240                 */
7241                memmap_pages = calc_memmap_size(size, freesize);
7242                if (!is_highmem_idx(j)) {
7243                        if (freesize >= memmap_pages) {
7244                                freesize -= memmap_pages;
7245                                if (memmap_pages)
7246                                        printk(KERN_DEBUG
7247                                               "  %s zone: %lu pages used for memmap\n",
7248                                               zone_names[j], memmap_pages);
7249                        } else
7250                                pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
7251                                        zone_names[j], memmap_pages, freesize);
7252                }
7253
7254                /* Account for reserved pages */
7255                if (j == 0 && freesize > dma_reserve) {
7256                        freesize -= dma_reserve;
7257                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
7258                                        zone_names[0], dma_reserve);
7259                }
7260
7261                if (!is_highmem_idx(j))
7262                        nr_kernel_pages += freesize;
7263                /* Charge for highmem memmap if there are enough kernel pages */
7264                else if (nr_kernel_pages > memmap_pages * 2)
7265                        nr_kernel_pages -= memmap_pages;
7266                nr_all_pages += freesize;
7267
7268                /*
7269                 * Set an approximate value for lowmem here; it will be adjusted
7270                 * when the bootmem allocator frees pages into the buddy system.
7271                 * And all highmem pages will be managed by the buddy system.
7272                 */
7273                zone_init_internals(zone, j, nid, freesize);
7274
7275                if (!size)
7276                        continue;
7277
7278                set_pageblock_order();
7279                setup_usemap(zone);
7280                init_currently_empty_zone(zone, zone->zone_start_pfn, size);
7281        }
7282}
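/*
 * Worked example for the freesize accounting above (a sketch;
 * sizeof(struct page) = 64 bytes is assumed): a non-highmem zone with
 * spanned_pages = present_pages = 262144 (1 GiB of 4 KiB pages) needs
 * 4096 memmap pages, so freesize drops from 262144 to 258048 before it is
 * handed to zone_init_internals() as the initial managed_pages value.
 */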
7283
7284#ifdef CONFIG_FLAT_NODE_MEM_MAP
7285static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
7286{
7287        unsigned long __maybe_unused start = 0;
7288        unsigned long __maybe_unused offset = 0;
7289
7290        /* Skip empty nodes */
7291        if (!pgdat->node_spanned_pages)
7292                return;
7293
7294        start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
7295        offset = pgdat->node_start_pfn - start;
7296        /* ia64 gets its own node_mem_map, before this, without bootmem */
7297        if (!pgdat->node_mem_map) {
7298                unsigned long size, end;
7299                struct page *map;
7300
7301                /*
7302                 * The zone's endpoints aren't required to be MAX_ORDER
7303                 * aligned, but the node_mem_map endpoints must be, in order
7304                 * for the buddy allocator to function correctly.
7305                 */
7306                end = pgdat_end_pfn(pgdat);
7307                end = ALIGN(end, MAX_ORDER_NR_PAGES);
7308                size =  (end - start) * sizeof(struct page);
7309                map = memblock_alloc_node(size, SMP_CACHE_BYTES,
7310                                          pgdat->node_id);
7311                if (!map)
7312                        panic("Failed to allocate %ld bytes for node %d memory map\n",
7313                              size, pgdat->node_id);
7314                pgdat->node_mem_map = map + offset;
7315        }
7316        pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7317                                __func__, pgdat->node_id, (unsigned long)pgdat,
7318                                (unsigned long)pgdat->node_mem_map);
7319#ifndef CONFIG_NEED_MULTIPLE_NODES
7320        /*
7321         * With no DISCONTIG, the global mem_map is just set as node 0's
7322         */
7323        if (pgdat == NODE_DATA(0)) {
7324                mem_map = NODE_DATA(0)->node_mem_map;
7325                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
7326                        mem_map -= offset;
7327        }
7328#endif
7329}
7330#else
7331static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
7332#endif /* CONFIG_FLAT_NODE_MEM_MAP */
7333
7334#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
7335static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
7336{
7337        pgdat->first_deferred_pfn = ULONG_MAX;
7338}
7339#else
7340static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
7341#endif
7342
7343static void __init free_area_init_node(int nid)
7344{
7345        pg_data_t *pgdat = NODE_DATA(nid);
7346        unsigned long start_pfn = 0;
7347        unsigned long end_pfn = 0;
7348
7349        /* pg_data_t should be reset to zero when it's allocated */
7350        WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);
7351
7352        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
7353
7354        pgdat->node_id = nid;
7355        pgdat->node_start_pfn = start_pfn;
7356        pgdat->per_cpu_nodestats = NULL;
7357
7358        pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7359                (u64)start_pfn << PAGE_SHIFT,
7360                end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
7361        calculate_node_totalpages(pgdat, start_pfn, end_pfn);
7362
7363        alloc_node_mem_map(pgdat);
7364        pgdat_set_deferred_range(pgdat);
7365
7366        free_area_init_core(pgdat);
7367}
7368
7369void __init free_area_init_memoryless_node(int nid)
7370{
7371        free_area_init_node(nid);
7372}
7373
7374#if MAX_NUMNODES > 1
7375/*
7376 * Figure out the number of possible node ids.
7377 */
7378void __init setup_nr_node_ids(void)
7379{
7380        unsigned int highest;
7381
7382        highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
7383        nr_node_ids = highest + 1;
7384}
7385#endif
7386
7387/**
7388 * node_map_pfn_alignment - determine the maximum internode alignment
7389 *
7390 * This function should be called after node map is populated and sorted.
7391 * It calculates the maximum power of two alignment which can distinguish
7392 * all the nodes.
7393 *
7394 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
7395 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
7396 * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
7397 * shifted, 1GiB is enough and this function will indicate so.
7398 *
7399 * This is used to test whether pfn -> nid mapping of the chosen memory
7400 * model has fine enough granularity to avoid incorrect mapping for the
7401 * populated node map.
7402 *
7403 * Return: the determined alignment in pfn's.  0 if there is no alignment
7404 * requirement (single node).
7405 */
7406unsigned long __init node_map_pfn_alignment(void)
7407{
7408        unsigned long accl_mask = 0, last_end = 0;
7409        unsigned long start, end, mask;
7410        int last_nid = NUMA_NO_NODE;
7411        int i, nid;
7412
7413        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
7414                if (!start || last_nid < 0 || last_nid == nid) {
7415                        last_nid = nid;
7416                        last_end = end;
7417                        continue;
7418                }
7419
7420                /*
7421                 * Start with a mask granular enough to pin-point to the
7422                 * start pfn and tick off bits one-by-one until it becomes
7423                 * too coarse to separate the current node from the last.
7424                 */
7425                mask = ~((1 << __ffs(start)) - 1);
7426                while (mask && last_end <= (start & (mask << 1)))
7427                        mask <<= 1;
7428
7429                /* accumulate all internode masks */
7430                accl_mask |= mask;
7431        }
7432
7433        /* convert mask to number of pages */
7434        return ~accl_mask + 1;
7435}
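/*
 * Worked example (a sketch, assuming 4 KiB pages): with node 0 spanning
 * pfns [0, 0x40000) and node 1 starting at pfn 0x40000 (both 1 GiB aligned),
 * the loop above keeps mask = ~0x3ffff for the second range, so the function
 * returns 0x40000 pfns, i.e. the 1 GiB alignment (1 << (30 - PAGE_SHIFT))
 * mentioned in the comment.
 */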
7436
7437/**
7438 * find_min_pfn_with_active_regions - Find the minimum PFN registered
7439 *
7440 * Return: the minimum PFN based on information provided via
7441 * memblock_set_node().
7442 */
7443unsigned long __init find_min_pfn_with_active_regions(void)
7444{
7445        return PHYS_PFN(memblock_start_of_DRAM());
7446}
7447
7448/*
7449 * early_calculate_totalpages()
7450 * Sum pages in active regions for movable zone.
7451 * Populate N_MEMORY for calculating usable_nodes.
7452 */
7453static unsigned long __init early_calculate_totalpages(void)
7454{
7455        unsigned long totalpages = 0;
7456        unsigned long start_pfn, end_pfn;
7457        int i, nid;
7458
7459        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7460                unsigned long pages = end_pfn - start_pfn;
7461
7462                totalpages += pages;
7463                if (pages)
7464                        node_set_state(nid, N_MEMORY);
7465        }
7466        return totalpages;
7467}
7468
7469/*
7470 * Find the PFN at which the Movable zone begins in each node. Kernel memory
7471 * is spread evenly between nodes as long as the nodes have enough
7472 * memory. When they don't, some nodes will have more kernelcore than
7473 * others.
7474 */
7475static void __init find_zone_movable_pfns_for_nodes(void)
7476{
7477        int i, nid;
7478        unsigned long usable_startpfn;
7479        unsigned long kernelcore_node, kernelcore_remaining;
7480        /* save the state before borrowing the nodemask */
7481        nodemask_t saved_node_state = node_states[N_MEMORY];
7482        unsigned long totalpages = early_calculate_totalpages();
7483        int usable_nodes = nodes_weight(node_states[N_MEMORY]);
7484        struct memblock_region *r;
7485
7486        /* Need to find movable_zone earlier when movable_node is specified. */
7487        find_usable_zone_for_movable();
7488
7489        /*
7490         * If movable_node is specified, ignore kernelcore and movablecore
7491         * options.
7492         */
7493        if (movable_node_is_enabled()) {
7494                for_each_mem_region(r) {
7495                        if (!memblock_is_hotpluggable(r))
7496                                continue;
7497
7498                        nid = memblock_get_region_node(r);
7499
7500                        usable_startpfn = PFN_DOWN(r->base);
7501                        zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7502                                min(usable_startpfn, zone_movable_pfn[nid]) :
7503                                usable_startpfn;
7504                }
7505
7506                goto out2;
7507        }
7508
7509        /*
7510         * If kernelcore=mirror is specified, ignore movablecore option
7511         */
7512        if (mirrored_kernelcore) {
7513                bool mem_below_4gb_not_mirrored = false;
7514
7515                for_each_mem_region(r) {
7516                        if (memblock_is_mirror(r))
7517                                continue;
7518
7519                        nid = memblock_get_region_node(r);
7520
7521                        usable_startpfn = memblock_region_memory_base_pfn(r);
7522
7523                        if (usable_startpfn < 0x100000) {
7524                                mem_below_4gb_not_mirrored = true;
7525                                continue;
7526                        }
7527
7528                        zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
7529                                min(usable_startpfn, zone_movable_pfn[nid]) :
7530                                usable_startpfn;
7531                }
7532
7533                if (mem_below_4gb_not_mirrored)
7534                        pr_warn("This configuration results in unmirrored kernel memory.\n");
7535
7536                goto out2;
7537        }
7538
7539        /*
7540         * If kernelcore=nn% or movablecore=nn% was specified, calculate the
7541         * amount of necessary memory.
7542         */
7543        if (required_kernelcore_percent)
7544                required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
7545                                       10000UL;
7546        if (required_movablecore_percent)
7547                required_movablecore = (totalpages * 100 * required_movablecore_percent) /
7548                                        10000UL;
7549
7550        /*
7551         * If movablecore= was specified, calculate the corresponding size
7552         * of kernelcore so that memory usable for any allocation type is
7553         * evenly spread. If both kernelcore
7554         * and movablecore are specified, then the value of kernelcore
7555         * will be used for required_kernelcore if it's greater than
7556         * what movablecore would have allowed.
7557         */
7558        if (required_movablecore) {
7559                unsigned long corepages;
7560
7561                /*
7562                 * Round-up so that ZONE_MOVABLE is at least as large as what
7563                 * was requested by the user
7564                 */
7565                required_movablecore =
7566                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
7567                required_movablecore = min(totalpages, required_movablecore);
7568                corepages = totalpages - required_movablecore;
7569
7570                required_kernelcore = max(required_kernelcore, corepages);
7571        }
7572
7573        /*
7574         * If kernelcore was not specified or kernelcore size is larger
7575         * than totalpages, there is no ZONE_MOVABLE.
7576         */
7577        if (!required_kernelcore || required_kernelcore >= totalpages)
7578                goto out;
7579
7580        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
7581        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
7582
7583restart:
7584        /* Spread kernelcore memory as evenly as possible throughout nodes */
7585        kernelcore_node = required_kernelcore / usable_nodes;
7586        for_each_node_state(nid, N_MEMORY) {
7587                unsigned long start_pfn, end_pfn;
7588
7589                /*
7590                 * Recalculate kernelcore_node if the division per node
7591                 * now exceeds what is necessary to satisfy the requested
7592                 * amount of memory for the kernel
7593                 */
7594                if (required_kernelcore < kernelcore_node)
7595                        kernelcore_node = required_kernelcore / usable_nodes;
7596
7597                /*
7598                 * As the map is walked, we track how much memory is usable
7599                 * by the kernel using kernelcore_remaining. When it is
7600                 * 0, the rest of the node is usable by ZONE_MOVABLE
7601                 */
7602                kernelcore_remaining = kernelcore_node;
7603
7604                /* Go through each range of PFNs within this node */
7605                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7606                        unsigned long size_pages;
7607
7608                        start_pfn = max(start_pfn, zone_movable_pfn[nid]);
7609                        if (start_pfn >= end_pfn)
7610                                continue;
7611
7612                        /* Account for what is only usable for kernelcore */
7613                        if (start_pfn < usable_startpfn) {
7614                                unsigned long kernel_pages;
7615                                kernel_pages = min(end_pfn, usable_startpfn)
7616                                                                - start_pfn;
7617
7618                                kernelcore_remaining -= min(kernel_pages,
7619                                                        kernelcore_remaining);
7620                                required_kernelcore -= min(kernel_pages,
7621                                                        required_kernelcore);
7622
7623                                /* Continue if range is now fully accounted */
7624                                if (end_pfn <= usable_startpfn) {
7625
7626                                        /*
7627                                         * Push zone_movable_pfn to the end so
7628                                         * that if we have to rebalance
7629                                         * kernelcore across nodes, we will
7630                                         * not double account here
7631                                         */
7632                                        zone_movable_pfn[nid] = end_pfn;
7633                                        continue;
7634                                }
7635                                start_pfn = usable_startpfn;
7636                        }
7637
7638                        /*
7639                         * The usable PFN range for ZONE_MOVABLE is from
7640                         * start_pfn->end_pfn. Calculate size_pages as the
7641                         * number of pages used as kernelcore
7642                         */
7643                        size_pages = end_pfn - start_pfn;
7644                        if (size_pages > kernelcore_remaining)
7645                                size_pages = kernelcore_remaining;
7646                        zone_movable_pfn[nid] = start_pfn + size_pages;
7647
7648                        /*
7649                         * Some kernelcore has been met, update counts and
7650                         * break if the kernelcore for this node has been
7651                         * satisfied
7652                         */
7653                        required_kernelcore -= min(required_kernelcore,
7654                                                                size_pages);
7655                        kernelcore_remaining -= size_pages;
7656                        if (!kernelcore_remaining)
7657                                break;
7658                }
7659        }
7660
7661        /*
7662         * If there is still required_kernelcore, we do another pass with one
7663         * less node in the count. This will push zone_movable_pfn[nid] further
7664         * along on the nodes that still have memory until kernelcore is
7665         * satisfied
7666         */
7667        usable_nodes--;
7668        if (usable_nodes && required_kernelcore > usable_nodes)
7669                goto restart;
7670
7671out2:
7672        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7673        for (nid = 0; nid < MAX_NUMNODES; nid++)
7674                zone_movable_pfn[nid] =
7675                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7676
7677out:
7678        /* restore the node_state */
7679        node_states[N_MEMORY] = saved_node_state;
7680}
7681
7682/* Any regular or high memory on that node ? */
7683static void check_for_memory(pg_data_t *pgdat, int nid)
7684{
7685        enum zone_type zone_type;
7686
7687        for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7688                struct zone *zone = &pgdat->node_zones[zone_type];
7689                if (populated_zone(zone)) {
7690                        if (IS_ENABLED(CONFIG_HIGHMEM))
7691                                node_set_state(nid, N_HIGH_MEMORY);
7692                        if (zone_type <= ZONE_NORMAL)
7693                                node_set_state(nid, N_NORMAL_MEMORY);
7694                        break;
7695                }
7696        }
7697}
7698
7699/*
7700 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
7701 * such cases we allow max_zone_pfn to be sorted in descending order.
7702 */
7703bool __weak arch_has_descending_max_zone_pfns(void)
7704{
7705        return false;
7706}
7707
7708/**
7709 * free_area_init - Initialise all pg_data_t and zone data
7710 * @max_zone_pfn: an array of max PFNs for each zone
7711 *
7712 * This will call free_area_init_node() for each active node in the system.
7713 * Using the page ranges provided by memblock_set_node(), the size of each
7714 * zone in each node and their holes are calculated. If the maximum PFNs
7715 * of two adjacent zones match, it is assumed that the higher zone is empty.
7716 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7717 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7718 * starts where the previous one ended. For example, ZONE_DMA32 starts
7719 * at arch_max_dma_pfn.
7720 */
7721void __init free_area_init(unsigned long *max_zone_pfn)
7722{
7723        unsigned long start_pfn, end_pfn;
7724        int i, nid, zone;
7725        bool descending;
7726
7727        /* Record where the zone boundaries are */
7728        memset(arch_zone_lowest_possible_pfn, 0,
7729                                sizeof(arch_zone_lowest_possible_pfn));
7730        memset(arch_zone_highest_possible_pfn, 0,
7731                                sizeof(arch_zone_highest_possible_pfn));
7732
7733        start_pfn = find_min_pfn_with_active_regions();
7734        descending = arch_has_descending_max_zone_pfns();
7735
7736        for (i = 0; i < MAX_NR_ZONES; i++) {
7737                if (descending)
7738                        zone = MAX_NR_ZONES - i - 1;
7739                else
7740                        zone = i;
7741
7742                if (zone == ZONE_MOVABLE)
7743                        continue;
7744
7745                end_pfn = max(max_zone_pfn[zone], start_pfn);
7746                arch_zone_lowest_possible_pfn[zone] = start_pfn;
7747                arch_zone_highest_possible_pfn[zone] = end_pfn;
7748
7749                start_pfn = end_pfn;
7750        }
7751
7752        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
7753        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
7754        find_zone_movable_pfns_for_nodes();
7755
7756        /* Print out the zone ranges */
7757        pr_info("Zone ranges:\n");
7758        for (i = 0; i < MAX_NR_ZONES; i++) {
7759                if (i == ZONE_MOVABLE)
7760                        continue;
7761                pr_info("  %-8s ", zone_names[i]);
7762                if (arch_zone_lowest_possible_pfn[i] ==
7763                                arch_zone_highest_possible_pfn[i])
7764                        pr_cont("empty\n");
7765                else
7766                        pr_cont("[mem %#018Lx-%#018Lx]\n",
7767                                (u64)arch_zone_lowest_possible_pfn[i]
7768                                        << PAGE_SHIFT,
7769                                ((u64)arch_zone_highest_possible_pfn[i]
7770                                        << PAGE_SHIFT) - 1);
7771        }
7772
7773        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
7774        pr_info("Movable zone start for each node\n");
7775        for (i = 0; i < MAX_NUMNODES; i++) {
7776                if (zone_movable_pfn[i])
7777                        pr_info("  Node %d: %#018Lx\n", i,
7778                               (u64)zone_movable_pfn[i] << PAGE_SHIFT);
7779        }
7780
7781        /*
7782         * Print out the early node map, and initialize the
7783         * subsection-map relative to active online memory ranges to
7784         * enable future "sub-section" extensions of the memory map.
7785         */
7786        pr_info("Early memory node ranges\n");
7787        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
7788                pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7789                        (u64)start_pfn << PAGE_SHIFT,
7790                        ((u64)end_pfn << PAGE_SHIFT) - 1);
7791                subsection_map_init(start_pfn, end_pfn - start_pfn);
7792        }
7793
7794        /* Initialise every node */
7795        mminit_verify_pageflags_layout();
7796        setup_nr_node_ids();
7797        for_each_online_node(nid) {
7798                pg_data_t *pgdat = NODE_DATA(nid);
7799                free_area_init_node(nid);
7800
7801                /* Any memory on that node */
7802                if (pgdat->node_present_pages)
7803                        node_set_state(nid, N_MEMORY);
7804                check_for_memory(pgdat, nid);
7805        }
7806
7807        memmap_init();
7808}
7809
7810static int __init cmdline_parse_core(char *p, unsigned long *core,
7811                                     unsigned long *percent)
7812{
7813        unsigned long long coremem;
7814        char *endptr;
7815
7816        if (!p)
7817                return -EINVAL;
7818
7819        /* Value may be a percentage of total memory, otherwise bytes */
7820        coremem = simple_strtoull(p, &endptr, 0);
7821        if (*endptr == '%') {
7822                /* Paranoid check for percent values greater than 100 */
7823                WARN_ON(coremem > 100);
7824
7825                *percent = coremem;
7826        } else {
7827                coremem = memparse(p, &p);
7828                /* Paranoid check that UL is enough for the coremem value */
7829                WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
7830
7831                *core = coremem >> PAGE_SHIFT;
7832                *percent = 0UL;
7833        }
7834        return 0;
7835}
7836
7837/*
7838 * kernelcore=size sets the amount of memory to use for allocations that
7839 * cannot be reclaimed or migrated.
7840 */
7841static int __init cmdline_parse_kernelcore(char *p)
7842{
7843        /* parse kernelcore=mirror */
7844        if (parse_option_str(p, "mirror")) {
7845                mirrored_kernelcore = true;
7846                return 0;
7847        }
7848
7849        return cmdline_parse_core(p, &required_kernelcore,
7850                                  &required_kernelcore_percent);
7851}
7852
7853/*
7854 * movablecore=size sets the amount of memory to use for allocations that
7855 * can be reclaimed or migrated.
7856 */
7857static int __init cmdline_parse_movablecore(char *p)
7858{
7859        return cmdline_parse_core(p, &required_movablecore,
7860                                  &required_movablecore_percent);
7861}
7862
7863early_param("kernelcore", cmdline_parse_kernelcore);
7864early_param("movablecore", cmdline_parse_movablecore);
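/*
 * Example boot command lines handled by the parsers above (a sketch; the
 * exact syntax is documented in Documentation/admin-guide/kernel-parameters.txt):
 *
 *   kernelcore=512M    - reserve 512 MiB for non-movable kernel allocations,
 *                        the rest of memory goes to ZONE_MOVABLE
 *   kernelcore=50%     - size kernelcore as a percentage of total memory
 *   kernelcore=mirror  - restrict kernel memory to mirrored regions only
 *   movablecore=2G     - ask for at least 2 GiB of ZONE_MOVABLE
 */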
7865
7866void adjust_managed_page_count(struct page *page, long count)
7867{
7868        atomic_long_add(count, &page_zone(page)->managed_pages);
7869        totalram_pages_add(count);
7870#ifdef CONFIG_HIGHMEM
7871        if (PageHighMem(page))
7872                totalhigh_pages_add(count);
7873#endif
7874}
7875EXPORT_SYMBOL(adjust_managed_page_count);
7876
7877unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
7878{
7879        void *pos;
7880        unsigned long pages = 0;
7881
7882        start = (void *)PAGE_ALIGN((unsigned long)start);
7883        end = (void *)((unsigned long)end & PAGE_MASK);
7884        for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
7885                struct page *page = virt_to_page(pos);
7886                void *direct_map_addr;
7887
7888                /*
7889                 * 'direct_map_addr' might be different from 'pos'
7890                 * because some architectures' virt_to_page()
7891                 * work with aliases.  Getting the direct map
7892                 * address ensures that we get a _writeable_
7893                 * alias for the memset().
7894                 */
7895                direct_map_addr = page_address(page);
7896                /*
7897                 * Perform a kasan-unchecked memset() since this memory
7898                 * has not been initialized.
7899                 */
7900                direct_map_addr = kasan_reset_tag(direct_map_addr);
7901                if ((unsigned int)poison <= 0xFF)
7902                        memset(direct_map_addr, poison, PAGE_SIZE);
7903
7904                free_reserved_page(page);
7905        }
7906
7907        if (pages && s)
7908                pr_info("Freeing %s memory: %ldK\n",
7909                        s, pages << (PAGE_SHIFT - 10));
7910
7911        return pages;
7912}
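/*
 * Typical use (a sketch; see free_initmem_default() in include/linux/mm.h
 * for the real helper): architectures release the init sections with
 * something like
 *
 *   free_reserved_area(&__init_begin, &__init_end,
 *                      POISON_FREE_INITMEM, "unused kernel image");
 *
 * which poisons every freed page and logs a "Freeing ... memory: ...K" line
 * via the pr_info() above.
 */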
7913
7914void __init mem_init_print_info(void)
7915{
7916        unsigned long physpages, codesize, datasize, rosize, bss_size;
7917        unsigned long init_code_size, init_data_size;
7918
7919        physpages = get_num_physpages();
7920        codesize = _etext - _stext;
7921        datasize = _edata - _sdata;
7922        rosize = __end_rodata - __start_rodata;
7923        bss_size = __bss_stop - __bss_start;
7924        init_data_size = __init_end - __init_begin;
7925        init_code_size = _einittext - _sinittext;
7926
7927        /*
7928         * Detect special cases and adjust section sizes accordingly:
7929         * 1) .init.* may be embedded into .data sections
7930         * 2) .init.text.* may be out of [__init_begin, __init_end],
7931         *    please refer to arch/tile/kernel/vmlinux.lds.S.
7932         * 3) .rodata.* may be embedded into .text or .data sections.
7933         */
7934#define adj_init_size(start, end, size, pos, adj) \
7935        do { \
7936                if (start <= pos && pos < end && size > adj) \
7937                        size -= adj; \
7938        } while (0)
7939
7940        adj_init_size(__init_begin, __init_end, init_data_size,
7941                     _sinittext, init_code_size);
7942        adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
7943        adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
7944        adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
7945        adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
7946
7947#undef  adj_init_size
7948
7949        pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
7950#ifdef  CONFIG_HIGHMEM
7951                ", %luK highmem"
7952#endif
7953                ")\n",
7954                nr_free_pages() << (PAGE_SHIFT - 10),
7955                physpages << (PAGE_SHIFT - 10),
7956                codesize >> 10, datasize >> 10, rosize >> 10,
7957                (init_data_size + init_code_size) >> 10, bss_size >> 10,
7958                (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
7959                totalcma_pages << (PAGE_SHIFT - 10)
7960#ifdef  CONFIG_HIGHMEM
7961                , totalhigh_pages() << (PAGE_SHIFT - 10)
7962#endif
7963                );
7964}
7965
7966/**
7967 * set_dma_reserve - set the specified number of pages reserved in the first zone
7968 * @new_dma_reserve: The number of pages to mark reserved
7969 *
7970 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
7971 * In the DMA zone, a significant percentage may be consumed by kernel image
7972 * and other unfreeable allocations which can skew the watermarks badly. This
7973 * function may optionally be used to account for unfreeable pages in the
7974 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
7975 * smaller per-cpu batchsize.
7976 */
7977void __init set_dma_reserve(unsigned long new_dma_reserve)
7978{
7979        dma_reserve = new_dma_reserve;
7980}
7981
7982static int page_alloc_cpu_dead(unsigned int cpu)
7983{
7984
7985        lru_add_drain_cpu(cpu);
7986        drain_pages(cpu);
7987
7988        /*
7989         * Spill the event counters of the dead processor
7990         * into the current processors event counters.
7991         * This artificially elevates the count of the current
7992         * processor.
7993         */
7994        vm_events_fold_cpu(cpu);
7995
7996        /*
7997         * Zero the differential counters of the dead processor
7998         * so that the vm statistics are consistent.
7999         *
8000         * This is only okay since the processor is dead and cannot
8001         * race with what we are doing.
8002         */
8003        cpu_vm_stats_fold(cpu);
8004        return 0;
8005}
8006
8007#ifdef CONFIG_NUMA
8008int hashdist = HASHDIST_DEFAULT;
8009
8010static int __init set_hashdist(char *str)
8011{
8012        if (!str)
8013                return 0;
8014        hashdist = simple_strtoul(str, &str, 0);
8015        return 1;
8016}
8017__setup("hashdist=", set_hashdist);
8018#endif
8019
8020void __init page_alloc_init(void)
8021{
8022        int ret;
8023
8024#ifdef CONFIG_NUMA
8025        if (num_node_state(N_MEMORY) == 1)
8026                hashdist = 0;
8027#endif
8028
8029        ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
8030                                        "mm/page_alloc:dead", NULL,
8031                                        page_alloc_cpu_dead);
8032        WARN_ON(ret < 0);
8033}
8034
8035/*
8036 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
8037 *      or min_free_kbytes changes.
8038 */
8039static void calculate_totalreserve_pages(void)
8040{
8041        struct pglist_data *pgdat;
8042        unsigned long reserve_pages = 0;
8043        enum zone_type i, j;
8044
8045        for_each_online_pgdat(pgdat) {
8046
8047                pgdat->totalreserve_pages = 0;
8048
8049                for (i = 0; i < MAX_NR_ZONES; i++) {
8050                        struct zone *zone = pgdat->node_zones + i;
8051                        long max = 0;
8052                        unsigned long managed_pages = zone_managed_pages(zone);
8053
8054                        /* Find valid and maximum lowmem_reserve in the zone */
8055                        for (j = i; j < MAX_NR_ZONES; j++) {
8056                                if (zone->lowmem_reserve[j] > max)
8057                                        max = zone->lowmem_reserve[j];
8058                        }
8059
8060                        /* we treat the high watermark as reserved pages. */
8061                        max += high_wmark_pages(zone);
8062
8063                        if (max > managed_pages)
8064                                max = managed_pages;
8065
8066                        pgdat->totalreserve_pages += max;
8067
8068                        reserve_pages += max;
8069                }
8070        }
8071        totalreserve_pages = reserve_pages;
8072}
8073
8074/*
8075 * setup_per_zone_lowmem_reserve - called whenever
8076 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
8077 *      has a correct pages reserved value, so an adequate number of
8078 *      pages are left in the zone after a successful __alloc_pages().
8079 */
8080static void setup_per_zone_lowmem_reserve(void)
8081{
8082        struct pglist_data *pgdat;
8083        enum zone_type i, j;
8084
8085        for_each_online_pgdat(pgdat) {
8086                for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8087                        struct zone *zone = &pgdat->node_zones[i];
8088                        int ratio = sysctl_lowmem_reserve_ratio[i];
8089                        bool clear = !ratio || !zone_managed_pages(zone);
8090                        unsigned long managed_pages = 0;
8091
8092                        for (j = i + 1; j < MAX_NR_ZONES; j++) {
8093                                struct zone *upper_zone = &pgdat->node_zones[j];
8094
8095                                managed_pages += zone_managed_pages(upper_zone);
8096
8097                                if (clear)
8098                                        zone->lowmem_reserve[j] = 0;
8099                                else
8100                                        zone->lowmem_reserve[j] = managed_pages / ratio;
8101                        }
8102                }
8103        }
8104
8105        /* update totalreserve_pages */
8106        calculate_totalreserve_pages();
8107}
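/*
 * Worked example (a sketch; 256 is the usual default ratio for the lower
 * zones but it is config and sysctl dependent): if the ZONE_NORMAL above
 * ZONE_DMA32 has 4,000,000 managed pages and
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256, then ZONE_DMA32's
 * lowmem_reserve[ZONE_NORMAL] becomes 4,000,000 / 256 = 15625 pages
 * (~61 MiB): allocations that could have been placed in ZONE_NORMAL must
 * leave that many extra pages free before spilling into ZONE_DMA32.
 */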
8108
8109static void __setup_per_zone_wmarks(void)
8110{
8111        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8112        unsigned long lowmem_pages = 0;
8113        struct zone *zone;
8114        unsigned long flags;
8115
8116        /* Calculate total number of !ZONE_HIGHMEM pages */
8117        for_each_zone(zone) {
8118                if (!is_highmem(zone))
8119                        lowmem_pages += zone_managed_pages(zone);
8120        }
8121
8122        for_each_zone(zone) {
8123                u64 tmp;
8124
8125                spin_lock_irqsave(&zone->lock, flags);
8126                tmp = (u64)pages_min * zone_managed_pages(zone);
8127                do_div(tmp, lowmem_pages);
8128                if (is_highmem(zone)) {
8129                        /*
8130                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8131                         * need highmem pages, so cap pages_min to a small
8132                         * value here.
8133                         *
8134                         * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8135                         * deltas control async page reclaim, and so should
8136                         * not be capped for highmem.
8137                         */
8138                        unsigned long min_pages;
8139
8140                        min_pages = zone_managed_pages(zone) / 1024;
8141                        min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8142                        zone->_watermark[WMARK_MIN] = min_pages;
8143                } else {
8144                        /*
8145                         * If it's a lowmem zone, reserve a number of pages
8146                         * proportionate to the zone's size.
8147                         */
8148                        zone->_watermark[WMARK_MIN] = tmp;
8149                }
8150
8151                /*
8152                 * Set the kswapd watermarks distance according to the
8153                 * scale factor in proportion to available memory, but
8154                 * ensure a minimum size on small systems.
8155                 */
8156                tmp = max_t(u64, tmp >> 2,
8157                            mult_frac(zone_managed_pages(zone),
8158                                      watermark_scale_factor, 10000));
8159
8160                zone->watermark_boost = 0;
8161                zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
8162                zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
8163
8164                spin_unlock_irqrestore(&zone->lock, flags);
8165        }
8166
8167        /* update totalreserve_pages */
8168        calculate_totalreserve_pages();
8169}
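/*
 * Worked example (a sketch, assuming a single 1 GiB lowmem zone, 4 KiB
 * pages, min_free_kbytes = 4096 and watermark_scale_factor = 10): pages_min
 * is 4096 >> 2 = 1024, so WMARK_MIN = 1024 pages.  The kswapd distance is
 * max(1024 >> 2, 262144 * 10 / 10000) = 262 pages, giving WMARK_LOW = 1286
 * and WMARK_HIGH = 1548 pages.
 */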
8170
8171/**
8172 * setup_per_zone_wmarks - called when min_free_kbytes changes
8173 * or when memory is hot-{added|removed}
8174 *
8175 * Ensures that the watermark[min,low,high] values for each zone are set
8176 * correctly with respect to min_free_kbytes.
8177 */
8178void setup_per_zone_wmarks(void)
8179{
8180        static DEFINE_SPINLOCK(lock);
8181
8182        spin_lock(&lock);
8183        __setup_per_zone_wmarks();
8184        spin_unlock(&lock);
8185}
8186
8187/*
8188 * Initialise min_free_kbytes.
8189 *
8190 * For small machines we want it small (128k min).  For large machines
8191 * we want it large (256MB max).  But it is not linear, because network
8192 * bandwidth does not increase linearly with machine size.  We use
8193 *
8194 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
8195 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
8196 *
8197 * which yields
8198 *
8199 * 16MB:        512k
8200 * 32MB:        724k
8201 * 64MB:        1024k
8202 * 128MB:       1448k
8203 * 256MB:       2048k
8204 * 512MB:       2896k
8205 * 1024MB:      4096k
8206 * 2048MB:      5792k
8207 * 4096MB:      8192k
8208 * 8192MB:      11584k
8209 * 16384MB:     16384k
8210 */
8211int __meminit init_per_zone_wmark_min(void)
8212{
8213        unsigned long lowmem_kbytes;
8214        int new_min_free_kbytes;
8215
8216        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
8217        new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
8218
8219        if (new_min_free_kbytes > user_min_free_kbytes) {
8220                min_free_kbytes = new_min_free_kbytes;
8221                if (min_free_kbytes < 128)
8222                        min_free_kbytes = 128;
8223                if (min_free_kbytes > 262144)
8224                        min_free_kbytes = 262144;
8225        } else {
8226                pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
8227                                new_min_free_kbytes, user_min_free_kbytes);
8228        }
8229        setup_per_zone_wmarks();
8230        refresh_zone_stat_thresholds();
8231        setup_per_zone_lowmem_reserve();
8232
8233#ifdef CONFIG_NUMA
8234        setup_min_unmapped_ratio();
8235        setup_min_slab_ratio();
8236#endif
8237
8238        khugepaged_min_free_kbytes_update();
8239
8240        return 0;
8241}
8242postcore_initcall(init_per_zone_wmark_min)
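/*
 * Worked example of the sizing above (a sketch): with 1 GiB of lowmem,
 * lowmem_kbytes = 1048576, so int_sqrt(1048576 * 16) = 4096 and
 * min_free_kbytes becomes 4096k, matching the "1024MB: 4096k" row in the
 * table before init_per_zone_wmark_min().
 */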
8243
8244/*
8245 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
8246 *      that we can call two helper functions whenever min_free_kbytes
8247 *      changes.
8248 */
8249int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
8250                void *buffer, size_t *length, loff_t *ppos)
8251{
8252        int rc;
8253
8254        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8255        if (rc)
8256                return rc;
8257
8258        if (write) {
8259                user_min_free_kbytes = min_free_kbytes;
8260                setup_per_zone_wmarks();
8261        }
8262        return 0;
8263}
8264
8265int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
8266                void *buffer, size_t *length, loff_t *ppos)
8267{
8268        int rc;
8269
8270        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8271        if (rc)
8272                return rc;
8273
8274        if (write)
8275                setup_per_zone_wmarks();
8276
8277        return 0;
8278}
8279
8280#ifdef CONFIG_NUMA
8281static void setup_min_unmapped_ratio(void)
8282{
8283        pg_data_t *pgdat;
8284        struct zone *zone;
8285
8286        for_each_online_pgdat(pgdat)
8287                pgdat->min_unmapped_pages = 0;
8288
8289        for_each_zone(zone)
8290                zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
8291                                                         sysctl_min_unmapped_ratio) / 100;
8292}
8293
8294
8295int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
8296                void *buffer, size_t *length, loff_t *ppos)
8297{
8298        int rc;
8299
8300        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8301        if (rc)
8302                return rc;
8303
8304        setup_min_unmapped_ratio();
8305
8306        return 0;
8307}
8308
8309static void setup_min_slab_ratio(void)
8310{
8311        pg_data_t *pgdat;
8312        struct zone *zone;
8313
8314        for_each_online_pgdat(pgdat)
8315                pgdat->min_slab_pages = 0;
8316
8317        for_each_zone(zone)
8318                zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
8319                                                     sysctl_min_slab_ratio) / 100;
8320}
8321
8322int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
8323                void *buffer, size_t *length, loff_t *ppos)
8324{
8325        int rc;
8326
8327        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8328        if (rc)
8329                return rc;
8330
8331        setup_min_slab_ratio();
8332
8333        return 0;
8334}
8335#endif
8336
8337/*
8338 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8339 *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8340 *      whenever sysctl_lowmem_reserve_ratio changes.
8341 *
8342 * The reserve ratio obviously has absolutely no relation with the
8343 * minimum watermarks. The lowmem reserve ratio can only make sense
8344 *      as a function of the boot time zone sizes.
8345 */
8346int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8347                void *buffer, size_t *length, loff_t *ppos)
8348{
8349        int i;
8350
8351        proc_dointvec_minmax(table, write, buffer, length, ppos);
8352
8353        for (i = 0; i < MAX_NR_ZONES; i++) {
8354                if (sysctl_lowmem_reserve_ratio[i] < 1)
8355                        sysctl_lowmem_reserve_ratio[i] = 0;
8356        }
8357
8358        setup_per_zone_lowmem_reserve();
8359        return 0;
8360}
8361
8362/*
8363 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
8364 * cpu.  It is the fraction of total pages in each zone that a hot per cpu
8365 * pagelist can have before it gets flushed back to the buddy allocator.
8366 */
8367int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
8368                void *buffer, size_t *length, loff_t *ppos)
8369{
8370        struct zone *zone;
8371        int old_percpu_pagelist_fraction;
8372        int ret;
8373
8374        mutex_lock(&pcp_batch_high_lock);
8375        old_percpu_pagelist_fraction = percpu_pagelist_fraction;
8376
8377        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8378        if (!write || ret < 0)
8379                goto out;
8380
8381        /* Sanity checking to avoid pcp imbalance */
8382        if (percpu_pagelist_fraction &&
8383            percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
8384                percpu_pagelist_fraction = old_percpu_pagelist_fraction;
8385                ret = -EINVAL;
8386                goto out;
8387        }
8388
8389        /* No change? */
8390        if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
8391                goto out;
8392
8393        for_each_populated_zone(zone)
8394                zone_set_pageset_high_and_batch(zone);
8395out:
8396        mutex_unlock(&pcp_batch_high_lock);
8397        return ret;
8398}
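/*
 * Usage example (a sketch): writing 8 (MIN_PERCPU_PAGELIST_FRACTION) to
 * /proc/sys/vm/percpu_pagelist_fraction lets each CPU's pcp list in a zone
 * grow to roughly 1/8 of that zone's pages before the limits derived in
 * zone_set_pageset_high_and_batch() flush it back to the buddy lists;
 * writing 0 restores the default batch-based sizing.
 */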
8399
8400#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
8401/*
8402 * Returns the number of pages that arch has reserved but
8403 * is not known to alloc_large_system_hash().
8404 */
8405static unsigned long __init arch_reserved_kernel_pages(void)
8406{
8407        return 0;
8408}
8409#endif
8410
8411/*
8412 * Adaptive scale is meant to reduce sizes of hash tables on large memory
8413 * machines. As memory size is increased the scale is also increased, but at
8414 * a slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
8415 * quadruples the scale is increased by one, which means the size of the hash
8416 * table only doubles, instead of quadrupling as well.
8417 * Because 32-bit systems cannot have the large physical memory for which this
8418 * scaling makes sense, it is disabled on such platforms.
8419 */
8420#if __BITS_PER_LONG > 32
8421#define ADAPT_SCALE_BASE        (64ul << 30)
8422#define ADAPT_SCALE_SHIFT       2
8423#define ADAPT_SCALE_NPAGES      (ADAPT_SCALE_BASE >> PAGE_SHIFT)
8424#endif
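/*
 * Worked example of the adaptive scaling (a sketch, assuming 4 KiB pages):
 * ADAPT_SCALE_NPAGES is 16M pages (64 GiB).  On a 256 GiB machine (roughly
 * 64M kernel pages) the loop in alloc_large_system_hash() bumps scale once,
 * and on a 1 TiB machine twice, so each quadrupling of memory past 64 GiB
 * only doubles the resulting hash table instead of quadrupling it.
 */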
8425
8426/*
8427 * allocate a large system hash table from bootmem
8428 * - it is assumed that the hash table must contain an exact power-of-2
8429 *   quantity of entries
8430 * - limit is the number of hash buckets, not the total allocation size
8431 */
8432void *__init alloc_large_system_hash(const char *tablename,
8433                                     unsigned long bucketsize,
8434                                     unsigned long numentries,
8435                                     int scale,
8436                                     int flags,
8437                                     unsigned int *_hash_shift,
8438                                     unsigned int *_hash_mask,
8439                                     unsigned long low_limit,
8440                                     unsigned long high_limit)
8441{
8442        unsigned long long max = high_limit;
8443        unsigned long log2qty, size;
8444        void *table = NULL;
8445        gfp_t gfp_flags;
8446        bool virt;
8447        bool huge;
8448
8449        /* allow the kernel cmdline to have a say */
8450        if (!numentries) {
8451                /* round applicable memory size up to nearest megabyte */
8452                numentries = nr_kernel_pages;
8453                numentries -= arch_reserved_kernel_pages();
8454
8455                /* It isn't necessary when PAGE_SIZE >= 1MB */
8456                if (PAGE_SHIFT < 20)
8457                        numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
8458
8459#if __BITS_PER_LONG > 32
8460                if (!high_limit) {
8461                        unsigned long adapt;
8462
8463                        for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
8464                             adapt <<= ADAPT_SCALE_SHIFT)
8465                                scale++;
8466                }
8467#endif
8468
8469                /* limit to 1 bucket per 2^scale bytes of low memory */
8470                if (scale > PAGE_SHIFT)
8471                        numentries >>= (scale - PAGE_SHIFT);
8472                else
8473                        numentries <<= (PAGE_SHIFT - scale);
8474
8475                /* Make sure we've got at least a 0-order allocation.. */
8476                if (unlikely(flags & HASH_SMALL)) {
8477                        /* Makes no sense without HASH_EARLY */
8478                        WARN_ON(!(flags & HASH_EARLY));
8479                        if (!(numentries >> *_hash_shift)) {
8480                                numentries = 1UL << *_hash_shift;
8481                                BUG_ON(!numentries);
8482                        }
8483                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
8484                        numentries = PAGE_SIZE / bucketsize;
8485        }
8486        numentries = roundup_pow_of_two(numentries);
8487
8488        /* limit allocation size to 1/16 total memory by default */
8489        if (max == 0) {
8490                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
8491                do_div(max, bucketsize);
8492        }
8493        max = min(max, 0x80000000ULL);
8494
8495        if (numentries < low_limit)
8496                numentries = low_limit;
8497        if (numentries > max)
8498                numentries = max;
8499
8500        log2qty = ilog2(numentries);
8501
8502        gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
8503        do {
8504                virt = false;
8505                size = bucketsize << log2qty;
8506                if (flags & HASH_EARLY) {
8507                        if (flags & HASH_ZERO)
8508                                table = memblock_alloc(size, SMP_CACHE_BYTES);
8509                        else
8510                                table = memblock_alloc_raw(size,
8511                                                           SMP_CACHE_BYTES);
8512                } else if (get_order(size) >= MAX_ORDER || hashdist) {
8513                        table = __vmalloc(size, gfp_flags);
8514                        virt = true;
8515                        huge = is_vm_area_hugepages(table);
8516                } else {
8517                        /*
8518                         * If bucketsize is not a power-of-two, we may free
8519                         * some pages at the end of the hash table, which
8520                         * alloc_pages_exact() does automatically.
8521                         */
8522                        table = alloc_pages_exact(size, gfp_flags);
8523                        kmemleak_alloc(table, size, 1, gfp_flags);
8524                }
8525        } while (!table && size > PAGE_SIZE && --log2qty);
8526
8527        if (!table)
8528                panic("Failed to allocate %s hash table\n", tablename);
8529
8530        pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
8531                tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
8532                virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
8533
8534        if (_hash_shift)
8535                *_hash_shift = log2qty;
8536        if (_hash_mask)
8537                *_hash_mask = (1 << log2qty) - 1;
8538
8539        return table;
8540}
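/*
 * Example caller (a sketch modelled on the VFS caches; the exact flags and
 * scale used by fs/ may differ):
 *
 *   inode_hashtable = alloc_large_system_hash("Inode-cache",
 *                                             sizeof(struct hlist_head),
 *                                             ihash_entries, 14,
 *                                             HASH_EARLY | HASH_ZERO,
 *                                             &i_hash_shift, &i_hash_mask,
 *                                             0, 0);
 *
 * which sizes the table from nr_kernel_pages when ihash_entries is 0 and
 * returns a zeroed, power-of-2 sized array of buckets.
 */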
8541
8542/*
8543 * This function checks whether pageblock includes unmovable pages or not.
8544 *
8545 * A PageLRU check without isolation or lru_lock could race, so a
8546 * MIGRATE_MOVABLE block might include unmovable pages. A __PageMovable()
8547 * check without lock_page may likewise miss some movable non-LRU pages in
8548 * a race. So this function cannot be expected to be exact.
8549 *
8550 * Returns a page without holding a reference. If the caller wants to
8551 * dereference that page (e.g., dumping), it has to make sure that it
8552 * cannot get removed (e.g., via memory unplug) concurrently.
8553 *
8554 */
8555struct page *has_unmovable_pages(struct zone *zone, struct page *page,
8556                                 int migratetype, int flags)
8557{
8558        unsigned long iter = 0;
8559        unsigned long pfn = page_to_pfn(page);
8560        unsigned long offset = pfn % pageblock_nr_pages;
8561
8562        if (is_migrate_cma_page(page)) {
8563                /*
8564                 * CMA allocations (alloc_contig_range) really need to mark
8565                 * CMA pageblocks as isolated even when they are in fact not
8566                 * movable, so consider them movable here.
8567                 */
8568                if (is_migrate_cma(migratetype))
8569                        return NULL;
8570
8571                return page;
8572        }
8573
8574        for (; iter < pageblock_nr_pages - offset; iter++) {
8575                if (!pfn_valid_within(pfn + iter))
8576                        continue;
8577
8578                page = pfn_to_page(pfn + iter);
8579
8580                /*
8581                 * Both bootmem allocations and memory holes are marked
8582                 * PG_reserved and are unmovable. We can even have unmovable
8583                 * allocations inside ZONE_MOVABLE, for example when
8584                 * specifying "movablecore".
8585                 */
8586                if (PageReserved(page))
8587                        return page;
8588
8589                /*
8590                 * If the zone is movable and we have ruled out all reserved
8591                 * pages then it should be reasonably safe to assume the rest
8592                 * is movable.
8593                 */
8594                if (zone_idx(zone) == ZONE_MOVABLE)
8595                        continue;
8596
8597                /*
8598                 * Hugepages are not in LRU lists, but they're movable.
8599                 * THPs are on the LRU, but need to be counted as the number of small pages they span.
8600                 * We need not scan over tail pages because we don't
8601                 * handle each tail page individually in migration.
8602                 */
8603                if (PageHuge(page) || PageTransCompound(page)) {
8604                        struct page *head = compound_head(page);
8605                        unsigned int skip_pages;
8606
8607                        if (PageHuge(page)) {
8608                                if (!hugepage_migration_supported(page_hstate(head)))
8609                                        return page;
8610                        } else if (!PageLRU(head) && !__PageMovable(head)) {
8611                                return page;
8612                        }
8613
8614                        skip_pages = compound_nr(head) - (page - head);
8615                        iter += skip_pages - 1;
8616                        continue;
8617                }
8618
8619                /*
8620                 * We can't use page_count without pinning the page
8621                 * because another CPU can free the compound page.
8622                 * This check already skips compound tails of THP
8623                 * because their page->_refcount is zero at all times.
8624                 */
8625                if (!page_ref_count(page)) {
8626                        if (PageBuddy(page))
8627                                iter += (1 << buddy_order(page)) - 1;
8628                        continue;
8629                }
8630
8631                /*
8632                 * A HWPoisoned page may not be in the buddy system, and
8633                 * its page_count() may not be 0.
8634                 */
8635                if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
8636                        continue;
8637
8638                /*
8639                 * We treat all PageOffline() pages as movable when offlining
8640                 * to give drivers a chance to decrement their reference count
8641                 * in MEM_GOING_OFFLINE in order to indicate that these pages
8642                 * can be offlined as there are no direct references anymore.
8643                 * For actually unmovable PageOffline() pages where the driver does
8644                 * not support this, we will fail later when trying to actually
8645                 * move these pages that still have a reference count > 0.
8646                 * (false negatives in this function only)
8647                 */
8648                if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8649                        continue;
8650
8651                if (__PageMovable(page) || PageLRU(page))
8652                        continue;
8653
8654                /*
8655                 * If there are RECLAIMABLE pages, we need to check
8656                 * them.  But for now, memory offlining itself doesn't call
8657                 * shrink_node_slabs(); this still needs to be fixed.
8658                 */
8659                return page;
8660        }
8661        return NULL;
8662}
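
/*
 * Caller sketch (illustrative; the surrounding isolation logic is
 * simplified): the return value identifies the page that blocks isolation
 * rather than acting as a boolean, and, per the comment above, dereferencing
 * it (e.g. for dumping) is only safe if it cannot vanish concurrently:
 *
 *	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
 *	if (unmovable) {
 *		if (isol_flags & REPORT_FAILURE)
 *			dump_page(unmovable, "unmovable page");
 *		return -EBUSY;
 *	}
 */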
8663
8664#ifdef CONFIG_CONTIG_ALLOC
8665static unsigned long pfn_max_align_down(unsigned long pfn)
8666{
8667        return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8668                             pageblock_nr_pages) - 1);
8669}
8670
8671static unsigned long pfn_max_align_up(unsigned long pfn)
8672{
8673        return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8674                                pageblock_nr_pages));
8675}
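
/*
 * Worked example (illustrative; assumes MAX_ORDER_NR_PAGES == 1024 and
 * pageblock_nr_pages == 512, so the larger alignment unit is 1024 pages):
 * pfn_max_align_down(1536) == 1024 and pfn_max_align_up(1536) == 2048,
 * i.e. a requested range is widened to whole 1024-page units.
 */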
8676
8677#if defined(CONFIG_DYNAMIC_DEBUG) || \
8678        (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
8679/* Usage: See admin-guide/dynamic-debug-howto.rst */
8680static void alloc_contig_dump_pages(struct list_head *page_list)
8681{
8682        DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");
8683
8684        if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
8685                struct page *page;
8686
8687                dump_stack();
8688                list_for_each_entry(page, page_list, lru)
8689                        dump_page(page, "migration failure");
8690        }
8691}
8692#else
8693static inline void alloc_contig_dump_pages(struct list_head *page_list)
8694{
8695}
8696#endif
8697
8698/* [start, end) must belong to a single zone. */
8699static int __alloc_contig_migrate_range(struct compact_control *cc,
8700                                        unsigned long start, unsigned long end)
8701{
8702        /* This function is based on compact_zone() from compaction.c. */
8703        unsigned int nr_reclaimed;
8704        unsigned long pfn = start;
8705        unsigned int tries = 0;
8706        int ret = 0;
8707        struct migration_target_control mtc = {
8708                .nid = zone_to_nid(cc->zone),
8709                .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
8710        };
8711
8712        lru_cache_disable();
8713
8714        while (pfn < end || !list_empty(&cc->migratepages)) {
8715                if (fatal_signal_pending(current)) {
8716                        ret = -EINTR;
8717                        break;
8718                }
8719
8720                if (list_empty(&cc->migratepages)) {
8721                        cc->nr_migratepages = 0;
8722                        ret = isolate_migratepages_range(cc, pfn, end);
8723                        if (ret && ret != -EAGAIN)
8724                                break;
8725                        pfn = cc->migrate_pfn;
8726                        tries = 0;
8727                } else if (++tries == 5) {
8728                        ret = -EBUSY;
8729                        break;
8730                }
8731
8732                nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8733                                                        &cc->migratepages);
8734                cc->nr_migratepages -= nr_reclaimed;
8735
8736                ret = migrate_pages(&cc->migratepages, alloc_migration_target,
8737                                NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
8738
8739                /*
8740                 * On -ENOMEM, migrate_pages() bails out right away. It is
8741                 * pointless to retry on this error, so do the same here.
8742                 */
8743                if (ret == -ENOMEM)
8744                        break;
8745        }
8746
8747        lru_cache_enable();
8748        if (ret < 0) {
8749                alloc_contig_dump_pages(&cc->migratepages);
8750                putback_movable_pages(&cc->migratepages);
8751                return ret;
8752        }
8753        return 0;
8754}
8755
8756/**
8757 * alloc_contig_range() -- tries to allocate given range of pages
8758 * @start:      start PFN to allocate
8759 * @end:        one-past-the-last PFN to allocate
8760 * @migratetype:        migratetype of the underlying pageblocks (either
8761 *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
8762 *                      in range must have the same migratetype and it must
8763 *                      be either of the two.
8764 * @gfp_mask:   GFP mask to use during compaction
8765 *
8766 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8767 * aligned.  The PFN range must belong to a single zone.
8768 *
8769 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8770 * pageblocks in the range.  Once isolated, the pageblocks should not
8771 * be modified by others.
8772 *
8773 * Return: zero on success or negative error code.  On success all
8774 * pages whose PFN is in [start, end) are allocated for the caller and
8775 * need to be freed with free_contig_range().
8776 */
8777int alloc_contig_range(unsigned long start, unsigned long end,
8778                       unsigned migratetype, gfp_t gfp_mask)
8779{
8780        unsigned long outer_start, outer_end;
8781        unsigned int order;
8782        int ret = 0;
8783
8784        struct compact_control cc = {
8785                .nr_migratepages = 0,
8786                .order = -1,
8787                .zone = page_zone(pfn_to_page(start)),
8788                .mode = MIGRATE_SYNC,
8789                .ignore_skip_hint = true,
8790                .no_set_skip_hint = true,
8791                .gfp_mask = current_gfp_context(gfp_mask),
8792                .alloc_contig = true,
8793        };
8794        INIT_LIST_HEAD(&cc.migratepages);
8795
8796        /*
8797         * What we do here is mark all pageblocks in the range as
8798         * MIGRATE_ISOLATE.  Because pageblock and max order pages may
8799         * have different sizes, and due to the way the page allocator
8800         * works, we align the range to the bigger of the two so
8801         * that the page allocator won't try to merge buddies from
8802         * different pageblocks and change MIGRATE_ISOLATE to some
8803         * other migration type.
8804         *
8805         * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8806         * migrate the pages from the unaligned range (i.e. the pages
8807         * we are interested in).  This will put all the pages in the
8808         * range back into the page allocator as MIGRATE_ISOLATE.
8809         *
8810         * When this is done, we take the pages in the range from the
8811         * page allocator, removing them from the buddy system.  This
8812         * way the page allocator will never consider using them.
8813         *
8814         * This lets us mark the pageblocks back as
8815         * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8816         * aligned range but not in the unaligned, original range are
8817         * put back to the page allocator so that the buddy can use them.
8818         */
8819
8820        ret = start_isolate_page_range(pfn_max_align_down(start),
8821                                       pfn_max_align_up(end), migratetype, 0);
8822        if (ret)
8823                return ret;
8824
8825        drain_all_pages(cc.zone);
8826
8827        /*
8828         * In case of -EBUSY, we'd like to know which page causes the problem.
8829         * So, just fall through. test_pages_isolated() has a tracepoint
8830         * which will report the busy page.
8831         *
8832         * It is possible that busy pages could become available before
8833         * the call to test_pages_isolated, and the range will actually be
8834         * allocated.  So, if we fall through, be sure to clear ret so that
8835         * -EBUSY is not accidentally used or returned to the caller.
8836         */
8837        ret = __alloc_contig_migrate_range(&cc, start, end);
8838        if (ret && ret != -EBUSY)
8839                goto done;
8840        ret = 0;
8841
8842        /*
8843         * Pages from [start, end) are within MAX_ORDER_NR_PAGES
8844         * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
8845         * more, all pages in [start, end) are free in the page allocator.
8846         * What we are going to do is allocate all pages from
8847         * [start, end) (that is, remove them from the page allocator).
8848         *
8849         * The only problem is that pages at the beginning and at the
8850         * end of the interesting range may not be aligned with pages that
8851         * the page allocator holds, i.e. they can be part of higher order
8852         * pages.  Because of this, we reserve the bigger range and
8853         * once this is done free the pages we are not interested in.
8854         *
8855         * We don't have to hold zone->lock here because the pages are
8856         * isolated and thus won't get removed from the buddy system.
8857         */
8858
8859        order = 0;
8860        outer_start = start;
8861        while (!PageBuddy(pfn_to_page(outer_start))) {
8862                if (++order >= MAX_ORDER) {
8863                        outer_start = start;
8864                        break;
8865                }
8866                outer_start &= ~0UL << order;
8867        }
8868
8869        if (outer_start != start) {
8870                order = buddy_order(pfn_to_page(outer_start));
8871
8872                /*
8873                 * The outer_start page could be a small order buddy page that
8874                 * doesn't include the start page. Adjust outer_start
8875                 * in this case so the failed page is reported properly
8876                 * by the tracepoint in test_pages_isolated().
8877                 */
8878                if (outer_start + (1UL << order) <= start)
8879                        outer_start = start;
8880        }
8881
8882        /* Make sure the range is really isolated. */
8883        if (test_pages_isolated(outer_start, end, 0)) {
8884                ret = -EBUSY;
8885                goto done;
8886        }
8887
8888        /* Grab isolated pages from freelists. */
8889        outer_end = isolate_freepages_range(&cc, outer_start, end);
8890        if (!outer_end) {
8891                ret = -EBUSY;
8892                goto done;
8893        }
8894
8895        /* Free head and tail (if any) */
8896        if (start != outer_start)
8897                free_contig_range(outer_start, start - outer_start);
8898        if (end != outer_end)
8899                free_contig_range(end, outer_end - end);
8900
8901done:
8902        undo_isolate_page_range(pfn_max_align_down(start),
8903                                pfn_max_align_up(end), migratetype);
8904        return ret;
8905}
8906EXPORT_SYMBOL(alloc_contig_range);
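
/*
 * Usage sketch (illustrative; start_pfn and nr are placeholders): a caller
 * that owns MIGRATE_CMA pageblocks, such as the CMA core, claims a PFN range
 * and later gives it back with free_contig_range():
 *
 *	ret = alloc_contig_range(start_pfn, start_pfn + nr,
 *				 MIGRATE_CMA, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	free_contig_range(start_pfn, nr);
 */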
8907
8908static int __alloc_contig_pages(unsigned long start_pfn,
8909                                unsigned long nr_pages, gfp_t gfp_mask)
8910{
8911        unsigned long end_pfn = start_pfn + nr_pages;
8912
8913        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
8914                                  gfp_mask);
8915}
8916
8917static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
8918                                   unsigned long nr_pages)
8919{
8920        unsigned long i, end_pfn = start_pfn + nr_pages;
8921        struct page *page;
8922
8923        for (i = start_pfn; i < end_pfn; i++) {
8924                page = pfn_to_online_page(i);
8925                if (!page)
8926                        return false;
8927
8928                if (page_zone(page) != z)
8929                        return false;
8930
8931                if (PageReserved(page))
8932                        return false;
8933        }
8934        return true;
8935}
8936
8937static bool zone_spans_last_pfn(const struct zone *zone,
8938                                unsigned long start_pfn, unsigned long nr_pages)
8939{
8940        unsigned long last_pfn = start_pfn + nr_pages - 1;
8941
8942        return zone_spans_pfn(zone, last_pfn);
8943}
8944
8945/**
8946 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
8947 * @nr_pages:   Number of contiguous pages to allocate
8948 * @gfp_mask:   GFP mask to limit search and used during compaction
8949 * @nid:        Target node
8950 * @nodemask:   Mask for other possible nodes
8951 *
8952 * This routine is a wrapper around alloc_contig_range(). It scans over zones
8953 * on an applicable zonelist to find a contiguous pfn range which can then be
8954 * tried for allocation with alloc_contig_range(). This routine is intended
8955 * for allocation requests which cannot be fulfilled with the buddy allocator.
8956 *
8957 * The allocated memory is always aligned to a page boundary. If nr_pages is a
8958 * power of two then the alignment is guaranteed to be to the given nr_pages
8959 * (e.g. 1GB request would be aligned to 1GB).
8960 *
8961 * Allocated pages can be freed with free_contig_range() or by manually calling
8962 * __free_page() on each allocated page.
8963 *
8964 * Return: pointer to contiguous pages on success, or NULL if not successful.
8965 */
8966struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
8967                                int nid, nodemask_t *nodemask)
8968{
8969        unsigned long ret, pfn, flags;
8970        struct zonelist *zonelist;
8971        struct zone *zone;
8972        struct zoneref *z;
8973
8974        zonelist = node_zonelist(nid, gfp_mask);
8975        for_each_zone_zonelist_nodemask(zone, z, zonelist,
8976                                        gfp_zone(gfp_mask), nodemask) {
8977                spin_lock_irqsave(&zone->lock, flags);
8978
8979                pfn = ALIGN(zone->zone_start_pfn, nr_pages);
8980                while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
8981                        if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
8982                                /*
8983                                 * We release the zone lock here because
8984                                 * alloc_contig_range() will also lock the zone
8985                                 * at some point. If there's an allocation
8986                                 * spinning on this lock, it may win the race
8987                                 * and cause alloc_contig_range() to fail...
8988                                 */
8989                                spin_unlock_irqrestore(&zone->lock, flags);
8990                                ret = __alloc_contig_pages(pfn, nr_pages,
8991                                                        gfp_mask);
8992                                if (!ret)
8993                                        return pfn_to_page(pfn);
8994                                spin_lock_irqsave(&zone->lock, flags);
8995                        }
8996                        pfn += nr_pages;
8997                }
8998                spin_unlock_irqrestore(&zone->lock, flags);
8999        }
9000        return NULL;
9001}
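
/*
 * Usage sketch (illustrative; nr and nid are placeholders): a caller that
 * needs more physically contiguous memory than the buddy allocator can hand
 * out pairs this with free_contig_range(), as the kernel-doc above notes:
 *
 *	page = alloc_contig_pages(nr, GFP_KERNEL, nid, NULL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	free_contig_range(page_to_pfn(page), nr);
 */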
9002#endif /* CONFIG_CONTIG_ALLOC */
9003
9004void free_contig_range(unsigned long pfn, unsigned long nr_pages)
9005{
9006        unsigned long count = 0;
9007
9008        for (; nr_pages--; pfn++) {
9009                struct page *page = pfn_to_page(pfn);
9010
9011                count += page_count(page) != 1;
9012                __free_page(page);
9013        }
9014        WARN(count != 0, "%lu pages are still in use!\n", count);
9015}
9016EXPORT_SYMBOL(free_contig_range);
9017
9018/*
9019 * The zone indicated has a new number of managed_pages; batch sizes and percpu
9020 * page high values need to be recalculated.
9021 */
9022void __meminit zone_pcp_update(struct zone *zone)
9023{
9024        mutex_lock(&pcp_batch_high_lock);
9025        zone_set_pageset_high_and_batch(zone);
9026        mutex_unlock(&pcp_batch_high_lock);
9027}
9028
9029/*
9030 * Effectively disable pcplists for the zone by setting the high limit to 0
9031 * and draining all cpus. A concurrent page freeing on another CPU that's about
9032 * to put the page on pcplist will either finish before the drain and the page
9033 * will be drained, or observe the new high limit and skip the pcplist.
9034 *
9035 * Must be paired with a call to zone_pcp_enable().
9036 */
9037void zone_pcp_disable(struct zone *zone)
9038{
9039        mutex_lock(&pcp_batch_high_lock);
9040        __zone_set_pageset_high_and_batch(zone, 0, 1);
9041        __drain_all_pages(zone, true);
9042}
9043
9044void zone_pcp_enable(struct zone *zone)
9045{
9046        __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9047        mutex_unlock(&pcp_batch_high_lock);
9048}
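
/*
 * Pairing sketch (illustrative; the work in the middle is a placeholder):
 * zone_pcp_disable() returns with pcp_batch_high_lock held, so the two
 * calls must bracket the section that relies on empty pcplists:
 *
 *	zone_pcp_disable(zone);
 *	ret = do_work_requiring_empty_pcplists(zone);
 *	zone_pcp_enable(zone);
 */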
9049
9050void zone_pcp_reset(struct zone *zone)
9051{
9052        int cpu;
9053        struct per_cpu_pageset *pset;
9054
9055        if (zone->pageset != &boot_pageset) {
9056                for_each_online_cpu(cpu) {
9057                        pset = per_cpu_ptr(zone->pageset, cpu);
9058                        drain_zonestat(zone, pset);
9059                }
9060                free_percpu(zone->pageset);
9061                zone->pageset = &boot_pageset;
9062        }
9063}
9064
9065#ifdef CONFIG_MEMORY_HOTREMOVE
9066/*
9067 * All pages in the range must be in a single zone, must not contain holes,
9068 * must span full sections, and must be isolated before calling this function.
9069 */
9070void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9071{
9072        unsigned long pfn = start_pfn;
9073        struct page *page;
9074        struct zone *zone;
9075        unsigned int order;
9076        unsigned long flags;
9077
9078        offline_mem_sections(pfn, end_pfn);
9079        zone = page_zone(pfn_to_page(pfn));
9080        spin_lock_irqsave(&zone->lock, flags);
9081        while (pfn < end_pfn) {
9082                page = pfn_to_page(pfn);
9083                /*
9084                 * A HWPoisoned page may not be in the buddy system, and
9085                 * its page_count() may not be 0.
9086                 */
9087                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9088                        pfn++;
9089                        continue;
9090                }
9091                /*
9092                 * At this point all remaining PageOffline() pages have a
9093                 * reference count of 0 and can simply be skipped.
9094                 */
9095                if (PageOffline(page)) {
9096                        BUG_ON(page_count(page));
9097                        BUG_ON(PageBuddy(page));
9098                        pfn++;
9099                        continue;
9100                }
9101
9102                BUG_ON(page_count(page));
9103                BUG_ON(!PageBuddy(page));
9104                order = buddy_order(page);
9105                del_page_from_free_list(page, zone, order);
9106                pfn += (1 << order);
9107        }
9108        spin_unlock_irqrestore(&zone->lock, flags);
9109}
9110#endif
9111
9112bool is_free_buddy_page(struct page *page)
9113{
9114        struct zone *zone = page_zone(page);
9115        unsigned long pfn = page_to_pfn(page);
9116        unsigned long flags;
9117        unsigned int order;
9118
9119        spin_lock_irqsave(&zone->lock, flags);
9120        for (order = 0; order < MAX_ORDER; order++) {
9121                struct page *page_head = page - (pfn & ((1 << order) - 1));
9122
9123                if (PageBuddy(page_head) && buddy_order(page_head) >= order)
9124                        break;
9125        }
9126        spin_unlock_irqrestore(&zone->lock, flags);
9127
9128        return order < MAX_ORDER;
9129}
9130
9131#ifdef CONFIG_MEMORY_FAILURE
9132/*
9133 * Break down a higher-order page into sub-pages, and keep our target out of
9134 * the buddy allocator.
9135 */
9136static void break_down_buddy_pages(struct zone *zone, struct page *page,
9137                                   struct page *target, int low, int high,
9138                                   int migratetype)
9139{
9140        unsigned long size = 1 << high;
9141        struct page *current_buddy, *next_page;
9142
9143        while (high > low) {
9144                high--;
9145                size >>= 1;
9146
9147                if (target >= &page[size]) {
9148                        next_page = page + size;
9149                        current_buddy = page;
9150                } else {
9151                        next_page = page;
9152                        current_buddy = page + size;
9153                }
9154
9155                if (set_page_guard(zone, current_buddy, high, migratetype))
9156                        continue;
9157
9158                if (current_buddy != target) {
9159                        add_to_free_list(current_buddy, zone, high, migratetype);
9160                        set_buddy_order(current_buddy, high);
9161                        page = next_page;
9162                }
9163        }
9164}
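
/*
 * Worked example (illustrative): taking the page at offset 3 out of an
 * order-3 buddy block splits the block into an order-2 chunk covering
 * offsets 4-7, an order-1 chunk covering offsets 0-1 and an order-0 page
 * at offset 2, leaving only the target page (offset 3) off the free lists.
 */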
9165
9166/*
9167 * Take a page that will be marked as poisoned off the buddy allocator.
9168 */
9169bool take_page_off_buddy(struct page *page)
9170{
9171        struct zone *zone = page_zone(page);
9172        unsigned long pfn = page_to_pfn(page);
9173        unsigned long flags;
9174        unsigned int order;
9175        bool ret = false;
9176
9177        spin_lock_irqsave(&zone->lock, flags);
9178        for (order = 0; order < MAX_ORDER; order++) {
9179                struct page *page_head = page - (pfn & ((1 << order) - 1));
9180                int page_order = buddy_order(page_head);
9181
9182                if (PageBuddy(page_head) && page_order >= order) {
9183                        unsigned long pfn_head = page_to_pfn(page_head);
9184                        int migratetype = get_pfnblock_migratetype(page_head,
9185                                                                   pfn_head);
9186
9187                        del_page_from_free_list(page_head, zone, page_order);
9188                        break_down_buddy_pages(zone, page_head, page, 0,
9189                                                page_order, migratetype);
9190                        if (!is_migrate_isolate(migratetype))
9191                                __mod_zone_freepage_state(zone, -1, migratetype);
9192                        ret = true;
9193                        break;
9194                }
9195                if (page_count(page_head) > 0)
9196                        break;
9197        }
9198        spin_unlock_irqrestore(&zone->lock, flags);
9199        return ret;
9200}
9201#endif
9202