linux/mm/page_alloc.c
   1/*
   2 *  linux/mm/page_alloc.c
   3 *
   4 *  Manages the free list; the system allocates free pages here.
   5 *  Note that kmalloc() lives in slab.c
   6 *
   7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   8 *  Swap reorganised 29.12.95, Stephen Tweedie
   9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15 */
  16
  17#include <linux/stddef.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/interrupt.h>
  21#include <linux/pagemap.h>
  22#include <linux/jiffies.h>
  23#include <linux/bootmem.h>
  24#include <linux/memblock.h>
  25#include <linux/compiler.h>
  26#include <linux/kernel.h>
  27#include <linux/kmemcheck.h>
  28#include <linux/module.h>
  29#include <linux/suspend.h>
  30#include <linux/pagevec.h>
  31#include <linux/blkdev.h>
  32#include <linux/slab.h>
  33#include <linux/ratelimit.h>
  34#include <linux/oom.h>
  35#include <linux/notifier.h>
  36#include <linux/topology.h>
  37#include <linux/sysctl.h>
  38#include <linux/cpu.h>
  39#include <linux/cpuset.h>
  40#include <linux/memory_hotplug.h>
  41#include <linux/nodemask.h>
  42#include <linux/vmalloc.h>
  43#include <linux/vmstat.h>
  44#include <linux/mempolicy.h>
  45#include <linux/stop_machine.h>
  46#include <linux/sort.h>
  47#include <linux/pfn.h>
  48#include <linux/backing-dev.h>
  49#include <linux/fault-inject.h>
  50#include <linux/page-isolation.h>
  51#include <linux/page_cgroup.h>
  52#include <linux/debugobjects.h>
  53#include <linux/kmemleak.h>
  54#include <linux/compaction.h>
  55#include <trace/events/kmem.h>
  56#include <linux/ftrace_event.h>
  57#include <linux/memcontrol.h>
  58#include <linux/prefetch.h>
  59#include <linux/migrate.h>
  60#include <linux/page-debug-flags.h>
  61
  62#include <asm/tlbflush.h>
  63#include <asm/div64.h>
  64#include "internal.h"
  65
  66#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
  67DEFINE_PER_CPU(int, numa_node);
  68EXPORT_PER_CPU_SYMBOL(numa_node);
  69#endif
  70
  71#ifdef CONFIG_HAVE_MEMORYLESS_NODES
  72/*
  73 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
  74 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
  75 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
  76 * defined in <linux/topology.h>.
  77 */
  78DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
  79EXPORT_PER_CPU_SYMBOL(_numa_mem_);
  80#endif
  81
  82/*
  83 * Array of node states.
  84 */
  85nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
  86        [N_POSSIBLE] = NODE_MASK_ALL,
  87        [N_ONLINE] = { { [0] = 1UL } },
  88#ifndef CONFIG_NUMA
  89        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
  90#ifdef CONFIG_HIGHMEM
  91        [N_HIGH_MEMORY] = { { [0] = 1UL } },
  92#endif
  93        [N_CPU] = { { [0] = 1UL } },
  94#endif  /* NUMA */
  95};
  96EXPORT_SYMBOL(node_states);
  97
  98unsigned long totalram_pages __read_mostly;
  99unsigned long totalreserve_pages __read_mostly;
 100/*
 101 * When calculating the number of globally allowed dirty pages, there
 102 * is a certain number of per-zone reserves that should not be
 103 * considered dirtyable memory.  This is the sum of those reserves
 104 * over all existing zones that contribute dirtyable memory.
 105 */
 106unsigned long dirty_balance_reserve __read_mostly;
 107
 108int percpu_pagelist_fraction;
 109gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 110
 111#ifdef CONFIG_PM_SLEEP
 112/*
 113 * The following functions are used by the suspend/hibernate code to temporarily
 114 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 115 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 116 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 117 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 118 * guaranteed not to run in parallel with that modification).
 119 */
 120
 121static gfp_t saved_gfp_mask;
 122
 123void pm_restore_gfp_mask(void)
 124{
 125        WARN_ON(!mutex_is_locked(&pm_mutex));
 126        if (saved_gfp_mask) {
 127                gfp_allowed_mask = saved_gfp_mask;
 128                saved_gfp_mask = 0;
 129        }
 130}
 131
 132void pm_restrict_gfp_mask(void)
 133{
 134        WARN_ON(!mutex_is_locked(&pm_mutex));
 135        WARN_ON(saved_gfp_mask);
 136        saved_gfp_mask = gfp_allowed_mask;
 137        gfp_allowed_mask &= ~GFP_IOFS;
 138}
 139
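/*
 * pm_suspended_storage() reports whether allocations are currently barred
 * from doing I/O: it returns true once pm_restrict_gfp_mask() has cleared
 * GFP_IOFS from gfp_allowed_mask, i.e. while storage devices may be
 * suspended.
 */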
 140bool pm_suspended_storage(void)
 141{
 142        if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
 143                return false;
 144        return true;
 145}
 146#endif /* CONFIG_PM_SLEEP */
 147
 148#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 149int pageblock_order __read_mostly;
 150#endif
 151
 152static void __free_pages_ok(struct page *page, unsigned int order);
 153
 154/*
 155 * results with 256, 32 in the lowmem_reserve sysctl:
 156 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 157 *      1G machine -> (16M dma, 784M normal, 224M high)
 158 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 159 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  160 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 161 *
 162 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 163 * don't need any ZONE_NORMAL reservation
 164 */
 165int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 166#ifdef CONFIG_ZONE_DMA
 167         256,
 168#endif
 169#ifdef CONFIG_ZONE_DMA32
 170         256,
 171#endif
 172#ifdef CONFIG_HIGHMEM
 173         32,
 174#endif
 175         32,
 176};
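/*
 * Worked example for the ratios above: on the 1G machine, a ratio of 256
 * for ZONE_DMA means a NORMAL allocation leaves roughly 784M/256 ~= 3M of
 * ZONE_DMA untouched, and the HIGHMEM ratio of 32 leaves roughly
 * 224M/32 = 7M of ZONE_NORMAL reserved.
 */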
 177
 178EXPORT_SYMBOL(totalram_pages);
 179
 180static char * const zone_names[MAX_NR_ZONES] = {
 181#ifdef CONFIG_ZONE_DMA
 182         "DMA",
 183#endif
 184#ifdef CONFIG_ZONE_DMA32
 185         "DMA32",
 186#endif
 187         "Normal",
 188#ifdef CONFIG_HIGHMEM
 189         "HighMem",
 190#endif
 191         "Movable",
 192};
 193
 194int min_free_kbytes = 1024;
 195
 196static unsigned long __meminitdata nr_kernel_pages;
 197static unsigned long __meminitdata nr_all_pages;
 198static unsigned long __meminitdata dma_reserve;
 199
 200#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 201static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 202static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 203static unsigned long __initdata required_kernelcore;
 204static unsigned long __initdata required_movablecore;
 205static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 206
 207/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 208int movable_zone;
 209EXPORT_SYMBOL(movable_zone);
 210#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 211
 212#if MAX_NUMNODES > 1
 213int nr_node_ids __read_mostly = MAX_NUMNODES;
 214int nr_online_nodes __read_mostly = 1;
 215EXPORT_SYMBOL(nr_node_ids);
 216EXPORT_SYMBOL(nr_online_nodes);
 217#endif
 218
 219int page_group_by_mobility_disabled __read_mostly;
 220
 221/*
 222 * NOTE:
 223 * Don't use set_pageblock_migratetype(page, MIGRATE_ISOLATE) directly.
 224 * Instead, use {un}set_pageblock_isolate.
 225 */
 226void set_pageblock_migratetype(struct page *page, int migratetype)
 227{
 228
 229        if (unlikely(page_group_by_mobility_disabled))
 230                migratetype = MIGRATE_UNMOVABLE;
 231
 232        set_pageblock_flags_group(page, (unsigned long)migratetype,
 233                                        PB_migrate, PB_migrate_end);
 234}
 235
 236bool oom_killer_disabled __read_mostly;
 237
 238#ifdef CONFIG_DEBUG_VM
 239static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 240{
 241        int ret = 0;
 242        unsigned seq;
 243        unsigned long pfn = page_to_pfn(page);
 244
 245        do {
 246                seq = zone_span_seqbegin(zone);
 247                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
 248                        ret = 1;
 249                else if (pfn < zone->zone_start_pfn)
 250                        ret = 1;
 251        } while (zone_span_seqretry(zone, seq));
 252
 253        return ret;
 254}
 255
 256static int page_is_consistent(struct zone *zone, struct page *page)
 257{
 258        if (!pfn_valid_within(page_to_pfn(page)))
 259                return 0;
 260        if (zone != page_zone(page))
 261                return 0;
 262
 263        return 1;
 264}
 265/*
 266 * Temporary debugging check for pages not lying within a given zone.
 267 */
 268static int bad_range(struct zone *zone, struct page *page)
 269{
 270        if (page_outside_zone_boundaries(zone, page))
 271                return 1;
 272        if (!page_is_consistent(zone, page))
 273                return 1;
 274
 275        return 0;
 276}
 277#else
 278static inline int bad_range(struct zone *zone, struct page *page)
 279{
 280        return 0;
 281}
 282#endif
 283
 284static void bad_page(struct page *page)
 285{
 286        static unsigned long resume;
 287        static unsigned long nr_shown;
 288        static unsigned long nr_unshown;
 289
 290        /* Don't complain about poisoned pages */
 291        if (PageHWPoison(page)) {
 292                reset_page_mapcount(page); /* remove PageBuddy */
 293                return;
 294        }
 295
 296        /*
 297         * Allow a burst of 60 reports, then keep quiet for that minute;
 298         * or allow a steady drip of one report per second.
 299         */
 300        if (nr_shown == 60) {
 301                if (time_before(jiffies, resume)) {
 302                        nr_unshown++;
 303                        goto out;
 304                }
 305                if (nr_unshown) {
 306                        printk(KERN_ALERT
 307                              "BUG: Bad page state: %lu messages suppressed\n",
 308                                nr_unshown);
 309                        nr_unshown = 0;
 310                }
 311                nr_shown = 0;
 312        }
 313        if (nr_shown++ == 0)
 314                resume = jiffies + 60 * HZ;
 315
 316        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
 317                current->comm, page_to_pfn(page));
 318        dump_page(page);
 319
 320        print_modules();
 321        dump_stack();
 322out:
 323        /* Leave bad fields for debug, except PageBuddy could make trouble */
 324        reset_page_mapcount(page); /* remove PageBuddy */
 325        add_taint(TAINT_BAD_PAGE);
 326}
 327
 328/*
 329 * Higher-order pages are called "compound pages".  They are structured thusly:
 330 *
 331 * The first PAGE_SIZE page is called the "head page".
 332 *
 333 * The remaining PAGE_SIZE pages are called "tail pages".
 334 *
 335 * All pages have PG_compound set.  All tail pages have their ->first_page
 336 * pointing at the head page.
 337 *
 338 * The first tail page's ->lru.next holds the address of the compound page's
 339 * put_page() function.  Its ->lru.prev holds the order of allocation.
 340 * This usage means that zero-order pages may not be compound.
 341 */
 342
 343static void free_compound_page(struct page *page)
 344{
 345        __free_pages_ok(page, compound_order(page));
 346}
 347
 348void prep_compound_page(struct page *page, unsigned long order)
 349{
 350        int i;
 351        int nr_pages = 1 << order;
 352
 353        set_compound_page_dtor(page, free_compound_page);
 354        set_compound_order(page, order);
 355        __SetPageHead(page);
 356        for (i = 1; i < nr_pages; i++) {
 357                struct page *p = page + i;
 358                __SetPageTail(p);
 359                set_page_count(p, 0);
 360                p->first_page = page;
 361        }
 362}
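/*
 * Example: after prep_compound_page(page, 2) the four pages look like
 *
 *   page[0]:    PageHead() true, compound_order() == 2
 *   page[1..3]: PageTail() true, refcount 0, ->first_page == page
 *
 * with free_compound_page() recorded as the destructor (on the first tail
 * page, as described in the comment block above).
 */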
 363
 364/* update __split_huge_page_refcount if you change this function */
 365static int destroy_compound_page(struct page *page, unsigned long order)
 366{
 367        int i;
 368        int nr_pages = 1 << order;
 369        int bad = 0;
 370
 371        if (unlikely(compound_order(page) != order) ||
 372            unlikely(!PageHead(page))) {
 373                bad_page(page);
 374                bad++;
 375        }
 376
 377        __ClearPageHead(page);
 378
 379        for (i = 1; i < nr_pages; i++) {
 380                struct page *p = page + i;
 381
 382                if (unlikely(!PageTail(p) || (p->first_page != page))) {
 383                        bad_page(page);
 384                        bad++;
 385                }
 386                __ClearPageTail(p);
 387        }
 388
 389        return bad;
 390}
 391
 392static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 393{
 394        int i;
 395
 396        /*
 397         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 398         * and __GFP_HIGHMEM from hard or soft interrupt context.
 399         */
 400        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
 401        for (i = 0; i < (1 << order); i++)
 402                clear_highpage(page + i);
 403}
 404
 405#ifdef CONFIG_DEBUG_PAGEALLOC
 406unsigned int _debug_guardpage_minorder;
 407
 408static int __init debug_guardpage_minorder_setup(char *buf)
 409{
 410        unsigned long res;
 411
 412        if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
 413                printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
 414                return 0;
 415        }
 416        _debug_guardpage_minorder = res;
 417        printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
 418        return 0;
 419}
 420__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 421
 422static inline void set_page_guard_flag(struct page *page)
 423{
 424        __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
 425}
 426
 427static inline void clear_page_guard_flag(struct page *page)
 428{
 429        __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
 430}
 431#else
 432static inline void set_page_guard_flag(struct page *page) { }
 433static inline void clear_page_guard_flag(struct page *page) { }
 434#endif
 435
 436static inline void set_page_order(struct page *page, int order)
 437{
 438        set_page_private(page, order);
 439        __SetPageBuddy(page);
 440}
 441
 442static inline void rmv_page_order(struct page *page)
 443{
 444        __ClearPageBuddy(page);
 445        set_page_private(page, 0);
 446}
 447
 448/*
 449 * Locate the struct page for both the matching buddy in our
  450 * pair (buddy1) and the combined order-(O+1) page they form (page).
 451 *
 452 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 453 * the following equation:
 454 *     B2 = B1 ^ (1 << O)
  455 * For example, if the starting buddy (B1) is #8, its order-1
  456 * buddy (B2) is #10:
 457 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 458 *
 459 * 2) Any buddy B will have an order O+1 parent P which
 460 * satisfies the following equation:
 461 *     P = B & ~(1 << O)
 462 *
 463 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 464 */
 465static inline unsigned long
 466__find_buddy_index(unsigned long page_idx, unsigned int order)
 467{
 468        return page_idx ^ (1 << order);
 469}
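/*
 * Example: for page_idx 12 at order 2, __find_buddy_index() gives
 * 12 ^ (1 << 2) = 8, and the merged order-3 block starts at
 * buddy_idx & page_idx = 8, matching the combined_idx computation in
 * __free_one_page() below.
 */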
 470
 471/*
  472 * This function checks whether a page is free && is the buddy of our page.
  473 * We can coalesce a page and its buddy if
 474 * (a) the buddy is not in a hole &&
 475 * (b) the buddy is in the buddy system &&
 476 * (c) a page and its buddy have the same order &&
 477 * (d) a page and its buddy are in the same zone.
 478 *
  479 * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
 480 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 481 *
 482 * For recording page's order, we use page_private(page).
 483 */
 484static inline int page_is_buddy(struct page *page, struct page *buddy,
 485                                                                int order)
 486{
 487        if (!pfn_valid_within(page_to_pfn(buddy)))
 488                return 0;
 489
 490        if (page_zone_id(page) != page_zone_id(buddy))
 491                return 0;
 492
 493        if (page_is_guard(buddy) && page_order(buddy) == order) {
 494                VM_BUG_ON(page_count(buddy) != 0);
 495                return 1;
 496        }
 497
 498        if (PageBuddy(buddy) && page_order(buddy) == order) {
 499                VM_BUG_ON(page_count(buddy) != 0);
 500                return 1;
 501        }
 502        return 0;
 503}
 504
 505/*
 506 * Freeing function for a buddy system allocator.
 507 *
 508 * The concept of a buddy system is to maintain direct-mapped table
 509 * (containing bit values) for memory blocks of various "orders".
 510 * The bottom level table contains the map for the smallest allocatable
 511 * units of memory (here, pages), and each level above it describes
 512 * pairs of units from the levels below, hence, "buddies".
 513 * At a high level, all that happens here is marking the table entry
 514 * at the bottom level available, and propagating the changes upward
 515 * as necessary, plus some accounting needed to play nicely with other
 516 * parts of the VM system.
  517 * At each level, we keep a list of pages, which are heads of contiguous
  518 * runs of (1 << order) free pages, marked with _mapcount -2. A page's
  519 * order is recorded in the page_private(page) field.
 520 * So when we are allocating or freeing one, we can derive the state of the
 521 * other.  That is, if we allocate a small block, and both were
 522 * free, the remainder of the region must be split into blocks.
 523 * If a block is freed, and its buddy is also free, then this
 524 * triggers coalescing into a block of larger size.
 525 *
 526 * -- wli
 527 */
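/*
 * For instance, freeing an order-0 page whose buddy is already free
 * produces an order-1 block; if that block's order-1 buddy is free too,
 * the loop in __free_one_page() merges again and the page ends up on the
 * order-2 free list, with page_private() recording the final order.
 */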
 528
 529static inline void __free_one_page(struct page *page,
 530                struct zone *zone, unsigned int order,
 531                int migratetype)
 532{
 533        unsigned long page_idx;
 534        unsigned long combined_idx;
 535        unsigned long uninitialized_var(buddy_idx);
 536        struct page *buddy;
 537
 538        if (unlikely(PageCompound(page)))
 539                if (unlikely(destroy_compound_page(page, order)))
 540                        return;
 541
 542        VM_BUG_ON(migratetype == -1);
 543
 544        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 545
 546        VM_BUG_ON(page_idx & ((1 << order) - 1));
 547        VM_BUG_ON(bad_range(zone, page));
 548
 549        while (order < MAX_ORDER-1) {
 550                buddy_idx = __find_buddy_index(page_idx, order);
 551                buddy = page + (buddy_idx - page_idx);
 552                if (!page_is_buddy(page, buddy, order))
 553                        break;
 554                /*
  555                 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page;
 556                 * merge with it and move up one order.
 557                 */
 558                if (page_is_guard(buddy)) {
 559                        clear_page_guard_flag(buddy);
 560                        set_page_private(page, 0);
 561                        __mod_zone_freepage_state(zone, 1 << order,
 562                                                  migratetype);
 563                } else {
 564                        list_del(&buddy->lru);
 565                        zone->free_area[order].nr_free--;
 566                        rmv_page_order(buddy);
 567                }
 568                combined_idx = buddy_idx & page_idx;
 569                page = page + (combined_idx - page_idx);
 570                page_idx = combined_idx;
 571                order++;
 572        }
 573        set_page_order(page, order);
 574
 575        /*
 576         * If this is not the largest possible page, check if the buddy
 577         * of the next-highest order is free. If it is, it's possible
  578         * that pages are being freed that will coalesce soon. In case
  579         * that is happening, add the free page to the tail of the list
  580         * so it's less likely to be used soon and more likely to be merged
  581         * as a higher-order page.
 582         */
 583        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 584                struct page *higher_page, *higher_buddy;
 585                combined_idx = buddy_idx & page_idx;
 586                higher_page = page + (combined_idx - page_idx);
 587                buddy_idx = __find_buddy_index(combined_idx, order + 1);
 588                higher_buddy = higher_page + (buddy_idx - combined_idx);
 589                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
 590                        list_add_tail(&page->lru,
 591                                &zone->free_area[order].free_list[migratetype]);
 592                        goto out;
 593                }
 594        }
 595
 596        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
 597out:
 598        zone->free_area[order].nr_free++;
 599}
 600
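/*
 * Sanity checks applied to every page being freed.  The conditions are
 * combined with bitwise OR, presumably so the common "all clear" case is
 * decided with a single branch; any nonzero result means the page is in a
 * state it should never be in when freed (mapped, referenced, bad flags).
 */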
 601static inline int free_pages_check(struct page *page)
 602{
 603        if (unlikely(page_mapcount(page) |
 604                (page->mapping != NULL)  |
 605                (atomic_read(&page->_count) != 0) |
 606                (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
 607                (mem_cgroup_bad_page_check(page)))) {
 608                bad_page(page);
 609                return 1;
 610        }
 611        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 612                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 613        return 0;
 614}
 615
 616/*
  617 * Frees a number of pages from the PCP lists.
  618 * Assumes all pages on the list are in the same zone, and of the same order.
 619 * count is the number of pages to free.
 620 *
 621 * If the zone was previously in an "all pages pinned" state then look to
 622 * see if this freeing clears that state.
 623 *
 624 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 625 * pinned" detection logic.
 626 */
 627static void free_pcppages_bulk(struct zone *zone, int count,
 628                                        struct per_cpu_pages *pcp)
 629{
 630        int migratetype = 0;
 631        int batch_free = 0;
 632        int to_free = count;
 633
 634        spin_lock(&zone->lock);
 635        zone->all_unreclaimable = 0;
 636        zone->pages_scanned = 0;
 637
 638        while (to_free) {
 639                struct page *page;
 640                struct list_head *list;
 641
 642                /*
 643                 * Remove pages from lists in a round-robin fashion. A
 644                 * batch_free count is maintained that is incremented when an
 645                 * empty list is encountered.  This is so more pages are freed
 646                 * off fuller lists instead of spinning excessively around empty
 647                 * lists
 648                 */
 649                do {
 650                        batch_free++;
 651                        if (++migratetype == MIGRATE_PCPTYPES)
 652                                migratetype = 0;
 653                        list = &pcp->lists[migratetype];
 654                } while (list_empty(list));
 655
 656                /* This is the only non-empty list. Free them all. */
 657                if (batch_free == MIGRATE_PCPTYPES)
 658                        batch_free = to_free;
 659
 660                do {
 661                        int mt; /* migratetype of the to-be-freed page */
 662
 663                        page = list_entry(list->prev, struct page, lru);
 664                        /* must delete as __free_one_page list manipulates */
 665                        list_del(&page->lru);
 666                        mt = get_freepage_migratetype(page);
 667                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 668                        __free_one_page(page, zone, 0, mt);
 669                        trace_mm_page_pcpu_drain(page, 0, mt);
 670                        if (is_migrate_cma(mt))
 671                                __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
 672                } while (--to_free && --batch_free && !list_empty(list));
 673        }
 674        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
 675        spin_unlock(&zone->lock);
 676}
 677
 678static void free_one_page(struct zone *zone, struct page *page, int order,
 679                                int migratetype)
 680{
 681        spin_lock(&zone->lock);
 682        zone->all_unreclaimable = 0;
 683        zone->pages_scanned = 0;
 684
 685        __free_one_page(page, zone, order, migratetype);
 686        if (unlikely(migratetype != MIGRATE_ISOLATE))
 687                __mod_zone_freepage_state(zone, 1 << order, migratetype);
 688        spin_unlock(&zone->lock);
 689}
 690
 691static bool free_pages_prepare(struct page *page, unsigned int order)
 692{
 693        int i;
 694        int bad = 0;
 695
 696        trace_mm_page_free(page, order);
 697        kmemcheck_free_shadow(page, order);
 698
 699        if (PageAnon(page))
 700                page->mapping = NULL;
 701        for (i = 0; i < (1 << order); i++)
 702                bad += free_pages_check(page + i);
 703        if (bad)
 704                return false;
 705
 706        if (!PageHighMem(page)) {
 707                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
 708                debug_check_no_obj_freed(page_address(page),
 709                                           PAGE_SIZE << order);
 710        }
 711        arch_free_page(page, order);
 712        kernel_map_pages(page, 1 << order, 0);
 713
 714        return true;
 715}
 716
 717static void __free_pages_ok(struct page *page, unsigned int order)
 718{
 719        unsigned long flags;
 720        int migratetype;
 721
 722        if (!free_pages_prepare(page, order))
 723                return;
 724
 725        local_irq_save(flags);
 726        __count_vm_events(PGFREE, 1 << order);
 727        migratetype = get_pageblock_migratetype(page);
 728        set_freepage_migratetype(page, migratetype);
 729        free_one_page(page_zone(page), page, order, migratetype);
 730        local_irq_restore(flags);
 731}
 732
 733void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 734{
 735        unsigned int nr_pages = 1 << order;
 736        unsigned int loop;
 737
 738        prefetchw(page);
 739        for (loop = 0; loop < nr_pages; loop++) {
 740                struct page *p = &page[loop];
 741
 742                if (loop + 1 < nr_pages)
 743                        prefetchw(p + 1);
 744                __ClearPageReserved(p);
 745                set_page_count(p, 0);
 746        }
 747
 748        set_page_refcounted(page);
 749        __free_pages(page, order);
 750}
 751
 752#ifdef CONFIG_CMA
 753/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
 754void __init init_cma_reserved_pageblock(struct page *page)
 755{
 756        unsigned i = pageblock_nr_pages;
 757        struct page *p = page;
 758
 759        do {
 760                __ClearPageReserved(p);
 761                set_page_count(p, 0);
 762        } while (++p, --i);
 763
 764        set_page_refcounted(page);
 765        set_pageblock_migratetype(page, MIGRATE_CMA);
 766        __free_pages(page, pageblock_order);
 767        totalram_pages += pageblock_nr_pages;
 768}
 769#endif
 770
 771/*
 772 * The order of subdivision here is critical for the IO subsystem.
 773 * Please do not alter this order without good reasons and regression
 774 * testing. Specifically, as large blocks of memory are subdivided,
 775 * the order in which smaller blocks are delivered depends on the order
 776 * they're subdivided in this function. This is the primary factor
 777 * influencing the order in which pages are delivered to the IO
 778 * subsystem according to empirical testing, and this is also justified
 779 * by considering the behavior of a buddy system containing a single
 780 * large block of memory acted on by a series of small allocations.
 781 * This behavior is a critical factor in sglist merging's success.
 782 *
 783 * -- wli
 784 */
 785static inline void expand(struct zone *zone, struct page *page,
 786        int low, int high, struct free_area *area,
 787        int migratetype)
 788{
 789        unsigned long size = 1 << high;
 790
 791        while (high > low) {
 792                area--;
 793                high--;
 794                size >>= 1;
 795                VM_BUG_ON(bad_range(zone, &page[size]));
 796
 797#ifdef CONFIG_DEBUG_PAGEALLOC
 798                if (high < debug_guardpage_minorder()) {
 799                        /*
  800                         * Mark as guard page(s) so they can be merged back
  801                         * into the allocator when the buddy is freed.
  802                         * Corresponding page table entries will not be touched;
  803                         * the pages stay not-present in the virtual address space.
 804                         */
 805                        INIT_LIST_HEAD(&page[size].lru);
 806                        set_page_guard_flag(&page[size]);
 807                        set_page_private(&page[size], high);
 808                        /* Guard pages are not available for any usage */
 809                        __mod_zone_freepage_state(zone, -(1 << high),
 810                                                  migratetype);
 811                        continue;
 812                }
 813#endif
 814                list_add(&page[size].lru, &area->free_list[migratetype]);
 815                area->nr_free++;
 816                set_page_order(&page[size], high);
 817        }
 818}
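/*
 * Example of the splitting above: satisfying an order-0 request (low = 0)
 * from an order-3 block (high = 3) hands back page[0] and puts the
 * remainder on the free lists as one order-2 block (page[4]), one order-1
 * block (page[2]) and one order-0 page (page[1]).
 */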
 819
 820/*
 821 * This page is about to be returned from the page allocator
 822 */
 823static inline int check_new_page(struct page *page)
 824{
 825        if (unlikely(page_mapcount(page) |
 826                (page->mapping != NULL)  |
 827                (atomic_read(&page->_count) != 0)  |
 828                (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
 829                (mem_cgroup_bad_page_check(page)))) {
 830                bad_page(page);
 831                return 1;
 832        }
 833        return 0;
 834}
 835
 836static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 837{
 838        int i;
 839
 840        for (i = 0; i < (1 << order); i++) {
 841                struct page *p = page + i;
 842                if (unlikely(check_new_page(p)))
 843                        return 1;
 844        }
 845
 846        set_page_private(page, 0);
 847        set_page_refcounted(page);
 848
 849        arch_alloc_page(page, order);
 850        kernel_map_pages(page, 1 << order, 1);
 851
 852        if (gfp_flags & __GFP_ZERO)
 853                prep_zero_page(page, order, gfp_flags);
 854
 855        if (order && (gfp_flags & __GFP_COMP))
 856                prep_compound_page(page, order);
 857
 858        return 0;
 859}
 860
 861/*
 862 * Go through the free lists for the given migratetype and remove
 863 * the smallest available page from the freelists
 864 */
 865static inline
 866struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 867                                                int migratetype)
 868{
 869        unsigned int current_order;
 870        struct free_area * area;
 871        struct page *page;
 872
 873        /* Find a page of the appropriate size in the preferred list */
 874        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 875                area = &(zone->free_area[current_order]);
 876                if (list_empty(&area->free_list[migratetype]))
 877                        continue;
 878
 879                page = list_entry(area->free_list[migratetype].next,
 880                                                        struct page, lru);
 881                list_del(&page->lru);
 882                rmv_page_order(page);
 883                area->nr_free--;
 884                expand(zone, page, order, current_order, area, migratetype);
 885                return page;
 886        }
 887
 888        return NULL;
 889}
 890
 891
 892/*
  893 * This array describes the order in which free lists are fallen back to
  894 * when the free lists for the desired migratetype are depleted.
 895 */
 896static int fallbacks[MIGRATE_TYPES][4] = {
 897        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
 898        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
 899#ifdef CONFIG_CMA
 900        [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
 901        [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
 902#else
 903        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 904#endif
 905        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 906        [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 907};
 908
 909/*
 910 * Move the free pages in a range to the free lists of the requested type.
  911 * Note that start_page and end_page are not aligned on a pageblock
 912 * boundary. If alignment is required, use move_freepages_block()
 913 */
 914int move_freepages(struct zone *zone,
 915                          struct page *start_page, struct page *end_page,
 916                          int migratetype)
 917{
 918        struct page *page;
 919        unsigned long order;
 920        int pages_moved = 0;
 921
 922#ifndef CONFIG_HOLES_IN_ZONE
 923        /*
 924         * page_zone is not safe to call in this context when
 925         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 926         * anyway as we check zone boundaries in move_freepages_block().
 927         * Remove at a later date when no bug reports exist related to
 928         * grouping pages by mobility
 929         */
 930        BUG_ON(page_zone(start_page) != page_zone(end_page));
 931#endif
 932
 933        for (page = start_page; page <= end_page;) {
 934                /* Make sure we are not inadvertently changing nodes */
 935                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
 936
 937                if (!pfn_valid_within(page_to_pfn(page))) {
 938                        page++;
 939                        continue;
 940                }
 941
 942                if (!PageBuddy(page)) {
 943                        page++;
 944                        continue;
 945                }
 946
 947                order = page_order(page);
 948                list_move(&page->lru,
 949                          &zone->free_area[order].free_list[migratetype]);
 950                set_freepage_migratetype(page, migratetype);
 951                page += 1 << order;
 952                pages_moved += 1 << order;
 953        }
 954
 955        return pages_moved;
 956}
 957
 958int move_freepages_block(struct zone *zone, struct page *page,
 959                                int migratetype)
 960{
 961        unsigned long start_pfn, end_pfn;
 962        struct page *start_page, *end_page;
 963
 964        start_pfn = page_to_pfn(page);
 965        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
 966        start_page = pfn_to_page(start_pfn);
 967        end_page = start_page + pageblock_nr_pages - 1;
 968        end_pfn = start_pfn + pageblock_nr_pages - 1;
 969
 970        /* Do not cross zone boundaries */
 971        if (start_pfn < zone->zone_start_pfn)
 972                start_page = page;
 973        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
 974                return 0;
 975
 976        return move_freepages(zone, start_page, end_page, migratetype);
 977}
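/*
 * Example of the alignment above, assuming pageblock_nr_pages is 512
 * (pageblock_order 9): a page at pfn 1000 is rounded down to the block
 * starting at pfn 512, and move_freepages() then walks pfns 512..1023
 * unless that range would cross a zone boundary.
 */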
 978
 979static void change_pageblock_range(struct page *pageblock_page,
 980                                        int start_order, int migratetype)
 981{
 982        int nr_pageblocks = 1 << (start_order - pageblock_order);
 983
 984        while (nr_pageblocks--) {
 985                set_pageblock_migratetype(pageblock_page, migratetype);
 986                pageblock_page += pageblock_nr_pages;
 987        }
 988}
 989
 990/* Remove an element from the buddy allocator from the fallback list */
 991static inline struct page *
 992__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 993{
 994        struct free_area * area;
 995        int current_order;
 996        struct page *page;
 997        int migratetype, i;
 998
 999        /* Find the largest possible block of pages in the other list */
1000        for (current_order = MAX_ORDER-1; current_order >= order;
1001                                                --current_order) {
1002                for (i = 0;; i++) {
1003                        migratetype = fallbacks[start_migratetype][i];
1004
1005                        /* MIGRATE_RESERVE handled later if necessary */
1006                        if (migratetype == MIGRATE_RESERVE)
1007                                break;
1008
1009                        area = &(zone->free_area[current_order]);
1010                        if (list_empty(&area->free_list[migratetype]))
1011                                continue;
1012
1013                        page = list_entry(area->free_list[migratetype].next,
1014                                        struct page, lru);
1015                        area->nr_free--;
1016
1017                        /*
1018                         * If breaking a large block of pages, move all free
1019                         * pages to the preferred allocation list. If falling
1020                         * back for a reclaimable kernel allocation, be more
1021                         * aggressive about taking ownership of free pages
1022                         *
1023                         * On the other hand, never change migration
1024                         * type of MIGRATE_CMA pageblocks nor move CMA
1025                         * pages on different free lists. We don't
1026                         * want unmovable pages to be allocated from
1027                         * MIGRATE_CMA areas.
1028                         */
1029                        if (!is_migrate_cma(migratetype) &&
1030                            (unlikely(current_order >= pageblock_order / 2) ||
1031                             start_migratetype == MIGRATE_RECLAIMABLE ||
1032                             page_group_by_mobility_disabled)) {
1033                                int pages;
1034                                pages = move_freepages_block(zone, page,
1035                                                                start_migratetype);
1036
1037                                /* Claim the whole block if over half of it is free */
1038                                if (pages >= (1 << (pageblock_order-1)) ||
1039                                                page_group_by_mobility_disabled)
1040                                        set_pageblock_migratetype(page,
1041                                                                start_migratetype);
1042
1043                                migratetype = start_migratetype;
1044                        }
1045
1046                        /* Remove the page from the freelists */
1047                        list_del(&page->lru);
1048                        rmv_page_order(page);
1049
1050                        /* Take ownership for orders >= pageblock_order */
1051                        if (current_order >= pageblock_order &&
1052                            !is_migrate_cma(migratetype))
1053                                change_pageblock_range(page, current_order,
1054                                                        start_migratetype);
1055
1056                        expand(zone, page, order, current_order, area,
1057                               is_migrate_cma(migratetype)
1058                             ? migratetype : start_migratetype);
1059
1060                        trace_mm_page_alloc_extfrag(page, order, current_order,
1061                                start_migratetype, migratetype);
1062
1063                        return page;
1064                }
1065        }
1066
1067        return NULL;
1068}
1069
1070/*
1071 * Do the hard work of removing an element from the buddy allocator.
1072 * Call me with the zone->lock already held.
1073 */
1074static struct page *__rmqueue(struct zone *zone, unsigned int order,
1075                                                int migratetype)
1076{
1077        struct page *page;
1078
1079retry_reserve:
1080        page = __rmqueue_smallest(zone, order, migratetype);
1081
1082        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1083                page = __rmqueue_fallback(zone, order, migratetype);
1084
1085                /*
1086                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1087                 * is used because __rmqueue_smallest is an inline function
1088                 * and we want just one call site
1089                 */
1090                if (!page) {
1091                        migratetype = MIGRATE_RESERVE;
1092                        goto retry_reserve;
1093                }
1094        }
1095
1096        trace_mm_page_alloc_zone_locked(page, order, migratetype);
1097        return page;
1098}
1099
1100/*
1101 * Obtain a specified number of elements from the buddy allocator, all under
1102 * a single hold of the lock, for efficiency.  Add them to the supplied list.
1103 * Returns the number of new pages which were placed at *list.
1104 */
1105static int rmqueue_bulk(struct zone *zone, unsigned int order,
1106                        unsigned long count, struct list_head *list,
1107                        int migratetype, int cold)
1108{
1109        int mt = migratetype, i;
1110
1111        spin_lock(&zone->lock);
1112        for (i = 0; i < count; ++i) {
1113                struct page *page = __rmqueue(zone, order, migratetype);
1114                if (unlikely(page == NULL))
1115                        break;
1116
1117                /*
1118                 * Split buddy pages returned by expand() are received here
 1119                 * in physical page order. The page is added to the caller's
 1120                 * list and the list head then moves forward. From the caller's
 1121                 * perspective, the linked list is ordered by page number in
1122                 * some conditions. This is useful for IO devices that can
1123                 * merge IO requests if the physical pages are ordered
1124                 * properly.
1125                 */
1126                if (likely(cold == 0))
1127                        list_add(&page->lru, list);
1128                else
1129                        list_add_tail(&page->lru, list);
1130                if (IS_ENABLED(CONFIG_CMA)) {
1131                        mt = get_pageblock_migratetype(page);
1132                        if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
1133                                mt = migratetype;
1134                }
1135                set_freepage_migratetype(page, mt);
1136                list = &page->lru;
1137                if (is_migrate_cma(mt))
1138                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
1139                                              -(1 << order));
1140        }
1141        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1142        spin_unlock(&zone->lock);
1143        return i;
1144}
1145
1146#ifdef CONFIG_NUMA
1147/*
1148 * Called from the vmstat counter updater to drain pagesets of this
1149 * currently executing processor on remote nodes after they have
1150 * expired.
1151 *
1152 * Note that this function must be called with the thread pinned to
1153 * a single processor.
1154 */
1155void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1156{
1157        unsigned long flags;
1158        int to_drain;
1159
1160        local_irq_save(flags);
1161        if (pcp->count >= pcp->batch)
1162                to_drain = pcp->batch;
1163        else
1164                to_drain = pcp->count;
1165        if (to_drain > 0) {
1166                free_pcppages_bulk(zone, to_drain, pcp);
1167                pcp->count -= to_drain;
1168        }
1169        local_irq_restore(flags);
1170}
1171#endif
1172
1173/*
1174 * Drain pages of the indicated processor.
1175 *
1176 * The processor must either be the current processor and the
1177 * thread pinned to the current processor or a processor that
1178 * is not online.
1179 */
1180static void drain_pages(unsigned int cpu)
1181{
1182        unsigned long flags;
1183        struct zone *zone;
1184
1185        for_each_populated_zone(zone) {
1186                struct per_cpu_pageset *pset;
1187                struct per_cpu_pages *pcp;
1188
1189                local_irq_save(flags);
1190                pset = per_cpu_ptr(zone->pageset, cpu);
1191
1192                pcp = &pset->pcp;
1193                if (pcp->count) {
1194                        free_pcppages_bulk(zone, pcp->count, pcp);
1195                        pcp->count = 0;
1196                }
1197                local_irq_restore(flags);
1198        }
1199}
1200
1201/*
1202 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1203 */
1204void drain_local_pages(void *arg)
1205{
1206        drain_pages(smp_processor_id());
1207}
1208
1209/*
1210 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
1211 *
1212 * Note that this code is protected against sending an IPI to an offline
1213 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
1214 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
1215 * nothing keeps CPUs from showing up after we populated the cpumask and
1216 * before the call to on_each_cpu_mask().
1217 */
1218void drain_all_pages(void)
1219{
1220        int cpu;
1221        struct per_cpu_pageset *pcp;
1222        struct zone *zone;
1223
1224        /*
 1225         * Allocate in the BSS so we won't require allocation in
1226         * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
1227         */
1228        static cpumask_t cpus_with_pcps;
1229
1230        /*
 1231         * We don't care about racing with CPU hotplug events,
 1232         * as the offline notification will cause the notified
 1233         * cpu to drain that CPU's pcps, and on_each_cpu_mask
 1234         * disables preemption as part of its processing.
1235         */
1236        for_each_online_cpu(cpu) {
1237                bool has_pcps = false;
1238                for_each_populated_zone(zone) {
1239                        pcp = per_cpu_ptr(zone->pageset, cpu);
1240                        if (pcp->pcp.count) {
1241                                has_pcps = true;
1242                                break;
1243                        }
1244                }
1245                if (has_pcps)
1246                        cpumask_set_cpu(cpu, &cpus_with_pcps);
1247                else
1248                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
1249        }
1250        on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
1251}
1252
1253#ifdef CONFIG_HIBERNATION
1254
1255void mark_free_pages(struct zone *zone)
1256{
1257        unsigned long pfn, max_zone_pfn;
1258        unsigned long flags;
1259        int order, t;
1260        struct list_head *curr;
1261
1262        if (!zone->spanned_pages)
1263                return;
1264
1265        spin_lock_irqsave(&zone->lock, flags);
1266
1267        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1268        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1269                if (pfn_valid(pfn)) {
1270                        struct page *page = pfn_to_page(pfn);
1271
1272                        if (!swsusp_page_is_forbidden(page))
1273                                swsusp_unset_page_free(page);
1274                }
1275
1276        for_each_migratetype_order(order, t) {
1277                list_for_each(curr, &zone->free_area[order].free_list[t]) {
1278                        unsigned long i;
1279
1280                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
1281                        for (i = 0; i < (1UL << order); i++)
1282                                swsusp_set_page_free(pfn_to_page(pfn + i));
1283                }
1284        }
1285        spin_unlock_irqrestore(&zone->lock, flags);
1286}
 1287#endif /* CONFIG_HIBERNATION */
1288
1289/*
1290 * Free a 0-order page
1291 * cold == 1 ? free a cold page : free a hot page
1292 */
1293void free_hot_cold_page(struct page *page, int cold)
1294{
1295        struct zone *zone = page_zone(page);
1296        struct per_cpu_pages *pcp;
1297        unsigned long flags;
1298        int migratetype;
1299
1300        if (!free_pages_prepare(page, 0))
1301                return;
1302
1303        migratetype = get_pageblock_migratetype(page);
1304        set_freepage_migratetype(page, migratetype);
1305        local_irq_save(flags);
1306        __count_vm_event(PGFREE);
1307
1308        /*
1309         * We only track unmovable, reclaimable and movable on pcp lists.
1310         * Free ISOLATE pages back to the allocator because they are being
1311         * offlined but treat RESERVE as movable pages so we can get those
1312         * areas back if necessary. Otherwise, we may have to free
1313         * excessively into the page allocator
1314         */
1315        if (migratetype >= MIGRATE_PCPTYPES) {
1316                if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1317                        free_one_page(zone, page, 0, migratetype);
1318                        goto out;
1319                }
1320                migratetype = MIGRATE_MOVABLE;
1321        }
1322
1323        pcp = &this_cpu_ptr(zone->pageset)->pcp;
1324        if (cold)
1325                list_add_tail(&page->lru, &pcp->lists[migratetype]);
1326        else
1327                list_add(&page->lru, &pcp->lists[migratetype]);
1328        pcp->count++;
1329        if (pcp->count >= pcp->high) {
1330                free_pcppages_bulk(zone, pcp->batch, pcp);
1331                pcp->count -= pcp->batch;
1332        }
1333
1334out:
1335        local_irq_restore(flags);
1336}
1337
1338/*
1339 * Free a list of 0-order pages
1340 */
1341void free_hot_cold_page_list(struct list_head *list, int cold)
1342{
1343        struct page *page, *next;
1344
1345        list_for_each_entry_safe(page, next, list, lru) {
1346                trace_mm_page_free_batched(page, cold);
1347                free_hot_cold_page(page, cold);
1348        }
1349}
1350
1351/*
1352 * split_page takes a non-compound higher-order page, and splits it into
 1353 * n (= 1 << order) sub-pages: page[0] .. page[n-1].
1354 * Each sub-page must be freed individually.
1355 *
1356 * Note: this is probably too low level an operation for use in drivers.
1357 * Please consult with lkml before using this in your driver.
1358 */
1359void split_page(struct page *page, unsigned int order)
1360{
1361        int i;
1362
1363        VM_BUG_ON(PageCompound(page));
1364        VM_BUG_ON(!page_count(page));
1365
1366#ifdef CONFIG_KMEMCHECK
1367        /*
1368         * Split shadow pages too, because free(page[0]) would
1369         * otherwise free the whole shadow.
1370         */
1371        if (kmemcheck_page_is_tracked(page))
1372                split_page(virt_to_page(page[0].shadow), order);
1373#endif
1374
1375        for (i = 1; i < (1 << order); i++)
1376                set_page_refcounted(page + i);
1377}
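/*
 * Typical use (sketch): a caller that needs physically contiguous pages but
 * will release them piecemeal can do
 *
 *	page = alloc_pages(GFP_KERNEL, order);
 *	split_page(page, order);
 *	...
 *	__free_page(page + i);		(each sub-page freed on its own)
 *
 * instead of keeping the whole high-order allocation alive.
 */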
1378
1379/*
 1380 * Similar to the split_page family of functions except that the page
 1381 * is required at the given order and is isolated now to prevent races
 1382 * with parallel allocators.
1383 */
1384int capture_free_page(struct page *page, int alloc_order, int migratetype)
1385{
1386        unsigned int order;
1387        unsigned long watermark;
1388        struct zone *zone;
1389        int mt;
1390
1391        BUG_ON(!PageBuddy(page));
1392
1393        zone = page_zone(page);
1394        order = page_order(page);
1395
1396        /* Obey watermarks as if the page was being allocated */
1397        watermark = low_wmark_pages(zone) + (1 << order);
1398        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1399                return 0;
1400
1401        /* Remove page from free list */
1402        list_del(&page->lru);
1403        zone->free_area[order].nr_free--;
1404        rmv_page_order(page);
1405
1406        mt = get_pageblock_migratetype(page);
1407        if (unlikely(mt != MIGRATE_ISOLATE))
1408                __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
1409
1410        if (alloc_order != order)
1411                expand(zone, page, alloc_order, order,
1412                        &zone->free_area[order], migratetype);
1413
1414        /* Set the pageblock if the captured page is at least a pageblock */
1415        if (order >= pageblock_order - 1) {
1416                struct page *endpage = page + (1 << order) - 1;
1417                for (; page < endpage; page += pageblock_nr_pages) {
1418                        int mt = get_pageblock_migratetype(page);
1419                        if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
1420                                set_pageblock_migratetype(page,
1421                                                          MIGRATE_MOVABLE);
1422                }
1423        }
1424
1425        return 1UL << alloc_order;
1426}
1427
1428/*
1429 * Similar to split_page except the page is already free. As this is only
1430 * being used for migration, the migratetype of the block also changes.
1431 * As this is called with interrupts disabled, the caller is responsible
 1432 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1433 * are enabled.
1434 *
1435 * Note: this is probably too low level an operation for use in drivers.
1436 * Please consult with lkml before using this in your driver.
1437 */
1438int split_free_page(struct page *page)
1439{
1440        unsigned int order;
1441        int nr_pages;
1442
1443        BUG_ON(!PageBuddy(page));
1444        order = page_order(page);
1445
1446        nr_pages = capture_free_page(page, order, 0);
1447        if (!nr_pages)
1448                return 0;
1449
1450        /* Split into individual pages */
1451        set_page_refcounted(page);
1452        split_page(page, order);
1453        return nr_pages;
1454}
1455
1456/*
1457 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1458 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1459 * or two.
1460 */
1461static inline
1462struct page *buffered_rmqueue(struct zone *preferred_zone,
1463                        struct zone *zone, int order, gfp_t gfp_flags,
1464                        int migratetype)
1465{
1466        unsigned long flags;
1467        struct page *page;
1468        int cold = !!(gfp_flags & __GFP_COLD);
1469
1470again:
1471        if (likely(order == 0)) {
1472                struct per_cpu_pages *pcp;
1473                struct list_head *list;
1474
1475                local_irq_save(flags);
1476                pcp = &this_cpu_ptr(zone->pageset)->pcp;
1477                list = &pcp->lists[migratetype];
1478                if (list_empty(list)) {
1479                        pcp->count += rmqueue_bulk(zone, 0,
1480                                        pcp->batch, list,
1481                                        migratetype, cold);
1482                        if (unlikely(list_empty(list)))
1483                                goto failed;
1484                }
1485
1486                if (cold)
1487                        page = list_entry(list->prev, struct page, lru);
1488                else
1489                        page = list_entry(list->next, struct page, lru);
1490
1491                list_del(&page->lru);
1492                pcp->count--;
1493        } else {
1494                if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1495                        /*
1496                         * __GFP_NOFAIL is not to be used in new code.
1497                         *
1498                         * All __GFP_NOFAIL callers should be fixed so that they
1499                         * properly detect and handle allocation failures.
1500                         *
1501                         * We most definitely don't want callers attempting to
1502                         * allocate greater than order-1 page units with
1503                         * __GFP_NOFAIL.
1504                         */
1505                        WARN_ON_ONCE(order > 1);
1506                }
1507                spin_lock_irqsave(&zone->lock, flags);
1508                page = __rmqueue(zone, order, migratetype);
1509                spin_unlock(&zone->lock);
1510                if (!page)
1511                        goto failed;
1512                __mod_zone_freepage_state(zone, -(1 << order),
1513                                          get_pageblock_migratetype(page));
1514        }
1515
1516        __count_zone_vm_events(PGALLOC, zone, 1 << order);
1517        zone_statistics(preferred_zone, zone, gfp_flags);
1518        local_irq_restore(flags);
1519
1520        VM_BUG_ON(bad_range(zone, page));
1521        if (prep_new_page(page, order, gfp_flags))
1522                goto again;
1523        return page;
1524
1525failed:
1526        local_irq_restore(flags);
1527        return NULL;
1528}
1529
1530#ifdef CONFIG_FAIL_PAGE_ALLOC
1531
1532static struct {
1533        struct fault_attr attr;
1534
1535        u32 ignore_gfp_highmem;
1536        u32 ignore_gfp_wait;
1537        u32 min_order;
1538} fail_page_alloc = {
1539        .attr = FAULT_ATTR_INITIALIZER,
1540        .ignore_gfp_wait = 1,
1541        .ignore_gfp_highmem = 1,
1542        .min_order = 1,
1543};
1544
1545static int __init setup_fail_page_alloc(char *str)
1546{
1547        return setup_fault_attr(&fail_page_alloc.attr, str);
1548}
1549__setup("fail_page_alloc=", setup_fail_page_alloc);
1550
1551static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1552{
1553        if (order < fail_page_alloc.min_order)
1554                return false;
1555        if (gfp_mask & __GFP_NOFAIL)
1556                return false;
1557        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1558                return false;
1559        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1560                return false;
1561
1562        return should_fail(&fail_page_alloc.attr, 1 << order);
1563}
1564
1565#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1566
1567static int __init fail_page_alloc_debugfs(void)
1568{
1569        umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1570        struct dentry *dir;
1571
1572        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1573                                        &fail_page_alloc.attr);
1574        if (IS_ERR(dir))
1575                return PTR_ERR(dir);
1576
1577        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1578                                &fail_page_alloc.ignore_gfp_wait))
1579                goto fail;
1580        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1581                                &fail_page_alloc.ignore_gfp_highmem))
1582                goto fail;
1583        if (!debugfs_create_u32("min-order", mode, dir,
1584                                &fail_page_alloc.min_order))
1585                goto fail;
1586
1587        return 0;
1588fail:
1589        debugfs_remove_recursive(dir);
1590
1591        return -ENOMEM;
1592}
1593
1594late_initcall(fail_page_alloc_debugfs);
1595
1596#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
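/*
 * Usage sketch (assuming the standard fault_attr conventions described in
 * Documentation/fault-injection/fault-injection.txt): failures can be armed
 * at boot with
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * and tuned at run time through the debugfs attributes created above, e.g.
 * /sys/kernel/debug/fail_page_alloc/min-order.  With the defaults set in
 * fail_page_alloc above, only allocations of order >= 1 without __GFP_WAIT
 * and without __GFP_HIGHMEM are candidates for injected failures.
 */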
1597
1598#else /* CONFIG_FAIL_PAGE_ALLOC */
1599
1600static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1601{
1602        return false;
1603}
1604
1605#endif /* CONFIG_FAIL_PAGE_ALLOC */
1606
1607/*
1608 * Return true if free pages are above 'mark'. This takes into account the order
1609 * of the allocation.
1610 */
1611static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1612                      int classzone_idx, int alloc_flags, long free_pages)
1613{
1614        /* free_pages may go negative - that's OK */
1615        long min = mark;
1616        long lowmem_reserve = z->lowmem_reserve[classzone_idx];
1617        int o;
1618
1619        free_pages -= (1 << order) - 1;
1620        if (alloc_flags & ALLOC_HIGH)
1621                min -= min / 2;
1622        if (alloc_flags & ALLOC_HARDER)
1623                min -= min / 4;
1624#ifdef CONFIG_CMA
1625        /* If the allocation can't use CMA areas, don't count free CMA pages */
1626        if (!(alloc_flags & ALLOC_CMA))
1627                free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
1628#endif
1629        if (free_pages <= min + lowmem_reserve)
1630                return false;
1631        for (o = 0; o < order; o++) {
1632                /* At the next order, this order's pages become unavailable */
1633                free_pages -= z->free_area[o].nr_free << o;
1634
1635                /* Require fewer higher order pages to be free */
1636                min >>= 1;
1637
1638                if (free_pages <= min)
1639                        return false;
1640        }
1641        return true;
1642}
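/*
 * Worked example: for an order-2 request against mark = 128 with ALLOC_HIGH
 * set and no lowmem_reserve, min drops to 64 and free_pages is charged
 * (1 << 2) - 1 = 3 pages up front.  A zone with 200 free pages passes the
 * base check (197 > 64); the loop then subtracts the pages sitting on the
 * order-0 and order-1 free lists and halves min at each step, so the request
 * only succeeds if enough memory remains in blocks of order 2 or larger.
 */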
1643
1644#ifdef CONFIG_MEMORY_ISOLATION
1645static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
1646{
1647        if (unlikely(zone->nr_pageblock_isolate))
1648                return zone->nr_pageblock_isolate * pageblock_nr_pages;
1649        return 0;
1650}
1651#else
1652static inline unsigned long nr_zone_isolate_freepages(struct zone *zone)
1653{
1654        return 0;
1655}
1656#endif
1657
1658bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1659                      int classzone_idx, int alloc_flags)
1660{
1661        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1662                                        zone_page_state(z, NR_FREE_PAGES));
1663}
1664
1665bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1666                      int classzone_idx, int alloc_flags)
1667{
1668        long free_pages = zone_page_state(z, NR_FREE_PAGES);
1669
1670        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1671                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1672
1673        /*
1674         * If the zone has MIGRATE_ISOLATE type free pages, we should take
1675         * them into account.  nr_zone_isolate_freepages is never accurate, so
1676         * kswapd might not sleep even though it could.  For memory hotplug that
1677         * is preferable to sleeping, which can cause a livelock in the direct
1678         * reclaim path.
1679         */
1680        free_pages -= nr_zone_isolate_freepages(z);
1681        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1682                                                                free_pages);
1683}
1684
1685#ifdef CONFIG_NUMA
1686/*
1687 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1688 * skip over zones that are not allowed by the cpuset, or that have
1689 * been recently (in the last second) found to be nearly full.  See further
1690 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1691 * that have to skip over a lot of full or unallowed zones.
1692 *
1693 * If the zonelist cache is present in the passed in zonelist, then
1694 * returns a pointer to the allowed node mask (either the current
1695 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1696 *
1697 * If the zonelist cache is not available for this zonelist, does
1698 * nothing and returns NULL.
1699 *
1700 * If the fullzones BITMAP in the zonelist cache is stale (more than
1701 * a second since last zap'd) then we zap it out (clear its bits.)
1702 *
1703 * We hold off even calling zlc_setup, until after we've checked the
1704 * first zone in the zonelist, on the theory that most allocations will
1705 * be satisfied from that first zone, so best to examine that zone as
1706 * quickly as we can.
1707 */
1708static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1709{
1710        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1711        nodemask_t *allowednodes;       /* zonelist_cache approximation */
1712
1713        zlc = zonelist->zlcache_ptr;
1714        if (!zlc)
1715                return NULL;
1716
1717        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1718                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1719                zlc->last_full_zap = jiffies;
1720        }
1721
1722        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1723                                        &cpuset_current_mems_allowed :
1724                                        &node_states[N_HIGH_MEMORY];
1725        return allowednodes;
1726}
1727
1728/*
1729 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1730 * if it is worth looking at further for free memory:
1731 *  1) Check that the zone isn't thought to be full (doesn't have its
1732 *     bit set in the zonelist_cache fullzones BITMAP).
1733 *  2) Check that the zone's node (obtained from the zonelist_cache
1734 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1735 * Return true (non-zero) if zone is worth looking at further, or
1736 * else return false (zero) if it is not.
1737 *
1738 * This check -ignores- the distinction between various watermarks,
1739 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1740 * found to be full for any variation of these watermarks, it will
1741 * be considered full for up to one second by all requests, unless
1742 * we are so low on memory on all allowed nodes that we are forced
1743 * into the second scan of the zonelist.
1744 *
1745 * In the second scan we ignore this zonelist cache and exactly
1746 * apply the watermarks to all zones, even if it is slower to do so.
1747 * We are low on memory in the second scan, and should leave no stone
1748 * unturned looking for a free page.
1749 */
1750static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1751                                                nodemask_t *allowednodes)
1752{
1753        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1754        int i;                          /* index of *z in zonelist zones */
1755        int n;                          /* node that zone *z is on */
1756
1757        zlc = zonelist->zlcache_ptr;
1758        if (!zlc)
1759                return 1;
1760
1761        i = z - zonelist->_zonerefs;
1762        n = zlc->z_to_n[i];
1763
1764        /* This zone is worth trying if it is allowed but not full */
1765        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1766}
1767
1768/*
1769 * Given 'z' scanning a zonelist, set the corresponding bit in
1770 * zlc->fullzones, so that subsequent attempts to allocate a page
1771 * from that zone don't waste time re-examining it.
1772 */
1773static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1774{
1775        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1776        int i;                          /* index of *z in zonelist zones */
1777
1778        zlc = zonelist->zlcache_ptr;
1779        if (!zlc)
1780                return;
1781
1782        i = z - zonelist->_zonerefs;
1783
1784        set_bit(i, zlc->fullzones);
1785}
1786
1787/*
1788 * clear all zones full, called after direct reclaim makes progress so that
1789 * a zone that was recently full is not skipped over for up to a second
1790 */
1791static void zlc_clear_zones_full(struct zonelist *zonelist)
1792{
1793        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1794
1795        zlc = zonelist->zlcache_ptr;
1796        if (!zlc)
1797                return;
1798
1799        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1800}
1801
1802static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1803{
1804        return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
1805}
1806
1807static void __paginginit init_zone_allows_reclaim(int nid)
1808{
1809        int i;
1810
1811        for_each_online_node(i)
1812                if (node_distance(nid, i) <= RECLAIM_DISTANCE)
1813                        node_set(i, NODE_DATA(nid)->reclaim_nodes);
1814                else
1815                        zone_reclaim_mode = 1;
1816}
1817
1818#else   /* CONFIG_NUMA */
1819
1820static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1821{
1822        return NULL;
1823}
1824
1825static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1826                                nodemask_t *allowednodes)
1827{
1828        return 1;
1829}
1830
1831static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1832{
1833}
1834
1835static void zlc_clear_zones_full(struct zonelist *zonelist)
1836{
1837}
1838
1839static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
1840{
1841        return true;
1842}
1843
1844static inline void init_zone_allows_reclaim(int nid)
1845{
1846}
1847#endif  /* CONFIG_NUMA */
1848
1849/*
1850 * get_page_from_freelist goes through the zonelist trying to allocate
1851 * a page.
1852 */
1853static struct page *
1854get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1855                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1856                struct zone *preferred_zone, int migratetype)
1857{
1858        struct zoneref *z;
1859        struct page *page = NULL;
1860        int classzone_idx;
1861        struct zone *zone;
1862        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1863        int zlc_active = 0;             /* set if using zonelist_cache */
1864        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
1865
1866        classzone_idx = zone_idx(preferred_zone);
1867zonelist_scan:
1868        /*
1869         * Scan zonelist, looking for a zone with enough free memory.
1870         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1871         */
1872        for_each_zone_zonelist_nodemask(zone, z, zonelist,
1873                                                high_zoneidx, nodemask) {
1874                if (NUMA_BUILD && zlc_active &&
1875                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
1876                                continue;
1877                if ((alloc_flags & ALLOC_CPUSET) &&
1878                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
1879                                continue;
1880                /*
1881                 * When allocating a page cache page for writing, we
1882                 * want to get it from a zone that is within its dirty
1883                 * limit, such that no single zone holds more than its
1884                 * proportional share of globally allowed dirty pages.
1885                 * The dirty limits take into account the zone's
1886                 * lowmem reserves and high watermark so that kswapd
1887                 * should be able to balance it without having to
1888                 * write pages from its LRU list.
1889                 *
1890                 * This may look like it could increase pressure on
1891                 * lower zones by failing allocations in higher zones
1892                 * before they are full.  But the pages that do spill
1893                 * over are limited as the lower zones are protected
1894                 * by this very same mechanism.  It should not become
1895                 * a practical burden to them.
1896                 *
1897                 * XXX: For now, allow allocations to potentially
1898                 * exceed the per-zone dirty limit in the slowpath
1899                 * (ALLOC_WMARK_LOW unset) before going into reclaim,
1900                 * which is important when on a NUMA setup the allowed
1901                 * zones are together not big enough to reach the
1902                 * global limit.  The proper fix for these situations
1903                 * will require awareness of zones in the
1904                 * dirty-throttling and the flusher threads.
1905                 */
1906                if ((alloc_flags & ALLOC_WMARK_LOW) &&
1907                    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1908                        goto this_zone_full;
1909
1910                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1911                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1912                        unsigned long mark;
1913                        int ret;
1914
1915                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1916                        if (zone_watermark_ok(zone, order, mark,
1917                                    classzone_idx, alloc_flags))
1918                                goto try_this_zone;
1919
1920                        if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1921                                /*
1922                                 * we do zlc_setup if there are multiple nodes
1923                                 * and before considering the first zone allowed
1924                                 * by the cpuset.
1925                                 */
1926                                allowednodes = zlc_setup(zonelist, alloc_flags);
1927                                zlc_active = 1;
1928                                did_zlc_setup = 1;
1929                        }
1930
1931                        if (zone_reclaim_mode == 0 ||
1932                            !zone_allows_reclaim(preferred_zone, zone))
1933                                goto this_zone_full;
1934
1935                        /*
1936                         * As we may have just activated ZLC, check if the first
1937                         * eligible zone has failed zone_reclaim recently.
1938                         */
1939                        if (NUMA_BUILD && zlc_active &&
1940                                !zlc_zone_worth_trying(zonelist, z, allowednodes))
1941                                continue;
1942
1943                        ret = zone_reclaim(zone, gfp_mask, order);
1944                        switch (ret) {
1945                        case ZONE_RECLAIM_NOSCAN:
1946                                /* did not scan */
1947                                continue;
1948                        case ZONE_RECLAIM_FULL:
1949                                /* scanned but unreclaimable */
1950                                continue;
1951                        default:
1952                                /* did we reclaim enough */
1953                                if (!zone_watermark_ok(zone, order, mark,
1954                                                classzone_idx, alloc_flags))
1955                                        goto this_zone_full;
1956                        }
1957                }
1958
1959try_this_zone:
1960                page = buffered_rmqueue(preferred_zone, zone, order,
1961                                                gfp_mask, migratetype);
1962                if (page)
1963                        break;
1964this_zone_full:
1965                if (NUMA_BUILD)
1966                        zlc_mark_zone_full(zonelist, z);
1967        }
1968
1969        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1970                /* Disable zlc cache for second zonelist scan */
1971                zlc_active = 0;
1972                goto zonelist_scan;
1973        }
1974
1975        if (page)
1976                /*
1977                 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
1978                 * necessary to allocate the page. The expectation is
1979                 * that the caller is taking steps that will free more
1980                 * memory. The caller should avoid the page being used
1981                 * for !PFMEMALLOC purposes.
1982                 */
1983                page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
1984
1985        return page;
1986}
1987
1988/*
1989 * Large machines with many possible nodes should not always dump per-node
1990 * meminfo in irq context.
1991 */
1992static inline bool should_suppress_show_mem(void)
1993{
1994        bool ret = false;
1995
1996#if NODES_SHIFT > 8
1997        ret = in_interrupt();
1998#endif
1999        return ret;
2000}
2001
2002static DEFINE_RATELIMIT_STATE(nopage_rs,
2003                DEFAULT_RATELIMIT_INTERVAL,
2004                DEFAULT_RATELIMIT_BURST);
2005
2006void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
2007{
2008        unsigned int filter = SHOW_MEM_FILTER_NODES;
2009
2010        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
2011            debug_guardpage_minorder() > 0)
2012                return;
2013
2014        /*
2015         * This documents exceptions given to allocations in certain
2016         * contexts that are allowed to allocate outside current's set
2017         * of allowed nodes.
2018         */
2019        if (!(gfp_mask & __GFP_NOMEMALLOC))
2020                if (test_thread_flag(TIF_MEMDIE) ||
2021                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
2022                        filter &= ~SHOW_MEM_FILTER_NODES;
2023        if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
2024                filter &= ~SHOW_MEM_FILTER_NODES;
2025
2026        if (fmt) {
2027                struct va_format vaf;
2028                va_list args;
2029
2030                va_start(args, fmt);
2031
2032                vaf.fmt = fmt;
2033                vaf.va = &args;
2034
2035                pr_warn("%pV", &vaf);
2036
2037                va_end(args);
2038        }
2039
2040        pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
2041                current->comm, order, gfp_mask);
2042
2043        dump_stack();
2044        if (!should_suppress_show_mem())
2045                show_mem(filter);
2046}
2047
2048static inline int
2049should_alloc_retry(gfp_t gfp_mask, unsigned int order,
2050                                unsigned long did_some_progress,
2051                                unsigned long pages_reclaimed)
2052{
2053        /* Do not loop if specifically requested */
2054        if (gfp_mask & __GFP_NORETRY)
2055                return 0;
2056
2057        /* Always retry if specifically requested */
2058        if (gfp_mask & __GFP_NOFAIL)
2059                return 1;
2060
2061        /*
2062         * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
2063         * making forward progress without invoking OOM. Suspend also disables
2064         * storage devices so kswapd will not help. Bail if we are suspending.
2065         */
2066        if (!did_some_progress && pm_suspended_storage())
2067                return 0;
2068
2069        /*
2070         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
2071         * means __GFP_NOFAIL, but that may not be true in other
2072         * implementations.
2073         */
2074        if (order <= PAGE_ALLOC_COSTLY_ORDER)
2075                return 1;
2076
2077        /*
2078         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
2079         * specified, then we retry until we no longer reclaim any pages
2080         * (above), or we've reclaimed an order of pages at least as
2081         * large as the allocation's order. In both cases, if the
2082         * allocation still fails, we stop retrying.
2083         */
2084        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
2085                return 1;
2086
2087        return 0;
2088}
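/*
 * Worked example: a GFP_KERNEL order-1 request is always retried (unless
 * storage is suspended and reclaim made no progress), because
 * order <= PAGE_ALLOC_COSTLY_ORDER is treated like __GFP_NOFAIL here.  An
 * order-4 request is only retried while __GFP_REPEAT is set and fewer than
 * 1 << 4 = 16 pages have been reclaimed in total.
 */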
2089
2090static inline struct page *
2091__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2092        struct zonelist *zonelist, enum zone_type high_zoneidx,
2093        nodemask_t *nodemask, struct zone *preferred_zone,
2094        int migratetype)
2095{
2096        struct page *page;
2097
2098        /* Acquire the OOM killer lock for the zones in zonelist */
2099        if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
2100                schedule_timeout_uninterruptible(1);
2101                return NULL;
2102        }
2103
2104        /*
2105         * Go through the zonelist yet one more time, keep very high watermark
2106         * here, this is only to catch a parallel oom killing, we must fail if
2107         * we're still under heavy pressure.
2108         */
2109        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2110                order, zonelist, high_zoneidx,
2111                ALLOC_WMARK_HIGH|ALLOC_CPUSET,
2112                preferred_zone, migratetype);
2113        if (page)
2114                goto out;
2115
2116        if (!(gfp_mask & __GFP_NOFAIL)) {
2117                /* The OOM killer will not help higher order allocs */
2118                if (order > PAGE_ALLOC_COSTLY_ORDER)
2119                        goto out;
2120                /* The OOM killer does not needlessly kill tasks for lowmem */
2121                if (high_zoneidx < ZONE_NORMAL)
2122                        goto out;
2123                /*
2124                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2125                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
2126                 * The caller should handle page allocation failure by itself if
2127                 * it specifies __GFP_THISNODE.
2128                 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
2129                 */
2130                if (gfp_mask & __GFP_THISNODE)
2131                        goto out;
2132        }
2133        /* Exhausted what can be done so it's blamo time */
2134        out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2135
2136out:
2137        clear_zonelist_oom(zonelist, gfp_mask);
2138        return page;
2139}
2140
2141#ifdef CONFIG_COMPACTION
2142/* Try memory compaction for high-order allocations before reclaim */
2143static struct page *
2144__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2145        struct zonelist *zonelist, enum zone_type high_zoneidx,
2146        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2147        int migratetype, bool sync_migration,
2148        bool *contended_compaction, bool *deferred_compaction,
2149        unsigned long *did_some_progress)
2150{
2151        struct page *page = NULL;
2152
2153        if (!order)
2154                return NULL;
2155
2156        if (compaction_deferred(preferred_zone, order)) {
2157                *deferred_compaction = true;
2158                return NULL;
2159        }
2160
2161        current->flags |= PF_MEMALLOC;
2162        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2163                                                nodemask, sync_migration,
2164                                                contended_compaction, &page);
2165        current->flags &= ~PF_MEMALLOC;
2166
2167        /* If compaction captured a page, prep and use it */
2168        if (page) {
2169                prep_new_page(page, order, gfp_mask);
2170                goto got_page;
2171        }
2172
2173        if (*did_some_progress != COMPACT_SKIPPED) {
2174                /* Page migration frees to the PCP lists but we want merging */
2175                drain_pages(get_cpu());
2176                put_cpu();
2177
2178                page = get_page_from_freelist(gfp_mask, nodemask,
2179                                order, zonelist, high_zoneidx,
2180                                alloc_flags & ~ALLOC_NO_WATERMARKS,
2181                                preferred_zone, migratetype);
2182                if (page) {
2183got_page:
2184                        preferred_zone->compact_blockskip_flush = false;
2185                        preferred_zone->compact_considered = 0;
2186                        preferred_zone->compact_defer_shift = 0;
2187                        if (order >= preferred_zone->compact_order_failed)
2188                                preferred_zone->compact_order_failed = order + 1;
2189                        count_vm_event(COMPACTSUCCESS);
2190                        return page;
2191                }
2192
2193                /*
2194                 * It's bad if a compaction run occurs and fails.
2195                 * The most likely reason is that pages exist,
2196                 * but not enough to satisfy watermarks.
2197                 */
2198                count_vm_event(COMPACTFAIL);
2199
2200                /*
2201                 * As async compaction considers a subset of pageblocks, only
2202                 * defer if the failure was a sync compaction failure.
2203                 */
2204                if (sync_migration)
2205                        defer_compaction(preferred_zone, order);
2206
2207                cond_resched();
2208        }
2209
2210        return NULL;
2211}
2212#else
2213static inline struct page *
2214__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2215        struct zonelist *zonelist, enum zone_type high_zoneidx,
2216        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2217        int migratetype, bool sync_migration,
2218        bool *contended_compaction, bool *deferred_compaction,
2219        unsigned long *did_some_progress)
2220{
2221        return NULL;
2222}
2223#endif /* CONFIG_COMPACTION */
2224
2225/* Perform direct synchronous page reclaim */
2226static int
2227__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2228                  nodemask_t *nodemask)
2229{
2230        struct reclaim_state reclaim_state;
2231        int progress;
2232
2233        cond_resched();
2234
2235        /* We now go into synchronous reclaim */
2236        cpuset_memory_pressure_bump();
2237        current->flags |= PF_MEMALLOC;
2238        lockdep_set_current_reclaim_state(gfp_mask);
2239        reclaim_state.reclaimed_slab = 0;
2240        current->reclaim_state = &reclaim_state;
2241
2242        progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2243
2244        current->reclaim_state = NULL;
2245        lockdep_clear_current_reclaim_state();
2246        current->flags &= ~PF_MEMALLOC;
2247
2248        cond_resched();
2249
2250        return progress;
2251}
2252
2253/* The really slow allocator path where we enter direct reclaim */
2254static inline struct page *
2255__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2256        struct zonelist *zonelist, enum zone_type high_zoneidx,
2257        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2258        int migratetype, unsigned long *did_some_progress)
2259{
2260        struct page *page = NULL;
2261        bool drained = false;
2262
2263        *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2264                                               nodemask);
2265        if (unlikely(!(*did_some_progress)))
2266                return NULL;
2267
2268        /* After successful reclaim, reconsider all zones for allocation */
2269        if (NUMA_BUILD)
2270                zlc_clear_zones_full(zonelist);
2271
2272retry:
2273        page = get_page_from_freelist(gfp_mask, nodemask, order,
2274                                        zonelist, high_zoneidx,
2275                                        alloc_flags & ~ALLOC_NO_WATERMARKS,
2276                                        preferred_zone, migratetype);
2277
2278        /*
2279         * If an allocation failed after direct reclaim, it could be because
2280         * pages are pinned on the per-cpu lists. Drain them and try again
2281         */
2282        if (!page && !drained) {
2283                drain_all_pages();
2284                drained = true;
2285                goto retry;
2286        }
2287
2288        return page;
2289}
2290
2291/*
2292 * This is called in the allocator slow-path if the allocation request is of
2293 * sufficient urgency to ignore watermarks and take other desperate measures
2294 */
2295static inline struct page *
2296__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2297        struct zonelist *zonelist, enum zone_type high_zoneidx,
2298        nodemask_t *nodemask, struct zone *preferred_zone,
2299        int migratetype)
2300{
2301        struct page *page;
2302
2303        do {
2304                page = get_page_from_freelist(gfp_mask, nodemask, order,
2305                        zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2306                        preferred_zone, migratetype);
2307
2308                if (!page && gfp_mask & __GFP_NOFAIL)
2309                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2310        } while (!page && (gfp_mask & __GFP_NOFAIL));
2311
2312        return page;
2313}
2314
2315static inline
2316void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2317                                                enum zone_type high_zoneidx,
2318                                                enum zone_type classzone_idx)
2319{
2320        struct zoneref *z;
2321        struct zone *zone;
2322
2323        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2324                wakeup_kswapd(zone, order, classzone_idx);
2325}
2326
2327static inline int
2328gfp_to_alloc_flags(gfp_t gfp_mask)
2329{
2330        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2331        const gfp_t wait = gfp_mask & __GFP_WAIT;
2332
2333        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2334        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2335
2336        /*
2337         * The caller may dip into page reserves a bit more if the caller
2338         * cannot run direct reclaim, or if the caller has realtime scheduling
2339         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2340         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2341         */
2342        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2343
2344        if (!wait) {
2345                /*
2346                 * Not worth trying to allocate harder for
2347                 * __GFP_NOMEMALLOC even if it can't schedule.
2348                 */
2349                if  (!(gfp_mask & __GFP_NOMEMALLOC))
2350                        alloc_flags |= ALLOC_HARDER;
2351                /*
2352                 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2353                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2354                 */
2355                alloc_flags &= ~ALLOC_CPUSET;
2356        } else if (unlikely(rt_task(current)) && !in_interrupt())
2357                alloc_flags |= ALLOC_HARDER;
2358
2359        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2360                if (gfp_mask & __GFP_MEMALLOC)
2361                        alloc_flags |= ALLOC_NO_WATERMARKS;
2362                else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
2363                        alloc_flags |= ALLOC_NO_WATERMARKS;
2364                else if (!in_interrupt() &&
2365                                ((current->flags & PF_MEMALLOC) ||
2366                                 unlikely(test_thread_flag(TIF_MEMDIE))))
2367                        alloc_flags |= ALLOC_NO_WATERMARKS;
2368        }
2369#ifdef CONFIG_CMA
2370        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2371                alloc_flags |= ALLOC_CMA;
2372#endif
2373        return alloc_flags;
2374}
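/*
 * Worked example: GFP_ATOMIC is __GFP_HIGH without __GFP_WAIT, so it leaves
 * here as ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with ALLOC_CPUSET
 * cleared, i.e. it may dip into the reserves and ignores cpuset limits
 * rather than fail.  A plain GFP_KERNEL request keeps
 * ALLOC_WMARK_MIN | ALLOC_CPUSET and gains ALLOC_HARDER only when issued by
 * a realtime task outside interrupt context.
 */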
2375
2376bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
2377{
2378        return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
2379}
2380
2381static inline struct page *
2382__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2383        struct zonelist *zonelist, enum zone_type high_zoneidx,
2384        nodemask_t *nodemask, struct zone *preferred_zone,
2385        int migratetype)
2386{
2387        const gfp_t wait = gfp_mask & __GFP_WAIT;
2388        struct page *page = NULL;
2389        int alloc_flags;
2390        unsigned long pages_reclaimed = 0;
2391        unsigned long did_some_progress;
2392        bool sync_migration = false;
2393        bool deferred_compaction = false;
2394        bool contended_compaction = false;
2395
2396        /*
2397         * In the slowpath, we sanity check order to avoid ever trying to
2398         * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2399         * be using allocators in order of preference for an area that is
2400         * too large.
2401         */
2402        if (order >= MAX_ORDER) {
2403                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2404                return NULL;
2405        }
2406
2407        /*
2408         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2409         * __GFP_NOWARN set) should not cause reclaim since the subsystem
2410         * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2411         * using a larger set of nodes after it has established that the
2412         * allowed per node queues are empty and that nodes are
2413         * over allocated.
2414         */
2415        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2416                goto nopage;
2417
2418restart:
2419        if (!(gfp_mask & __GFP_NO_KSWAPD))
2420                wake_all_kswapd(order, zonelist, high_zoneidx,
2421                                                zone_idx(preferred_zone));
2422
2423        /*
2424         * OK, we're below the kswapd watermark and have kicked background
2425         * reclaim. Now things get more complex, so set up alloc_flags according
2426         * to how we want to proceed.
2427         */
2428        alloc_flags = gfp_to_alloc_flags(gfp_mask);
2429
2430        /*
2431         * Find the true preferred zone if the allocation is unconstrained by
2432         * cpusets.
2433         */
2434        if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2435                first_zones_zonelist(zonelist, high_zoneidx, NULL,
2436                                        &preferred_zone);
2437
2438rebalance:
2439        /* This is the last chance, in general, before the goto nopage. */
2440        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2441                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2442                        preferred_zone, migratetype);
2443        if (page)
2444                goto got_pg;
2445
2446        /* Allocate without watermarks if the context allows */
2447        if (alloc_flags & ALLOC_NO_WATERMARKS) {
2448                /*
2449                 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2450                 * that the allocation is high priority and these types of
2451                 * allocations are system rather than user oriented.
2452                 */
2453                zonelist = node_zonelist(numa_node_id(), gfp_mask);
2454
2455                page = __alloc_pages_high_priority(gfp_mask, order,
2456                                zonelist, high_zoneidx, nodemask,
2457                                preferred_zone, migratetype);
2458                if (page)
2459                        goto got_pg;
2461        }
2462
2463        /* Atomic allocations - we can't balance anything */
2464        if (!wait)
2465                goto nopage;
2466
2467        /* Avoid recursion of direct reclaim */
2468        if (current->flags & PF_MEMALLOC)
2469                goto nopage;
2470
2471        /* Avoid allocations with no watermarks from looping endlessly */
2472        if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2473                goto nopage;
2474
2475        /*
2476         * Try direct compaction. The first pass is asynchronous. Subsequent
2477         * attempts after direct reclaim are synchronous
2478         */
2479        page = __alloc_pages_direct_compact(gfp_mask, order,
2480                                        zonelist, high_zoneidx,
2481                                        nodemask,
2482                                        alloc_flags, preferred_zone,
2483                                        migratetype, sync_migration,
2484                                        &contended_compaction,
2485                                        &deferred_compaction,
2486                                        &did_some_progress);
2487        if (page)
2488                goto got_pg;
2489        sync_migration = true;
2490
2491        /*
2492         * If compaction is deferred for high-order allocations, it is because
2493         * sync compaction recently failed. If this is the case and the caller
2494         * requested a movable allocation that does not heavily disrupt the
2495         * system then fail the allocation instead of entering direct reclaim.
2496         */
2497        if ((deferred_compaction || contended_compaction) &&
2498                                                (gfp_mask & __GFP_NO_KSWAPD))
2499                goto nopage;
2500
2501        /* Try direct reclaim and then allocating */
2502        page = __alloc_pages_direct_reclaim(gfp_mask, order,
2503                                        zonelist, high_zoneidx,
2504                                        nodemask,
2505                                        alloc_flags, preferred_zone,
2506                                        migratetype, &did_some_progress);
2507        if (page)
2508                goto got_pg;
2509
2510        /*
2511         * If we failed to make any progress reclaiming, then we are
2512         * running out of options and have to consider going OOM
2513         */
2514        if (!did_some_progress) {
2515                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2516                        if (oom_killer_disabled)
2517                                goto nopage;
2518                        /* Coredumps can quickly deplete all memory reserves */
2519                        if ((current->flags & PF_DUMPCORE) &&
2520                            !(gfp_mask & __GFP_NOFAIL))
2521                                goto nopage;
2522                        page = __alloc_pages_may_oom(gfp_mask, order,
2523                                        zonelist, high_zoneidx,
2524                                        nodemask, preferred_zone,
2525                                        migratetype);
2526                        if (page)
2527                                goto got_pg;
2528
2529                        if (!(gfp_mask & __GFP_NOFAIL)) {
2530                                /*
2531                                 * The oom killer is not called for high-order
2532                                 * allocations that may fail, so if no progress
2533                                 * is being made, there are no other options and
2534                                 * retrying is unlikely to help.
2535                                 */
2536                                if (order > PAGE_ALLOC_COSTLY_ORDER)
2537                                        goto nopage;
2538                                /*
2539                                 * The oom killer is not called for lowmem
2540                                 * allocations to prevent needlessly killing
2541                                 * innocent tasks.
2542                                 */
2543                                if (high_zoneidx < ZONE_NORMAL)
2544                                        goto nopage;
2545                        }
2546
2547                        goto restart;
2548                }
2549        }
2550
2551        /* Check if we should retry the allocation */
2552        pages_reclaimed += did_some_progress;
2553        if (should_alloc_retry(gfp_mask, order, did_some_progress,
2554                                                pages_reclaimed)) {
2555                /* Wait for some write requests to complete then retry */
2556                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2557                goto rebalance;
2558        } else {
2559                /*
2560                 * High-order allocations do not necessarily loop after direct
2561                 * reclaim, and reclaim/compaction depends on compaction being
2562                 * called after reclaim, so call it directly if necessary.
2563                 */
2564                page = __alloc_pages_direct_compact(gfp_mask, order,
2565                                        zonelist, high_zoneidx,
2566                                        nodemask,
2567                                        alloc_flags, preferred_zone,
2568                                        migratetype, sync_migration,
2569                                        &contended_compaction,
2570                                        &deferred_compaction,
2571                                        &did_some_progress);
2572                if (page)
2573                        goto got_pg;
2574        }
2575
2576nopage:
2577        warn_alloc_failed(gfp_mask, order, NULL);
2578        return page;
2579got_pg:
2580        if (kmemcheck_enabled)
2581                kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2582
2583        return page;
2584}
2585
2586/*
2587 * This is the 'heart' of the zoned buddy allocator.
2588 */
2589struct page *
2590__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2591                        struct zonelist *zonelist, nodemask_t *nodemask)
2592{
2593        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2594        struct zone *preferred_zone;
2595        struct page *page = NULL;
2596        int migratetype = allocflags_to_migratetype(gfp_mask);
2597        unsigned int cpuset_mems_cookie;
2598        int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET;
2599
2600        gfp_mask &= gfp_allowed_mask;
2601
2602        lockdep_trace_alloc(gfp_mask);
2603
2604        might_sleep_if(gfp_mask & __GFP_WAIT);
2605
2606        if (should_fail_alloc_page(gfp_mask, order))
2607                return NULL;
2608
2609        /*
2610         * Check that the zones suitable for the gfp_mask contain at least one
2611         * valid zone. It's possible to have an empty zonelist as a result
2612         * of GFP_THISNODE and a memoryless node
2613         */
2614        if (unlikely(!zonelist->_zonerefs->zone))
2615                return NULL;
2616
2617retry_cpuset:
2618        cpuset_mems_cookie = get_mems_allowed();
2619
2620        /* The preferred zone is used for statistics later */
2621        first_zones_zonelist(zonelist, high_zoneidx,
2622                                nodemask ? : &cpuset_current_mems_allowed,
2623                                &preferred_zone);
2624        if (!preferred_zone)
2625                goto out;
2626
2627#ifdef CONFIG_CMA
2628        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
2629                alloc_flags |= ALLOC_CMA;
2630#endif
2631        /* First allocation attempt */
2632        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2633                        zonelist, high_zoneidx, alloc_flags,
2634                        preferred_zone, migratetype);
2635        if (unlikely(!page))
2636                page = __alloc_pages_slowpath(gfp_mask, order,
2637                                zonelist, high_zoneidx, nodemask,
2638                                preferred_zone, migratetype);
2639
2640        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2641
2642out:
2643        /*
2644         * When updating a task's mems_allowed, it is possible to race with
2645         * parallel threads in such a way that an allocation can fail while
2646         * the mask is being updated. If a page allocation is about to fail,
2647         * check if the cpuset changed during allocation and if so, retry.
2648         */
2649        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2650                goto retry_cpuset;
2651
2652        return page;
2653}
2654EXPORT_SYMBOL(__alloc_pages_nodemask);
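/*
 * Usage sketch: callers usually reach this via the alloc_pages() /
 * alloc_page() wrappers from <linux/gfp.h> rather than calling it directly.
 * For example, an order-2 (four page) GFP_KERNEL allocation:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		void *buf = page_address(page);
 *		...
 *		__free_pages(page, 2);
 *	}
 *
 * page_address() is only meaningful for lowmem pages; highmem pages must be
 * mapped with kmap() first.
 */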
2655
2656/*
2657 * Common helper functions.
2658 */
2659unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2660{
2661        struct page *page;
2662
2663        /*
2664         * __get_free_pages() returns a 32-bit address, which cannot represent
2665         * a highmem page
2666         */
2667        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2668
2669        page = alloc_pages(gfp_mask, order);
2670        if (!page)
2671                return 0;
2672        return (unsigned long) page_address(page);
2673}
2674EXPORT_SYMBOL(__get_free_pages);
2675
2676unsigned long get_zeroed_page(gfp_t gfp_mask)
2677{
2678        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2679}
2680EXPORT_SYMBOL(get_zeroed_page);
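/*
 * Usage sketch: these helpers return a kernel virtual address rather than a
 * struct page, so they pair with free_pages()/free_page().  For example:
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	if (addr)
 *		free_pages(addr, 1);
 *
 *	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);
 *	if (zeroed)
 *		free_page(zeroed);
 */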
2681
2682void __free_pages(struct page *page, unsigned int order)
2683{
2684        if (put_page_testzero(page)) {
2685                if (order == 0)
2686                        free_hot_cold_page(page, 0);
2687                else
2688                        __free_pages_ok(page, order);
2689        }
2690}
2691
2692EXPORT_SYMBOL(__free_pages);
2693
2694void free_pages(unsigned long addr, unsigned int order)
2695{
2696        if (addr != 0) {
2697                VM_BUG_ON(!virt_addr_valid((void *)addr));
2698                __free_pages(virt_to_page((void *)addr), order);
2699        }
2700}
2701
2702EXPORT_SYMBOL(free_pages);
2703
2704static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2705{
2706        if (addr) {
2707                unsigned long alloc_end = addr + (PAGE_SIZE << order);
2708                unsigned long used = addr + PAGE_ALIGN(size);
2709
2710                split_page(virt_to_page((void *)addr), order);
2711                while (used < alloc_end) {
2712                        free_page(used);
2713                        used += PAGE_SIZE;
2714                }
2715        }
2716        return (void *)addr;
2717}
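/*
 * Worked example: a 10KB alloc_pages_exact() request with 4KB pages rounds
 * up to an order-2 (16KB) allocation; make_alloc_exact() then splits it into
 * four order-0 pages and frees the one page beyond PAGE_ALIGN(10KB), so only
 * 12KB stays allocated.
 */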
2718
2719/**
2720 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2721 * @size: the number of bytes to allocate
2722 * @gfp_mask: GFP flags for the allocation
2723 *
2724 * This function is similar to alloc_pages(), except that it allocates the
2725 * minimum number of pages to satisfy the request.  alloc_pages() can only
2726 * allocate memory in power-of-two pages.
2727 *
2728 * This function is also limited by MAX_ORDER.
2729 *
2730 * Memory allocated by this function must be released by free_pages_exact().
2731 */
2732void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2733{
2734        unsigned int order = get_order(size);
2735        unsigned long addr;
2736
2737        addr = __get_free_pages(gfp_mask, order);
2738        return make_alloc_exact(addr, order, size);
2739}
2740EXPORT_SYMBOL(alloc_pages_exact);
2741
2742/**
2743 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2744 *                         pages on a node.
2745 * @nid: the preferred node ID where memory should be allocated
2746 * @size: the number of bytes to allocate
2747 * @gfp_mask: GFP flags for the allocation
2748 *
2749 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2750 * back.
2751 * Note this is not alloc_pages_exact_node() which allocates on a specific node,
2752 * but is not exact.
2753 */
2754void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2755{
2756        unsigned order = get_order(size);
2757        struct page *p = alloc_pages_node(nid, gfp_mask, order);
2758        if (!p)
2759                return NULL;
2760        return make_alloc_exact((unsigned long)page_address(p), order, size);
2761}
2762EXPORT_SYMBOL(alloc_pages_exact_nid);
2763
2764/**
2765 * free_pages_exact - release memory allocated via alloc_pages_exact()
2766 * @virt: the value returned by alloc_pages_exact.
2767 * @size: size of allocation, same value as passed to alloc_pages_exact().
2768 *
2769 * Release the memory allocated by a previous call to alloc_pages_exact.
2770 */
2771void free_pages_exact(void *virt, size_t size)
2772{
2773        unsigned long addr = (unsigned long)virt;
2774        unsigned long end = addr + PAGE_ALIGN(size);
2775
2776        while (addr < end) {
2777                free_page(addr);
2778                addr += PAGE_SIZE;
2779        }
2780}
2781EXPORT_SYMBOL(free_pages_exact);
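/*
 * Usage sketch: alloc_pages_exact() and free_pages_exact() take a size in
 * bytes and must be paired with the same size:
 *
 *	void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 10 * 1024);
 *	}
 */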
2782
2783static unsigned int nr_free_zone_pages(int offset)
2784{
2785        struct zoneref *z;
2786        struct zone *zone;
2787
2788        /* Just pick one node, since fallback list is circular */
2789        unsigned int sum = 0;
2790
2791        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2792
2793        for_each_zone_zonelist(zone, z, zonelist, offset) {
2794                unsigned long size = zone->present_pages;
2795                unsigned long high = high_wmark_pages(zone);
2796                if (size > high)
2797                        sum += size - high;
2798        }
2799
2800        return sum;
2801}
2802
2803/*
2804 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2805 */
2806unsigned int nr_free_buffer_pages(void)
2807{
2808        return nr_free_zone_pages(gfp_zone(GFP_USER));
2809}
2810EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2811
2812/*
2813 * Amount of free RAM allocatable within all zones
2814 */
2815unsigned int nr_free_pagecache_pages(void)
2816{
2817        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2818}
2819
2820static inline void show_node(struct zone *zone)
2821{
2822        if (NUMA_BUILD)
2823                printk("Node %d ", zone_to_nid(zone));
2824}
2825
2826void si_meminfo(struct sysinfo *val)
2827{
2828        val->totalram = totalram_pages;
2829        val->sharedram = 0;
2830        val->freeram = global_page_state(NR_FREE_PAGES);
2831        val->bufferram = nr_blockdev_pages();
2832        val->totalhigh = totalhigh_pages;
2833        val->freehigh = nr_free_highpages();
2834        val->mem_unit = PAGE_SIZE;
2835}
2836
2837EXPORT_SYMBOL(si_meminfo);
2838
2839#ifdef CONFIG_NUMA
2840void si_meminfo_node(struct sysinfo *val, int nid)
2841{
2842        pg_data_t *pgdat = NODE_DATA(nid);
2843
2844        val->totalram = pgdat->node_present_pages;
2845        val->freeram = node_page_state(nid, NR_FREE_PAGES);
2846#ifdef CONFIG_HIGHMEM
2847        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2848        val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2849                        NR_FREE_PAGES);
2850#else
2851        val->totalhigh = 0;
2852        val->freehigh = 0;
2853#endif
2854        val->mem_unit = PAGE_SIZE;
2855}
2856#endif
2857
2858/*
2859 * Determine whether the node should be displayed or not, depending on whether
2860 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2861 */
2862bool skip_free_areas_node(unsigned int flags, int nid)
2863{
2864        bool ret = false;
2865        unsigned int cpuset_mems_cookie;
2866
2867        if (!(flags & SHOW_MEM_FILTER_NODES))
2868                goto out;
2869
2870        do {
2871                cpuset_mems_cookie = get_mems_allowed();
2872                ret = !node_isset(nid, cpuset_current_mems_allowed);
2873        } while (!put_mems_allowed(cpuset_mems_cookie));
2874out:
2875        return ret;
2876}
2877
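/* Convert a page count to kilobytes, e.g. K(x) == x * 4 with 4KB pages. */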
2878#define K(x) ((x) << (PAGE_SHIFT-10))
2879
2880/*
2881 * Show free area list (used inside shift_scroll-lock stuff)
2882 * We also calculate the percentage fragmentation. We do this by counting the
2883 * memory on each free list with the exception of the first item on the list.
2884 * Suppresses nodes that are not allowed by current's cpuset if
2885 * SHOW_MEM_FILTER_NODES is passed.
2886 */
2887void show_free_areas(unsigned int filter)
2888{
2889        int cpu;
2890        struct zone *zone;
2891
2892        for_each_populated_zone(zone) {
2893                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2894                        continue;
2895                show_node(zone);
2896                printk("%s per-cpu:\n", zone->name);
2897
2898                for_each_online_cpu(cpu) {
2899                        struct per_cpu_pageset *pageset;
2900
2901                        pageset = per_cpu_ptr(zone->pageset, cpu);
2902
2903                        printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2904                               cpu, pageset->pcp.high,
2905                               pageset->pcp.batch, pageset->pcp.count);
2906                }
2907        }
2908
2909        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2910                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2911                " unevictable:%lu"
2912                " dirty:%lu writeback:%lu unstable:%lu\n"
2913                " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2914                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
2915                " free_cma:%lu\n",
2916                global_page_state(NR_ACTIVE_ANON),
2917                global_page_state(NR_INACTIVE_ANON),
2918                global_page_state(NR_ISOLATED_ANON),
2919                global_page_state(NR_ACTIVE_FILE),
2920                global_page_state(NR_INACTIVE_FILE),
2921                global_page_state(NR_ISOLATED_FILE),
2922                global_page_state(NR_UNEVICTABLE),
2923                global_page_state(NR_FILE_DIRTY),
2924                global_page_state(NR_WRITEBACK),
2925                global_page_state(NR_UNSTABLE_NFS),
2926                global_page_state(NR_FREE_PAGES),
2927                global_page_state(NR_SLAB_RECLAIMABLE),
2928                global_page_state(NR_SLAB_UNRECLAIMABLE),
2929                global_page_state(NR_FILE_MAPPED),
2930                global_page_state(NR_SHMEM),
2931                global_page_state(NR_PAGETABLE),
2932                global_page_state(NR_BOUNCE),
2933                global_page_state(NR_FREE_CMA_PAGES));
2934
2935        for_each_populated_zone(zone) {
2936                int i;
2937
2938                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2939                        continue;
2940                show_node(zone);
2941                printk("%s"
2942                        " free:%lukB"
2943                        " min:%lukB"
2944                        " low:%lukB"
2945                        " high:%lukB"
2946                        " active_anon:%lukB"
2947                        " inactive_anon:%lukB"
2948                        " active_file:%lukB"
2949                        " inactive_file:%lukB"
2950                        " unevictable:%lukB"
2951                        " isolated(anon):%lukB"
2952                        " isolated(file):%lukB"
2953                        " present:%lukB"
2954                        " mlocked:%lukB"
2955                        " dirty:%lukB"
2956                        " writeback:%lukB"
2957                        " mapped:%lukB"
2958                        " shmem:%lukB"
2959                        " slab_reclaimable:%lukB"
2960                        " slab_unreclaimable:%lukB"
2961                        " kernel_stack:%lukB"
2962                        " pagetables:%lukB"
2963                        " unstable:%lukB"
2964                        " bounce:%lukB"
2965                        " free_cma:%lukB"
2966                        " writeback_tmp:%lukB"
2967                        " pages_scanned:%lu"
2968                        " all_unreclaimable? %s"
2969                        "\n",
2970                        zone->name,
2971                        K(zone_page_state(zone, NR_FREE_PAGES)),
2972                        K(min_wmark_pages(zone)),
2973                        K(low_wmark_pages(zone)),
2974                        K(high_wmark_pages(zone)),
2975                        K(zone_page_state(zone, NR_ACTIVE_ANON)),
2976                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
2977                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
2978                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
2979                        K(zone_page_state(zone, NR_UNEVICTABLE)),
2980                        K(zone_page_state(zone, NR_ISOLATED_ANON)),
2981                        K(zone_page_state(zone, NR_ISOLATED_FILE)),
2982                        K(zone->present_pages),
2983                        K(zone_page_state(zone, NR_MLOCK)),
2984                        K(zone_page_state(zone, NR_FILE_DIRTY)),
2985                        K(zone_page_state(zone, NR_WRITEBACK)),
2986                        K(zone_page_state(zone, NR_FILE_MAPPED)),
2987                        K(zone_page_state(zone, NR_SHMEM)),
2988                        K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2989                        K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2990                        zone_page_state(zone, NR_KERNEL_STACK) *
2991                                THREAD_SIZE / 1024,
2992                        K(zone_page_state(zone, NR_PAGETABLE)),
2993                        K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2994                        K(zone_page_state(zone, NR_BOUNCE)),
2995                        K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
2996                        K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2997                        zone->pages_scanned,
2998                        (zone->all_unreclaimable ? "yes" : "no")
2999                        );
3000                printk("lowmem_reserve[]:");
3001                for (i = 0; i < MAX_NR_ZONES; i++)
3002                        printk(" %lu", zone->lowmem_reserve[i]);
3003                printk("\n");
3004        }
3005
3006        for_each_populated_zone(zone) {
3007                unsigned long nr[MAX_ORDER], flags, order, total = 0;
3008
3009                if (skip_free_areas_node(filter, zone_to_nid(zone)))
3010                        continue;
3011                show_node(zone);
3012                printk("%s: ", zone->name);
3013
3014                spin_lock_irqsave(&zone->lock, flags);
3015                for (order = 0; order < MAX_ORDER; order++) {
3016                        nr[order] = zone->free_area[order].nr_free;
3017                        total += nr[order] << order;
3018                }
3019                spin_unlock_irqrestore(&zone->lock, flags);
3020                for (order = 0; order < MAX_ORDER; order++)
3021                        printk("%lu*%lukB ", nr[order], K(1UL) << order);
3022                printk("= %lukB\n", K(total));
3023        }
3024
3025        printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
3026
3027        show_swap_cache_info();
3028}
3029
3030static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
3031{
3032        zoneref->zone = zone;
3033        zoneref->zone_idx = zone_idx(zone);
3034}
3035
3036/*
3037 * Builds allocation fallback zone lists.
3038 *
3039 * Add all populated zones of a node to the zonelist.
3040 */
3041static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
3042                                int nr_zones, enum zone_type zone_type)
3043{
3044        struct zone *zone;
3045
3046        BUG_ON(zone_type >= MAX_NR_ZONES);
3047        zone_type++;
3048
3049        do {
3050                zone_type--;
3051                zone = pgdat->node_zones + zone_type;
3052                if (populated_zone(zone)) {
3053                        zoneref_set_zone(zone,
3054                                &zonelist->_zonerefs[nr_zones++]);
3055                        check_highest_zone(zone_type);
3056                }
3057
3058        } while (zone_type);
3059        return nr_zones;
3060}
3061
3062
3063/*
3064 *  zonelist_order:
3065 *  0 = automatic detection of better ordering.
3066 *  1 = order by ([node] distance, -zonetype)
3067 *  2 = order by (-zonetype, [node] distance)
3068 *
3069 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3070 *  the same zonelist. So only NUMA can configure this param.
3071 */
3072#define ZONELIST_ORDER_DEFAULT  0
3073#define ZONELIST_ORDER_NODE     1
3074#define ZONELIST_ORDER_ZONE     2
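
/*
 * Example (sketch): on a two-node system where each node has a DMA and a
 * Normal zone, the two orderings give node 0 different fallback lists:
 *
 *	Node order: Normal(0), DMA(0), Normal(1), DMA(1)
 *	Zone order: Normal(0), Normal(1), DMA(0), DMA(1)
 *
 * Node order maximises locality; zone order protects the small DMA zone
 * from allocations that do not strictly need it.
 */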
3075
3076/* zonelist order in the kernel.
3077 * set_zonelist_order() will set this to NODE or ZONE.
3078 */
3079static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3080static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3081
3082
3083#ifdef CONFIG_NUMA
3084/* The zonelist order the user asked for; may be updated via boot option or sysctl */
3085static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3086/* string for sysctl */
3087#define NUMA_ZONELIST_ORDER_LEN 16
3088char numa_zonelist_order[16] = "default";
3089
3090/*
3091 * Interface for configuring zonelist ordering.
3092 * Command line option "numa_zonelist_order":
3093 *      = "[dD]efault"  - default, automatic configuration
3094 *      = "[nN]ode"     - order by node locality, then by zone within node
3095 *      = "[zZ]one"     - order by zone, then by locality within zone
3096 */
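
/*
 * Example (sketch): either boot with "numa_zonelist_order=zone" on the
 * kernel command line, or switch at run time with
 *
 *	echo zone > /proc/sys/vm/numa_zonelist_order
 *
 * Both paths end up in __parse_numa_zonelist_order() below; only the
 * first character of the value is significant.
 */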
3097
3098static int __parse_numa_zonelist_order(char *s)
3099{
3100        if (*s == 'd' || *s == 'D') {
3101                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3102        } else if (*s == 'n' || *s == 'N') {
3103                user_zonelist_order = ZONELIST_ORDER_NODE;
3104        } else if (*s == 'z' || *s == 'Z') {
3105                user_zonelist_order = ZONELIST_ORDER_ZONE;
3106        } else {
3107                printk(KERN_WARNING
3108                        "Ignoring invalid numa_zonelist_order value:  "
3109                        "%s\n", s);
3110                return -EINVAL;
3111        }
3112        return 0;
3113}
3114
3115static __init int setup_numa_zonelist_order(char *s)
3116{
3117        int ret;
3118
3119        if (!s)
3120                return 0;
3121
3122        ret = __parse_numa_zonelist_order(s);
3123        if (ret == 0)
3124                strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3125
3126        return ret;
3127}
3128early_param("numa_zonelist_order", setup_numa_zonelist_order);
3129
3130/*
3131 * sysctl handler for numa_zonelist_order
3132 */
3133int numa_zonelist_order_handler(ctl_table *table, int write,
3134                void __user *buffer, size_t *length,
3135                loff_t *ppos)
3136{
3137        char saved_string[NUMA_ZONELIST_ORDER_LEN];
3138        int ret;
3139        static DEFINE_MUTEX(zl_order_mutex);
3140
3141        mutex_lock(&zl_order_mutex);
3142        if (write)
3143                strcpy(saved_string, (char*)table->data);
3144        ret = proc_dostring(table, write, buffer, length, ppos);
3145        if (ret)
3146                goto out;
3147        if (write) {
3148                int oldval = user_zonelist_order;
3149                if (__parse_numa_zonelist_order((char*)table->data)) {
3150                        /*
3151                         * bogus value.  restore saved string
3152                         */
3153                        strncpy((char*)table->data, saved_string,
3154                                NUMA_ZONELIST_ORDER_LEN);
3155                        user_zonelist_order = oldval;
3156                } else if (oldval != user_zonelist_order) {
3157                        mutex_lock(&zonelists_mutex);
3158                        build_all_zonelists(NULL, NULL);
3159                        mutex_unlock(&zonelists_mutex);
3160                }
3161        }
3162out:
3163        mutex_unlock(&zl_order_mutex);
3164        return ret;
3165}
3166
3167
3168#define MAX_NODE_LOAD (nr_online_nodes)
3169static int node_load[MAX_NUMNODES];
3170
3171/**
3172 * find_next_best_node - find the next node that should appear in a given node's fallback list
3173 * @node: node whose fallback list we're appending
3174 * @used_node_mask: nodemask_t of already used nodes
3175 *
3176 * We use a number of factors to determine which is the next node that should
3177 * appear on a given node's fallback list.  The node should not have appeared
3178 * already in @node's fallback list, and it should be the next closest node
3179 * according to the distance array (which contains arbitrary distance values
3180 * from each node to each node in the system), and should also prefer nodes
3181 * with no CPUs, since presumably they'll have very little allocation pressure
3182 * on them otherwise.
3183 * It returns -1 if no node is found.
3184 */
3185static int find_next_best_node(int node, nodemask_t *used_node_mask)
3186{
3187        int n, val;
3188        int min_val = INT_MAX;
3189        int best_node = -1;
3190        const struct cpumask *tmp = cpumask_of_node(0);
3191
3192        /* Use the local node if we haven't already */
3193        if (!node_isset(node, *used_node_mask)) {
3194                node_set(node, *used_node_mask);
3195                return node;
3196        }
3197
3198        for_each_node_state(n, N_HIGH_MEMORY) {
3199
3200                /* Don't want a node to appear more than once */
3201                if (node_isset(n, *used_node_mask))
3202                        continue;
3203
3204                /* Use the distance array to find the distance */
3205                val = node_distance(node, n);
3206
3207                /* Penalize nodes under us ("prefer the next node") */
3208                val += (n < node);
3209
3210                /* Give preference to headless and unused nodes */
3211                tmp = cpumask_of_node(n);
3212                if (!cpumask_empty(tmp))
3213                        val += PENALTY_FOR_NODE_WITH_CPUS;
3214
3215                /* Slight preference for less loaded node */
3216                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3217                val += node_load[n];
3218
3219                if (val < min_val) {
3220                        min_val = val;
3221                        best_node = n;
3222                }
3223        }
3224
3225        if (best_node >= 0)
3226                node_set(best_node, *used_node_mask);
3227
3228        return best_node;
3229}
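
/*
 * Example (sketch, typical SLIT distances of 10 local / 20 remote): among
 * the remaining remote nodes, one without CPUs beats one with CPUs because
 * of PENALTY_FOR_NODE_WITH_CPUS, and node_load[] only breaks ties between
 * nodes whose score is otherwise identical, since the score is scaled by
 * MAX_NODE_LOAD * MAX_NUMNODES before the load is added.
 */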
3230
3231
3232/*
3233 * Build zonelists ordered by node and zones within node.
3234 * This results in maximum locality--normal zone overflows into local
3235 * DMA zone, if any--but risks exhausting DMA zone.
3236 */
3237static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3238{
3239        int j;
3240        struct zonelist *zonelist;
3241
3242        zonelist = &pgdat->node_zonelists[0];
3243        for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3244                ;
3245        j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3246                                                        MAX_NR_ZONES - 1);
3247        zonelist->_zonerefs[j].zone = NULL;
3248        zonelist->_zonerefs[j].zone_idx = 0;
3249}
3250
3251/*
3252 * Build gfp_thisnode zonelists
3253 */
3254static void build_thisnode_zonelists(pg_data_t *pgdat)
3255{
3256        int j;
3257        struct zonelist *zonelist;
3258
3259        zonelist = &pgdat->node_zonelists[1];
3260        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3261        zonelist->_zonerefs[j].zone = NULL;
3262        zonelist->_zonerefs[j].zone_idx = 0;
3263}
3264
3265/*
3266 * Build zonelists ordered by zone and nodes within zones.
3267 * This results in conserving DMA zone[s] until all Normal memory is
3268 * exhausted, but results in overflowing to remote node while memory
3269 * may still exist in local DMA zone.
3270 */
3271static int node_order[MAX_NUMNODES];
3272
3273static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3274{
3275        int pos, j, node;
3276        int zone_type;          /* needs to be signed */
3277        struct zone *z;
3278        struct zonelist *zonelist;
3279
3280        zonelist = &pgdat->node_zonelists[0];
3281        pos = 0;
3282        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3283                for (j = 0; j < nr_nodes; j++) {
3284                        node = node_order[j];
3285                        z = &NODE_DATA(node)->node_zones[zone_type];
3286                        if (populated_zone(z)) {
3287                                zoneref_set_zone(z,
3288                                        &zonelist->_zonerefs[pos++]);
3289                                check_highest_zone(zone_type);
3290                        }
3291                }
3292        }
3293        zonelist->_zonerefs[pos].zone = NULL;
3294        zonelist->_zonerefs[pos].zone_idx = 0;
3295}
3296
3297static int default_zonelist_order(void)
3298{
3299        int nid, zone_type;
3300        unsigned long low_kmem_size, total_size;
3301        struct zone *z;
3302        int average_size;
3303        /*
3304         * ZONE_DMA and ZONE_DMA32 can be a very small area of the system.
3305         * If they are really small and used heavily, the system can fall
3306         * into OOM very easily.
3307         * This function detects the ZONE_DMA/DMA32 size and configures zone order.
3308         */
3309        /* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone.) */
3310        low_kmem_size = 0;
3311        total_size = 0;
3312        for_each_online_node(nid) {
3313                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3314                        z = &NODE_DATA(nid)->node_zones[zone_type];
3315                        if (populated_zone(z)) {
3316                                if (zone_type < ZONE_NORMAL)
3317                                        low_kmem_size += z->present_pages;
3318                                total_size += z->present_pages;
3319                        } else if (zone_type == ZONE_NORMAL) {
3320                                /*
3321                                 * If any node has only lowmem, then node order
3322                                 * is preferred to allow kernel allocations
3323                                 * locally; otherwise, they can easily infringe
3324                                 * on other nodes when there is an abundance of
3325                                 * lowmem available to allocate from.
3326                                 */
3327                                return ZONELIST_ORDER_NODE;
3328                        }
3329                }
3330        }
3331        if (!low_kmem_size ||  /* there is no DMA area. */
3332            low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
3333                return ZONELIST_ORDER_NODE;
3334        /*
3335         * Look at each node's configuration.
3336         * If there is a node whose DMA/DMA32 memory makes up a very large share
3337         * of its local memory, NODE_ORDER may be suitable.
3338         */
3339        average_size = total_size /
3340                                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
3341        for_each_online_node(nid) {
3342                low_kmem_size = 0;
3343                total_size = 0;
3344                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3345                        z = &NODE_DATA(nid)->node_zones[zone_type];
3346                        if (populated_zone(z)) {
3347                                if (zone_type < ZONE_NORMAL)
3348                                        low_kmem_size += z->present_pages;
3349                                total_size += z->present_pages;
3350                        }
3351                }
3352                if (low_kmem_size &&
3353                    total_size > average_size && /* ignore small node */
3354                    low_kmem_size > total_size * 70/100)
3355                        return ZONELIST_ORDER_NODE;
3356        }
3357        return ZONELIST_ORDER_ZONE;
3358}
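
/*
 * Worked example (sketch): a node with 4GB of memory of which 3GB sits in
 * ZONE_DMA32 has low_kmem_size/total_size = 75% > 70%, so as long as the
 * node is not smaller than average the function picks node order to keep
 * kernel allocations local instead of spilling them onto other nodes.
 */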
3359
3360static void set_zonelist_order(void)
3361{
3362        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3363                current_zonelist_order = default_zonelist_order();
3364        else
3365                current_zonelist_order = user_zonelist_order;
3366}
3367
3368static void build_zonelists(pg_data_t *pgdat)
3369{
3370        int j, node, load;
3371        enum zone_type i;
3372        nodemask_t used_mask;
3373        int local_node, prev_node;
3374        struct zonelist *zonelist;
3375        int order = current_zonelist_order;
3376
3377        /* initialize zonelists */
3378        for (i = 0; i < MAX_ZONELISTS; i++) {
3379                zonelist = pgdat->node_zonelists + i;
3380                zonelist->_zonerefs[0].zone = NULL;
3381                zonelist->_zonerefs[0].zone_idx = 0;
3382        }
3383
3384        /* NUMA-aware ordering of nodes */
3385        local_node = pgdat->node_id;
3386        load = nr_online_nodes;
3387        prev_node = local_node;
3388        nodes_clear(used_mask);
3389
3390        memset(node_order, 0, sizeof(node_order));
3391        j = 0;
3392
3393        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3394                /*
3395                 * We don't want to pressure a particular node, so we add
3396                 * a penalty to the first node in the same distance group,
3397                 * making the selection round-robin.
3398                 */
3399                if (node_distance(local_node, node) !=
3400                    node_distance(local_node, prev_node))
3401                        node_load[node] = load;
3402
3403                prev_node = node;
3404                load--;
3405                if (order == ZONELIST_ORDER_NODE)
3406                        build_zonelists_in_node_order(pgdat, node);
3407                else
3408                        node_order[j++] = node; /* remember order */
3409        }
3410
3411        if (order == ZONELIST_ORDER_ZONE) {
3412                /* calculate node order -- i.e., DMA last! */
3413                build_zonelists_in_zone_order(pgdat, j);
3414        }
3415
3416        build_thisnode_zonelists(pgdat);
3417}
3418
3419/* Construct the zonelist performance cache - see mmzone.h for details */
3420static void build_zonelist_cache(pg_data_t *pgdat)
3421{
3422        struct zonelist *zonelist;
3423        struct zonelist_cache *zlc;
3424        struct zoneref *z;
3425
3426        zonelist = &pgdat->node_zonelists[0];
3427        zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3428        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3429        for (z = zonelist->_zonerefs; z->zone; z++)
3430                zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3431}
3432
3433#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3434/*
3435 * Return node id of node used for "local" allocations.
3436 * I.e., first node id of first zone in arg node's generic zonelist.
3437 * Used for initializing percpu 'numa_mem', which is used primarily
3438 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3439 */
3440int local_memory_node(int node)
3441{
3442        struct zone *zone;
3443
3444        (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3445                                   gfp_zone(GFP_KERNEL),
3446                                   NULL,
3447                                   &zone);
3448        return zone->node;
3449}
3450#endif
3451
3452#else   /* CONFIG_NUMA */
3453
3454static void set_zonelist_order(void)
3455{
3456        current_zonelist_order = ZONELIST_ORDER_ZONE;
3457}
3458
3459static void build_zonelists(pg_data_t *pgdat)
3460{
3461        int node, local_node;
3462        enum zone_type j;
3463        struct zonelist *zonelist;
3464
3465        local_node = pgdat->node_id;
3466
3467        zonelist = &pgdat->node_zonelists[0];
3468        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3469
3470        /*
3471         * Now we build the zonelist so that it contains the zones
3472         * of all the other nodes.
3473         * We don't want to pressure a particular node, so when
3474         * building the zones for node N, we make sure that the
3475         * zones coming right after the local ones are those from
3476         * node N+1 (modulo N)
3477         */
3478        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3479                if (!node_online(node))
3480                        continue;
3481                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3482                                                        MAX_NR_ZONES - 1);
3483        }
3484        for (node = 0; node < local_node; node++) {
3485                if (!node_online(node))
3486                        continue;
3487                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3488                                                        MAX_NR_ZONES - 1);
3489        }
3490
3491        zonelist->_zonerefs[j].zone = NULL;
3492        zonelist->_zonerefs[j].zone_idx = 0;
3493}
3494
3495/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3496static void build_zonelist_cache(pg_data_t *pgdat)
3497{
3498        pgdat->node_zonelists[0].zlcache_ptr = NULL;
3499}
3500
3501#endif  /* CONFIG_NUMA */
3502
3503/*
3504 * Boot pageset table. One per cpu which is going to be used for all
3505 * zones and all nodes. The parameters will be set in such a way
3506 * that an item put on a list will immediately be handed over to
3507 * the buddy list. This is safe since pageset manipulation is done
3508 * with interrupts disabled.
3509 *
3510 * The boot_pagesets must be kept even after bootup is complete for
3511 * unused processors and/or zones. They do play a role for bootstrapping
3512 * hotplugged processors.
3513 *
3514 * zoneinfo_show() and maybe other functions do
3515 * not check if the processor is online before following the pageset pointer.
3516 * Other parts of the kernel may not check if the zone is available.
3517 */
3518static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3519static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3520static void setup_zone_pageset(struct zone *zone);
3521
3522/*
3523 * Global mutex to protect against size modification of zonelists
3524 * as well as to serialize pageset setup for the new populated zone.
3525 */
3526DEFINE_MUTEX(zonelists_mutex);
3527
3528/* The int return value exists only to satisfy stop_machine() */
3529static int __build_all_zonelists(void *data)
3530{
3531        int nid;
3532        int cpu;
3533        pg_data_t *self = data;
3534
3535#ifdef CONFIG_NUMA
3536        memset(node_load, 0, sizeof(node_load));
3537#endif
3538
3539        if (self && !node_online(self->node_id)) {
3540                build_zonelists(self);
3541                build_zonelist_cache(self);
3542        }
3543
3544        for_each_online_node(nid) {
3545                pg_data_t *pgdat = NODE_DATA(nid);
3546
3547                build_zonelists(pgdat);
3548                build_zonelist_cache(pgdat);
3549        }
3550
3551        /*
3552         * Initialize the boot_pagesets that are going to be used
3553         * for bootstrapping processors. The real pagesets for
3554         * each zone will be allocated later when the per cpu
3555         * allocator is available.
3556         *
3557         * boot_pagesets are used also for bootstrapping offline
3558         * cpus if the system is already booted because the pagesets
3559         * are needed to initialize allocators on a specific cpu too.
3560         * E.g. the percpu allocator needs the page allocator, which
3561         * needs the percpu allocator in order to allocate its pagesets
3562         * (a chicken-egg dilemma).
3563         */
3564        for_each_possible_cpu(cpu) {
3565                setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3566
3567#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3568                /*
3569                 * We now know the "local memory node" for each node--
3570                 * i.e., the node of the first zone in the generic zonelist.
3571                 * Set up numa_mem percpu variable for on-line cpus.  During
3572                 * boot, only the boot cpu should be on-line;  we'll init the
3573                 * secondary cpus' numa_mem as they come on-line.  During
3574                 * node/memory hotplug, we'll fixup all on-line cpus.
3575                 */
3576                if (cpu_online(cpu))
3577                        set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3578#endif
3579        }
3580
3581        return 0;
3582}
3583
3584/*
3585 * Called with zonelists_mutex held always
3586 * unless system_state == SYSTEM_BOOTING.
3587 */
3588void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
3589{
3590        set_zonelist_order();
3591
3592        if (system_state == SYSTEM_BOOTING) {
3593                __build_all_zonelists(NULL);
3594                mminit_verify_zonelist();
3595                cpuset_init_current_mems_allowed();
3596        } else {
3597                /* we have to stop all cpus to guarantee there is no user
3598                   of zonelist */
3599#ifdef CONFIG_MEMORY_HOTPLUG
3600                if (zone)
3601                        setup_zone_pageset(zone);
3602#endif
3603                stop_machine(__build_all_zonelists, pgdat, NULL);
3604                /* cpuset refresh routine should be here */
3605        }
3606        vm_total_pages = nr_free_pagecache_pages();
3607        /*
3608         * Disable grouping by mobility if the number of pages in the
3609         * system is too low to allow the mechanism to work. It would be
3610         * more accurate, but expensive to check per-zone. This check is
3611         * made on memory-hotadd so a system can start with mobility
3612         * disabled and enable it later
3613         */
3614        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3615                page_group_by_mobility_disabled = 1;
3616        else
3617                page_group_by_mobility_disabled = 0;
3618
3619        printk("Built %i zonelists in %s order, mobility grouping %s.  "
3620                "Total pages: %ld\n",
3621                        nr_online_nodes,
3622                        zonelist_order_name[current_zonelist_order],
3623                        page_group_by_mobility_disabled ? "off" : "on",
3624                        vm_total_pages);
3625#ifdef CONFIG_NUMA
3626        printk("Policy zone: %s\n", zone_names[policy_zone]);
3627#endif
3628}
3629
3630/*
3631 * Helper functions to size the waitqueue hash table.
3632 * Essentially these want to choose hash table sizes sufficiently
3633 * large so that collisions trying to wait on pages are rare.
3634 * But in fact, the number of active page waitqueues on typical
3635 * systems is ridiculously low, less than 200, so this is
3636 * conservative even though it seems large.
3637 *
3638 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3639 * waitqueues, i.e. the size of the waitq table given the number of pages.
3640 */
3641#define PAGES_PER_WAITQUEUE     256
3642
3643#ifndef CONFIG_MEMORY_HOTPLUG
3644static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3645{
3646        unsigned long size = 1;
3647
3648        pages /= PAGES_PER_WAITQUEUE;
3649
3650        while (size < pages)
3651                size <<= 1;
3652
3653        /*
3654         * Once we have dozens or even hundreds of threads sleeping
3655         * on IO we've got bigger problems than wait queue collision.
3656         * Limit the size of the wait table to a reasonable size.
3657         */
3658        size = min(size, 4096UL);
3659
3660        return max(size, 4UL);
3661}
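
/*
 * Worked example (sketch, 4K pages): a 1GB zone spans 262144 pages;
 * 262144 / 256 = 1024, already a power of two, so the table gets 1024
 * waitqueue heads.  A 16MB zone gets 4096 / 256 = 16 entries, and very
 * small zones are clamped up to the minimum of 4.
 */
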
3662#else
3663/*
3664 * A zone's size might be changed by hot-add, so it is not possible to pick
3665 * a suitable size for its wait_table up front; we therefore use the maximum size.
3666 *
3667 * The max wait table size is 4096 x sizeof(wait_queue_head_t), i.e.:
3668 *
3669 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3670 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3671 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3672 *
3673 * The maximum number of entries is reached once a zone's memory is (512K + 256)
3674 * pages or more when sized the traditional way (see above).  That corresponds to:
3675 *
3676 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3677 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3678 *    powerpc (64K page size)             : =  (32G +16M)byte.
3679 */
3680static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3681{
3682        return 4096UL;
3683}
3684#endif
3685
3686/*
3687 * This is an integer logarithm so that shifts can be used later
3688 * to extract the more random high bits from the multiplicative
3689 * hash function before the remainder is taken.
3690 */
3691static inline unsigned long wait_table_bits(unsigned long size)
3692{
3693        return ffz(~size);
3694}
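
/*
 * Example (sketch): ffz(~1024) == 10, so a 1024-entry table yields
 * wait_table_bits == 10 and callers can reduce a page hash to 10 bits
 * when indexing the table.
 */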
3695
3696#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3697
3698/*
3699 * Check if a pageblock contains reserved pages
3700 */
3701static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3702{
3703        unsigned long pfn;
3704
3705        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3706                if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3707                        return 1;
3708        }
3709        return 0;
3710}
3711
3712/*
3713 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3714 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3715 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3716 * higher will lead to a bigger reserve which will get freed as contiguous
3717 * blocks as reclaim kicks in.
3718 */
3719static void setup_zone_migrate_reserve(struct zone *zone)
3720{
3721        unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3722        struct page *page;
3723        unsigned long block_migratetype;
3724        int reserve;
3725
3726        /*
3727         * Get the start pfn, end pfn and the number of blocks to reserve
3728         * We have to be careful to be aligned to pageblock_nr_pages to
3729         * make sure that we always check pfn_valid for the first page in
3730         * the block.
3731         */
3732        start_pfn = zone->zone_start_pfn;
3733        end_pfn = start_pfn + zone->spanned_pages;
3734        start_pfn = roundup(start_pfn, pageblock_nr_pages);
3735        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3736                                                        pageblock_order;
3737
3738        /*
3739         * Reserve blocks are generally in place to help high-order atomic
3740         * allocations that are short-lived. A min_free_kbytes value that
3741         * would result in more than 2 reserve blocks for atomic allocations
3742         * is assumed to be in place to help anti-fragmentation for the
3743         * future allocation of hugepages at runtime.
3744         */
3745        reserve = min(2, reserve);
3746
3747        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3748                if (!pfn_valid(pfn))
3749                        continue;
3750                page = pfn_to_page(pfn);
3751
3752                /* Watch out for overlapping nodes */
3753                if (page_to_nid(page) != zone_to_nid(zone))
3754                        continue;
3755
3756                block_migratetype = get_pageblock_migratetype(page);
3757
3758                /* Only test what is necessary when the reserves are not met */
3759                if (reserve > 0) {
3760                        /*
3761                         * Blocks with reserved pages will never be freed,
3762                         * so skip them.
3763                         */
3764                        block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3765                        if (pageblock_is_reserved(pfn, block_end_pfn))
3766                                continue;
3767
3768                        /* If this block is reserved, account for it */
3769                        if (block_migratetype == MIGRATE_RESERVE) {
3770                                reserve--;
3771                                continue;
3772                        }
3773
3774                        /* Suitable for reserving if this block is movable */
3775                        if (block_migratetype == MIGRATE_MOVABLE) {
3776                                set_pageblock_migratetype(page,
3777                                                        MIGRATE_RESERVE);
3778                                move_freepages_block(zone, page,
3779                                                        MIGRATE_RESERVE);
3780                                reserve--;
3781                                continue;
3782                        }
3783                }
3784
3785                /*
3786                 * If the reserve is met and this is a previous reserved block,
3787                 * take it back
3788                 */
3789                if (block_migratetype == MIGRATE_RESERVE) {
3790                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3791                        move_freepages_block(zone, page, MIGRATE_MOVABLE);
3792                }
3793        }
3794}
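
/*
 * Worked example (sketch, x86 with 4K pages and pageblock_order == 9,
 * i.e. 2MB pageblocks): a zone with min_wmark_pages() == 1024 gives
 * roundup(1024, 512) >> 9 == 2, so at most two movable pageblocks are
 * converted to MIGRATE_RESERVE; the min(2, reserve) clamp above keeps
 * the reserve at two blocks even for much larger watermarks.
 */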
3795
3796/*
3797 * Initially all pages are reserved - free ones are freed
3798 * up by free_all_bootmem() once the early boot process is
3799 * done. Non-atomic initialization, single-pass.
3800 */
3801void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3802                unsigned long start_pfn, enum memmap_context context)
3803{
3804        struct page *page;
3805        unsigned long end_pfn = start_pfn + size;
3806        unsigned long pfn;
3807        struct zone *z;
3808
3809        if (highest_memmap_pfn < end_pfn - 1)
3810                highest_memmap_pfn = end_pfn - 1;
3811
3812        z = &NODE_DATA(nid)->node_zones[zone];
3813        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3814                /*
3815                 * There can be holes in boot-time mem_map[]s
3816                 * handed to this function.  They do not
3817                 * exist on hotplugged memory.
3818                 */
3819                if (context == MEMMAP_EARLY) {
3820                        if (!early_pfn_valid(pfn))
3821                                continue;
3822                        if (!early_pfn_in_nid(pfn, nid))
3823                                continue;
3824                }
3825                page = pfn_to_page(pfn);
3826                set_page_links(page, zone, nid, pfn);
3827                mminit_verify_page_links(page, zone, nid, pfn);
3828                init_page_count(page);
3829                reset_page_mapcount(page);
3830                SetPageReserved(page);
3831                /*
3832                 * Mark the block movable so that blocks are reserved for
3833                 * movable allocations at startup. This forces kernel allocations
3834                 * to reserve their blocks rather than leaking throughout
3835                 * the address space during boot, when many long-lived
3836                 * kernel allocations are made. Later, some blocks near
3837                 * the start are marked MIGRATE_RESERVE by
3838                 * setup_zone_migrate_reserve().
3839                 *
3840                 * The bitmap is created for the zone's valid pfn range, but the
3841                 * memmap may also cover invalid pages (for alignment), so check
3842                 * here that we do not call set_pageblock_migratetype() against
3843                 * a pfn outside the zone.
3844                 */
3845                if ((z->zone_start_pfn <= pfn)
3846                    && (pfn < z->zone_start_pfn + z->spanned_pages)
3847                    && !(pfn & (pageblock_nr_pages - 1)))
3848                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3849
3850                INIT_LIST_HEAD(&page->lru);
3851#ifdef WANT_PAGE_VIRTUAL
3852                /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3853                if (!is_highmem_idx(zone))
3854                        set_page_address(page, __va(pfn << PAGE_SHIFT));
3855#endif
3856        }
3857}
3858
3859static void __meminit zone_init_free_lists(struct zone *zone)
3860{
3861        int order, t;
3862        for_each_migratetype_order(order, t) {
3863                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3864                zone->free_area[order].nr_free = 0;
3865        }
3866}
3867
3868#ifndef __HAVE_ARCH_MEMMAP_INIT
3869#define memmap_init(size, nid, zone, start_pfn) \
3870        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3871#endif
3872
3873static int __meminit zone_batchsize(struct zone *zone)
3874{
3875#ifdef CONFIG_MMU
3876        int batch;
3877
3878        /*
3879         * The per-cpu-pages pools are set to around 1/1000th of the
3880         * size of the zone, but no more than half a megabyte.
3881         *
3882         * OK, so we don't know how big the cache is.  So guess.
3883         */
3884        batch = zone->present_pages / 1024;
3885        if (batch * PAGE_SIZE > 512 * 1024)
3886                batch = (512 * 1024) / PAGE_SIZE;
3887        batch /= 4;             /* We effectively *= 4 below */
3888        if (batch < 1)
3889                batch = 1;
3890
3891        /*
3892         * Clamp the batch to a 2^n - 1 value. Having a power
3893         * of 2 value was found to be more likely to have
3894         * suboptimal cache aliasing properties in some cases.
3895         *
3896         * For example if 2 tasks are alternately allocating
3897         * batches of pages, one task can end up with a lot
3898         * of pages of one half of the possible page colors
3899         * and the other with pages of the other colors.
3900         */
3901        batch = rounddown_pow_of_two(batch + batch/2) - 1;
3902
3903        return batch;
3904
3905#else
3906        /* The deferral and batching of frees should be suppressed under NOMMU
3907         * conditions.
3908         *
3909         * The problem is that NOMMU needs to be able to allocate large chunks
3910         * of contiguous memory as there's no hardware page translation to
3911         * assemble apparent contiguous memory from discontiguous pages.
3912         *
3913         * Queueing large contiguous runs of pages for batching, however,
3914         * causes the pages to actually be freed in smaller chunks.  As there
3915         * can be a significant delay between the individual batches being
3916         * recycled, this leads to the once large chunks of space being
3917         * fragmented and becoming unavailable for high-order allocations.
3918         */
3919        return 0;
3920#endif
3921}
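
/*
 * Worked example (sketch, 4K pages): a 1GB zone has 262144 present pages,
 * so batch starts at 262144 / 1024 = 256; that is more than half a
 * megabyte of pages, so it is capped at 128, divided by 4 to 32, and
 * finally rounded to 2^n - 1: rounddown_pow_of_two(32 + 16) - 1 == 31.
 * The pcp lists for such a zone therefore drain and refill 31 pages at
 * a time.
 */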
3922
3923static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3924{
3925        struct per_cpu_pages *pcp;
3926        int migratetype;
3927
3928        memset(p, 0, sizeof(*p));
3929
3930        pcp = &p->pcp;
3931        pcp->count = 0;
3932        pcp->high = 6 * batch;
3933        pcp->batch = max(1UL, 1 * batch);
3934        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3935                INIT_LIST_HEAD(&pcp->lists[migratetype]);
3936}
3937
3938/*
3939 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist
3940 * to the value high for the pageset p.
3941 */
3942
3943static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3944                                unsigned long high)
3945{
3946        struct per_cpu_pages *pcp;
3947
3948        pcp = &p->pcp;
3949        pcp->high = high;
3950        pcp->batch = max(1UL, high/4);
3951        if ((high/4) > (PAGE_SHIFT * 8))
3952                pcp->batch = PAGE_SHIFT * 8;
3953}
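
/*
 * Worked example (sketch): with the percpu_pagelist_fraction sysctl set
 * to 8, setup_zone_pageset() below calls this with high = present_pages/8;
 * for a 1GB zone (262144 pages of 4K) that is a per-cpu high watermark of
 * 32768 pages, and since 32768/4 exceeds PAGE_SHIFT * 8 the batch is
 * clamped to 96.
 */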
3954
3955static void __meminit setup_zone_pageset(struct zone *zone)
3956{
3957        int cpu;
3958
3959        zone->pageset = alloc_percpu(struct per_cpu_pageset);
3960
3961        for_each_possible_cpu(cpu) {
3962                struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3963
3964                setup_pageset(pcp, zone_batchsize(zone));
3965
3966                if (percpu_pagelist_fraction)
3967                        setup_pagelist_highmark(pcp,
3968                                (zone->present_pages /
3969                                        percpu_pagelist_fraction));
3970        }
3971}
3972
3973/*
3974 * Allocate per cpu pagesets and initialize them.
3975 * Before this call only boot pagesets were available.
3976 */
3977void __init setup_per_cpu_pageset(void)
3978{
3979        struct zone *zone;
3980
3981        for_each_populated_zone(zone)
3982                setup_zone_pageset(zone);
3983}
3984
3985static noinline __init_refok
3986int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3987{
3988        int i;
3989        struct pglist_data *pgdat = zone->zone_pgdat;
3990        size_t alloc_size;
3991
3992        /*
3993         * The per-page waitqueue mechanism uses hashed waitqueues
3994         * per zone.
3995         */
3996        zone->wait_table_hash_nr_entries =
3997                 wait_table_hash_nr_entries(zone_size_pages);
3998        zone->wait_table_bits =
3999                wait_table_bits(zone->wait_table_hash_nr_entries);
4000        alloc_size = zone->wait_table_hash_nr_entries
4001                                        * sizeof(wait_queue_head_t);
4002
4003        if (!slab_is_available()) {
4004                zone->wait_table = (wait_queue_head_t *)
4005                        alloc_bootmem_node_nopanic(pgdat, alloc_size);
4006        } else {
4007                /*
4008                 * This case means that a zone whose size was 0 gets new memory
4009                 * via memory hot-add.
4010                 * But it may also be the case that a new node was hot-added.  In
4011                 * that case vmalloc() will not be able to use this new node's
4012                 * memory - this wait_table must be initialized to serve the new
4013                 * node as well.
4014                 * To use this new node's memory, further consideration will be
4015                 * necessary.
4016                 */
4017                zone->wait_table = vmalloc(alloc_size);
4018        }
4019        if (!zone->wait_table)
4020                return -ENOMEM;
4021
4022        for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
4023                init_waitqueue_head(zone->wait_table + i);
4024
4025        return 0;
4026}
4027
4028static __meminit void zone_pcp_init(struct zone *zone)
4029{
4030        /*
4031         * per cpu subsystem is not up at this point. The following code
4032         * relies on the ability of the linker to provide the
4033         * offset of a (static) per cpu variable into the per cpu area.
4034         */
4035        zone->pageset = &boot_pageset;
4036
4037        if (zone->present_pages)
4038                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
4039                        zone->name, zone->present_pages,
4040                                         zone_batchsize(zone));
4041}
4042
4043int __meminit init_currently_empty_zone(struct zone *zone,
4044                                        unsigned long zone_start_pfn,
4045                                        unsigned long size,
4046                                        enum memmap_context context)
4047{
4048        struct pglist_data *pgdat = zone->zone_pgdat;
4049        int ret;
4050        ret = zone_wait_table_init(zone, size);
4051        if (ret)
4052                return ret;
4053        pgdat->nr_zones = zone_idx(zone) + 1;
4054
4055        zone->zone_start_pfn = zone_start_pfn;
4056
4057        mminit_dprintk(MMINIT_TRACE, "memmap_init",
4058                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
4059                        pgdat->node_id,
4060                        (unsigned long)zone_idx(zone),
4061                        zone_start_pfn, (zone_start_pfn + size));
4062
4063        zone_init_free_lists(zone);
4064
4065        return 0;
4066}
4067
4068#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4069#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
4070/*
4071 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
4072 * Architectures may implement their own version but if add_active_range()
4073 * was used and there are no special requirements, this is a convenient
4074 * alternative.
4075 */
4076int __meminit __early_pfn_to_nid(unsigned long pfn)
4077{
4078        unsigned long start_pfn, end_pfn;
4079        int i, nid;
4080
4081        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4082                if (start_pfn <= pfn && pfn < end_pfn)
4083                        return nid;
4084        /* This is a memory hole */
4085        return -1;
4086}
4087#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
4088
4089int __meminit early_pfn_to_nid(unsigned long pfn)
4090{
4091        int nid;
4092
4093        nid = __early_pfn_to_nid(pfn);
4094        if (nid >= 0)
4095                return nid;
4096        /* just returns 0 */
4097        return 0;
4098}
4099
4100#ifdef CONFIG_NODES_SPAN_OTHER_NODES
4101bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4102{
4103        int nid;
4104
4105        nid = __early_pfn_to_nid(pfn);
4106        if (nid >= 0 && nid != node)
4107                return false;
4108        return true;
4109}
4110#endif
4111
4112/**
4113 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
4114 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4115 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
4116 *
4117 * If an architecture guarantees that all ranges registered with
4118 * add_active_ranges() contain no holes and may be freed, this
4119 * function may be used instead of calling free_bootmem() manually.
4120 */
4121void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4122{
4123        unsigned long start_pfn, end_pfn;
4124        int i, this_nid;
4125
4126        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4127                start_pfn = min(start_pfn, max_low_pfn);
4128                end_pfn = min(end_pfn, max_low_pfn);
4129
4130                if (start_pfn < end_pfn)
4131                        free_bootmem_node(NODE_DATA(this_nid),
4132                                          PFN_PHYS(start_pfn),
4133                                          (end_pfn - start_pfn) << PAGE_SHIFT);
4134        }
4135}
4136
4137/**
4138 * sparse_memory_present_with_active_regions - Call memory_present for each active range
4139 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4140 *
4141 * If an architecture guarantees that all ranges registered with
4142 * add_active_ranges() contain no holes and may be freed, this
4143 * function may be used instead of calling memory_present() manually.
4144 */
4145void __init sparse_memory_present_with_active_regions(int nid)
4146{
4147        unsigned long start_pfn, end_pfn;
4148        int i, this_nid;
4149
4150        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4151                memory_present(this_nid, start_pfn, end_pfn);
4152}
4153
4154/**
4155 * get_pfn_range_for_nid - Return the start and end page frames for a node
4156 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4157 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4158 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4159 *
4160 * It returns the start and end page frame of a node based on information
4161 * provided by an arch calling add_active_range(). If called for a node
4162 * with no available memory, a warning is printed and the start and end
4163 * PFNs will be 0.
4164 */
4165void __meminit get_pfn_range_for_nid(unsigned int nid,
4166                        unsigned long *start_pfn, unsigned long *end_pfn)
4167{
4168        unsigned long this_start_pfn, this_end_pfn;
4169        int i;
4170
4171        *start_pfn = -1UL;
4172        *end_pfn = 0;
4173
4174        for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4175                *start_pfn = min(*start_pfn, this_start_pfn);
4176                *end_pfn = max(*end_pfn, this_end_pfn);
4177        }
4178
4179        if (*start_pfn == -1UL)
4180                *start_pfn = 0;
4181}
4182
4183/*
4184 * This finds a zone that can be used for ZONE_MOVABLE pages. The
4185 * assumption is made that zones within a node are ordered in monotonic
4186 * increasing memory addresses so that the "highest" populated zone is used.
4187 */
4188static void __init find_usable_zone_for_movable(void)
4189{
4190        int zone_index;
4191        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4192                if (zone_index == ZONE_MOVABLE)
4193                        continue;
4194
4195                if (arch_zone_highest_possible_pfn[zone_index] >
4196                                arch_zone_lowest_possible_pfn[zone_index])
4197                        break;
4198        }
4199
4200        VM_BUG_ON(zone_index == -1);
4201        movable_zone = zone_index;
4202}
4203
4204/*
4205 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4206 * because it is sized independently of architecture. Unlike the other zones,
4207 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4208 * in each node depending on the size of each node and how evenly kernelcore
4209 * is distributed. This helper function adjusts the zone ranges
4210 * provided by the architecture for a given node by using the end of the
4211 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4212 * zones within a node are in order of monotonically increasing memory addresses.
4213 */
4214static void __meminit adjust_zone_range_for_zone_movable(int nid,
4215                                        unsigned long zone_type,
4216                                        unsigned long node_start_pfn,
4217                                        unsigned long node_end_pfn,
4218                                        unsigned long *zone_start_pfn,
4219                                        unsigned long *zone_end_pfn)
4220{
4221        /* Only adjust if ZONE_MOVABLE is on this node */
4222        if (zone_movable_pfn[nid]) {
4223                /* Size ZONE_MOVABLE */
4224                if (zone_type == ZONE_MOVABLE) {
4225                        *zone_start_pfn = zone_movable_pfn[nid];
4226                        *zone_end_pfn = min(node_end_pfn,
4227                                arch_zone_highest_possible_pfn[movable_zone]);
4228
4229                /* Adjust for ZONE_MOVABLE starting within this range */
4230                } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4231                                *zone_end_pfn > zone_movable_pfn[nid]) {
4232                        *zone_end_pfn = zone_movable_pfn[nid];
4233
4234                /* Check if this whole range is within ZONE_MOVABLE */
4235                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4236                        *zone_start_pfn = *zone_end_pfn;
4237        }
4238}
4239
4240/*
4241 * Return the number of pages a zone spans in a node, including holes
4242 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4243 */
4244static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4245                                        unsigned long zone_type,
4246                                        unsigned long *ignored)
4247{
4248        unsigned long node_start_pfn, node_end_pfn;
4249        unsigned long zone_start_pfn, zone_end_pfn;
4250
4251        /* Get the start and end of the node and zone */
4252        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4253        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4254        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4255        adjust_zone_range_for_zone_movable(nid, zone_type,
4256                                node_start_pfn, node_end_pfn,
4257                                &zone_start_pfn, &zone_end_pfn);
4258
4259        /* Check that this node has pages within the zone's required range */
4260        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4261                return 0;
4262
4263        /* Move the zone boundaries inside the node if necessary */
4264        zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4265        zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4266
4267        /* Return the spanned pages */
4268        return zone_end_pfn - zone_start_pfn;
4269}
4270
4271/*
4272 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4273 * then all holes in the requested range will be accounted for.
4274 */
4275unsigned long __meminit __absent_pages_in_range(int nid,
4276                                unsigned long range_start_pfn,
4277                                unsigned long range_end_pfn)
4278{
4279        unsigned long nr_absent = range_end_pfn - range_start_pfn;
4280        unsigned long start_pfn, end_pfn;
4281        int i;
4282
4283        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4284                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4285                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4286                nr_absent -= end_pfn - start_pfn;
4287        }
4288        return nr_absent;
4289}
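/*
 * Worked example (illustrative numbers): for the pfn range [0, 1000) on a
 * node whose memblock regions cover [0, 200) and [500, 1000), nr_absent
 * starts at 1000 and the loop subtracts 200 and 500, leaving 300 absent
 * pages, which is exactly the hole [200, 500).
 */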
4290
4291/**
4292 * absent_pages_in_range - Return number of page frames in holes within a range
4293 * @start_pfn: The start PFN to start searching for holes
4294 * @end_pfn: The end PFN to stop searching for holes
4295 *
4296 * It returns the number of page frames in memory holes within a range.
4297 */
4298unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4299                                                        unsigned long end_pfn)
4300{
4301        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4302}
4303
4304/* Return the number of page frames in holes in a zone on a node */
4305static unsigned long __meminit zone_absent_pages_in_node(int nid,
4306                                        unsigned long zone_type,
4307                                        unsigned long *ignored)
4308{
4309        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4310        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4311        unsigned long node_start_pfn, node_end_pfn;
4312        unsigned long zone_start_pfn, zone_end_pfn;
4313
4314        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4315        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4316        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4317
4318        adjust_zone_range_for_zone_movable(nid, zone_type,
4319                        node_start_pfn, node_end_pfn,
4320                        &zone_start_pfn, &zone_end_pfn);
4321        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4322}
4323
4324#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4325static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4326                                        unsigned long zone_type,
4327                                        unsigned long *zones_size)
4328{
4329        return zones_size[zone_type];
4330}
4331
4332static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4333                                                unsigned long zone_type,
4334                                                unsigned long *zholes_size)
4335{
4336        if (!zholes_size)
4337                return 0;
4338
4339        return zholes_size[zone_type];
4340}
4341
4342#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4343
4344static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4345                unsigned long *zones_size, unsigned long *zholes_size)
4346{
4347        unsigned long realtotalpages, totalpages = 0;
4348        enum zone_type i;
4349
4350        for (i = 0; i < MAX_NR_ZONES; i++)
4351                totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4352                                                                zones_size);
4353        pgdat->node_spanned_pages = totalpages;
4354
4355        realtotalpages = totalpages;
4356        for (i = 0; i < MAX_NR_ZONES; i++)
4357                realtotalpages -=
4358                        zone_absent_pages_in_node(pgdat->node_id, i,
4359                                                                zholes_size);
4360        pgdat->node_present_pages = realtotalpages;
4361        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4362                                                        realtotalpages);
4363}
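/*
 * Worked example (illustrative numbers): a node whose zones span 262144 pages
 * in total, of which 4096 pfns fall in memory holes, ends up with
 * node_spanned_pages = 262144 and node_present_pages = 258048.
 */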
4364
4365#ifndef CONFIG_SPARSEMEM
4366/*
4367 * Calculate the size of the zone->pageblock_flags bitmap, rounded up to an
4368 * unsigned long. Start by making sure zonesize is a multiple of
4369 * pageblock_nr_pages by rounding up, then use NR_PAGEBLOCK_BITS worth of
4370 * bits per pageblock, round what is now in bits up to the nearest long in
4371 * bits, and finally return it in bytes.
4372 */
4373static unsigned long __init usemap_size(unsigned long zonesize)
4374{
4375        unsigned long usemapsize;
4376
4377        usemapsize = roundup(zonesize, pageblock_nr_pages);
4378        usemapsize = usemapsize >> pageblock_order;
4379        usemapsize *= NR_PAGEBLOCK_BITS;
4380        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4381
4382        return usemapsize / 8;
4383}
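/*
 * Worked example (assumes PAGE_SHIFT == 12, pageblock_order == 9,
 * NR_PAGEBLOCK_BITS == 4 and a 64-bit build): a 1GiB zone of 262144 pages
 * rounds up to itself, covers 262144 >> 9 = 512 pageblocks, needs
 * 512 * 4 = 2048 bits, which is already a multiple of 64, so usemap_size()
 * returns 2048 / 8 = 256 bytes for zone->pageblock_flags.
 */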
4384
4385static void __init setup_usemap(struct pglist_data *pgdat,
4386                                struct zone *zone, unsigned long zonesize)
4387{
4388        unsigned long usemapsize = usemap_size(zonesize);
4389        zone->pageblock_flags = NULL;
4390        if (usemapsize)
4391                zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4392                                                                   usemapsize);
4393}
4394#else
4395static inline void setup_usemap(struct pglist_data *pgdat,
4396                                struct zone *zone, unsigned long zonesize) {}
4397#endif /* CONFIG_SPARSEMEM */
4398
4399#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4400
4401/* Initialise pageblock_order, which determines how many pages a pageblock spans */
4402void __init set_pageblock_order(void)
4403{
4404        unsigned int order;
4405
4406        /* Check that pageblock_order has not already been set up */
4407        if (pageblock_order)
4408                return;
4409
4410        if (HPAGE_SHIFT > PAGE_SHIFT)
4411                order = HUGETLB_PAGE_ORDER;
4412        else
4413                order = MAX_ORDER - 1;
4414
4415        /*
4416         * Assume the largest contiguous order of interest is a huge page.
4417         * This value may be variable depending on boot parameters on IA64 and
4418         * powerpc.
4419         */
4420        pageblock_order = order;
4421}
4422#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4423
4424/*
4425 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4426 * is unused as pageblock_order is set at compile-time. See
4427 * include/linux/pageblock-flags.h for the values of pageblock_order based on
4428 * the kernel config
4429 */
4430void __init set_pageblock_order(void)
4431{
4432}
4433
4434#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4435
4436/*
4437 * Set up the zone data structures:
4438 *   - mark all pages reserved
4439 *   - mark all memory queues empty
4440 *   - clear the memory bitmaps
4441 *
4442 * NOTE: pgdat should get zeroed by caller.
4443 */
4444static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4445                unsigned long *zones_size, unsigned long *zholes_size)
4446{
4447        enum zone_type j;
4448        int nid = pgdat->node_id;
4449        unsigned long zone_start_pfn = pgdat->node_start_pfn;
4450        int ret;
4451
4452        pgdat_resize_init(pgdat);
4453        init_waitqueue_head(&pgdat->kswapd_wait);
4454        init_waitqueue_head(&pgdat->pfmemalloc_wait);
4455        pgdat_page_cgroup_init(pgdat);
4456
4457        for (j = 0; j < MAX_NR_ZONES; j++) {
4458                struct zone *zone = pgdat->node_zones + j;
4459                unsigned long size, realsize, memmap_pages;
4460
4461                size = zone_spanned_pages_in_node(nid, j, zones_size);
4462                realsize = size - zone_absent_pages_in_node(nid, j,
4463                                                                zholes_size);
4464
4465                /*
4466                 * Adjust realsize so that it accounts for how much memory
4467                 * is used by this zone for memmap. This affects the watermark
4468                 * and per-cpu initialisations
4469                 */
4470                memmap_pages =
4471                        PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4472                if (realsize >= memmap_pages) {
4473                        realsize -= memmap_pages;
4474                        if (memmap_pages)
4475                                printk(KERN_DEBUG
4476                                       "  %s zone: %lu pages used for memmap\n",
4477                                       zone_names[j], memmap_pages);
4478                } else
4479                        printk(KERN_WARNING
4480                                "  %s zone: %lu pages exceeds realsize %lu\n",
4481                                zone_names[j], memmap_pages, realsize);
4482
4483                /* Account for reserved pages */
4484                if (j == 0 && realsize > dma_reserve) {
4485                        realsize -= dma_reserve;
4486                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4487                                        zone_names[0], dma_reserve);
4488                }
4489
4490                if (!is_highmem_idx(j))
4491                        nr_kernel_pages += realsize;
4492                nr_all_pages += realsize;
4493
4494                zone->spanned_pages = size;
4495                zone->present_pages = realsize;
4496#ifdef CONFIG_NUMA
4497                zone->node = nid;
4498                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4499                                                / 100;
4500                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4501#endif
4502                zone->name = zone_names[j];
4503                spin_lock_init(&zone->lock);
4504                spin_lock_init(&zone->lru_lock);
4505                zone_seqlock_init(zone);
4506                zone->zone_pgdat = pgdat;
4507
4508                zone_pcp_init(zone);
4509                lruvec_init(&zone->lruvec);
4510                if (!size)
4511                        continue;
4512
4513                set_pageblock_order();
4514                setup_usemap(pgdat, zone, size);
4515                ret = init_currently_empty_zone(zone, zone_start_pfn,
4516                                                size, MEMMAP_EARLY);
4517                BUG_ON(ret);
4518                memmap_init(size, nid, j, zone_start_pfn);
4519                zone_start_pfn += size;
4520        }
4521}
4522
4523static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4524{
4525        /* Skip empty nodes */
4526        if (!pgdat->node_spanned_pages)
4527                return;
4528
4529#ifdef CONFIG_FLAT_NODE_MEM_MAP
4530        /* ia64 gets its own node_mem_map, before this, without bootmem */
4531        if (!pgdat->node_mem_map) {
4532                unsigned long size, start, end;
4533                struct page *map;
4534
4535                /*
4536                 * The zone's endpoints aren't required to be MAX_ORDER
4537                 * aligned, but the node_mem_map endpoints must be MAX_ORDER
4538                 * aligned for the buddy allocator to function correctly.
4539                 */
4540                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4541                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4542                end = ALIGN(end, MAX_ORDER_NR_PAGES);
4543                size =  (end - start) * sizeof(struct page);
4544                map = alloc_remap(pgdat->node_id, size);
4545                if (!map)
4546                        map = alloc_bootmem_node_nopanic(pgdat, size);
4547                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4548        }
4549#ifndef CONFIG_NEED_MULTIPLE_NODES
4550        /*
4551         * With no DISCONTIG, the global mem_map is just set as node 0's
4552         */
4553        if (pgdat == NODE_DATA(0)) {
4554                mem_map = NODE_DATA(0)->node_mem_map;
4555#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4556                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4557                        mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4558#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4559        }
4560#endif
4561#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4562}
4563
4564void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4565                unsigned long node_start_pfn, unsigned long *zholes_size)
4566{
4567        pg_data_t *pgdat = NODE_DATA(nid);
4568
4569        /* pg_data_t should be reset to zero when it's allocated */
4570        WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
4571
4572        pgdat->node_id = nid;
4573        pgdat->node_start_pfn = node_start_pfn;
4574        init_zone_allows_reclaim(nid);
4575        calculate_node_totalpages(pgdat, zones_size, zholes_size);
4576
4577        alloc_node_mem_map(pgdat);
4578#ifdef CONFIG_FLAT_NODE_MEM_MAP
4579        printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4580                nid, (unsigned long)pgdat,
4581                (unsigned long)pgdat->node_mem_map);
4582#endif
4583
4584        free_area_init_core(pgdat, zones_size, zholes_size);
4585}
4586
4587#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4588
4589#if MAX_NUMNODES > 1
4590/*
4591 * Figure out the number of possible node ids.
4592 */
4593static void __init setup_nr_node_ids(void)
4594{
4595        unsigned int node;
4596        unsigned int highest = 0;
4597
4598        for_each_node_mask(node, node_possible_map)
4599                highest = node;
4600        nr_node_ids = highest + 1;
4601}
4602#else
4603static inline void setup_nr_node_ids(void)
4604{
4605}
4606#endif
4607
4608/**
4609 * node_map_pfn_alignment - determine the maximum internode alignment
4610 *
4611 * This function should be called after node map is populated and sorted.
4612 * It calculates the maximum power of two alignment which can distinguish
4613 * all the nodes.
4614 *
4615 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4616 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
4617 * nodes are shifted by 256MiB, a 256MiB alignment is returned.  Note that if
4618 * only the last node is shifted, 1GiB is enough and this function indicates so.
4619 *
4620 * This is used to test whether pfn -> nid mapping of the chosen memory
4621 * model has fine enough granularity to avoid incorrect mapping for the
4622 * populated node map.
4623 *
4624 * Returns the determined alignment in pfn's.  0 if there is no alignment
4625 * requirement (single node).
4626 */
4627unsigned long __init node_map_pfn_alignment(void)
4628{
4629        unsigned long accl_mask = 0, last_end = 0;
4630        unsigned long start, end, mask;
4631        int last_nid = -1;
4632        int i, nid;
4633
4634        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
4635                if (!start || last_nid < 0 || last_nid == nid) {
4636                        last_nid = nid;
4637                        last_end = end;
4638                        continue;
4639                }
4640
4641                /*
4642                 * Start with a mask granular enough to pin-point to the
4643                 * start pfn and tick off bits one-by-one until it becomes
4644                 * too coarse to separate the current node from the last.
4645                 */
4646                mask = ~((1 << __ffs(start)) - 1);
4647                while (mask && last_end <= (start & (mask << 1)))
4648                        mask <<= 1;
4649
4650                /* accumulate all internode masks */
4651                accl_mask |= mask;
4652        }
4653
4654        /* convert mask to number of pages */
4655        return ~accl_mask + 1;
4656}
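/*
 * Worked example (assumes PAGE_SHIFT == 12): two 1GiB nodes, node 0 at pfns
 * [0x10000, 0x50000) and node 1 starting at pfn 0x50000, i.e. both shifted by
 * 256MiB from a 1GiB boundary.  When node 1 is reached, last_end == 0x50000
 * and __ffs(0x50000) == 16, so mask starts as ~0xffff; widening once would
 * require last_end <= (0x50000 & ~0x1ffff) == 0x40000, which fails, so the
 * accumulated mask stays ~0xffff and the function returns 0x10000 pfns,
 * i.e. 256MiB of alignment, matching the example in the comment above.
 */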
4657
4658/* Find the lowest pfn for a node */
4659static unsigned long __init find_min_pfn_for_node(int nid)
4660{
4661        unsigned long min_pfn = ULONG_MAX;
4662        unsigned long start_pfn;
4663        int i;
4664
4665        for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4666                min_pfn = min(min_pfn, start_pfn);
4667
4668        if (min_pfn == ULONG_MAX) {
4669                printk(KERN_WARNING
4670                        "Could not find start_pfn for node %d\n", nid);
4671                return 0;
4672        }
4673
4674        return min_pfn;
4675}
4676
4677/**
4678 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4679 *
4680 * It returns the minimum PFN based on information provided via
4681 * add_active_range().
4682 */
4683unsigned long __init find_min_pfn_with_active_regions(void)
4684{
4685        return find_min_pfn_for_node(MAX_NUMNODES);
4686}
4687
4688/*
4689 * early_calculate_totalpages()
4690 * Sum pages in active regions for movable zone.
4691 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4692 */
4693static unsigned long __init early_calculate_totalpages(void)
4694{
4695        unsigned long totalpages = 0;
4696        unsigned long start_pfn, end_pfn;
4697        int i, nid;
4698
4699        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4700                unsigned long pages = end_pfn - start_pfn;
4701
4702                totalpages += pages;
4703                if (pages)
4704                        node_set_state(nid, N_HIGH_MEMORY);
4705        }
4706        return totalpages;
4707}
4708
4709/*
4710 * Find the PFN at which ZONE_MOVABLE begins in each node. Kernel memory
4711 * is spread evenly between nodes as long as the nodes have enough
4712 * memory. When they don't, some nodes will have more kernelcore than
4713 * others.
4714 */
4715static void __init find_zone_movable_pfns_for_nodes(void)
4716{
4717        int i, nid;
4718        unsigned long usable_startpfn;
4719        unsigned long kernelcore_node, kernelcore_remaining;
4720        /* save the state before borrowing the nodemask */
4721        nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4722        unsigned long totalpages = early_calculate_totalpages();
4723        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4724
4725        /*
4726         * If movablecore was specified, calculate the corresponding size
4727         * of kernelcore so that memory usable for
4728         * any allocation type is evenly spread. If both kernelcore
4729         * and movablecore are specified, then the value of kernelcore
4730         * will be used for required_kernelcore if it's greater than
4731         * what movablecore would have allowed.
4732         */
4733        if (required_movablecore) {
4734                unsigned long corepages;
4735
4736                /*
4737                 * Round up so that ZONE_MOVABLE is at least as large as what
4738                 * was requested by the user
4739                 */
4740                required_movablecore =
4741                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4742                corepages = totalpages - required_movablecore;
4743
4744                required_kernelcore = max(required_kernelcore, corepages);
4745        }
4746
4747        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4748        if (!required_kernelcore)
4749                goto out;
4750
4751        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4752        find_usable_zone_for_movable();
4753        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4754
4755restart:
4756        /* Spread kernelcore memory as evenly as possible throughout nodes */
4757        kernelcore_node = required_kernelcore / usable_nodes;
4758        for_each_node_state(nid, N_HIGH_MEMORY) {
4759                unsigned long start_pfn, end_pfn;
4760
4761                /*
4762                 * Recalculate kernelcore_node if the division per node
4763                 * now exceeds what is necessary to satisfy the requested
4764                 * amount of memory for the kernel
4765                 */
4766                if (required_kernelcore < kernelcore_node)
4767                        kernelcore_node = required_kernelcore / usable_nodes;
4768
4769                /*
4770                 * As the map is walked, we track how much memory is usable
4771                 * by the kernel using kernelcore_remaining. When it is
4772                 * 0, the rest of the node is usable by ZONE_MOVABLE
4773                 */
4774                kernelcore_remaining = kernelcore_node;
4775
4776                /* Go through each range of PFNs within this node */
4777                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4778                        unsigned long size_pages;
4779
4780                        start_pfn = max(start_pfn, zone_movable_pfn[nid]);
4781                        if (start_pfn >= end_pfn)
4782                                continue;
4783
4784                        /* Account for what is only usable for kernelcore */
4785                        if (start_pfn < usable_startpfn) {
4786                                unsigned long kernel_pages;
4787                                kernel_pages = min(end_pfn, usable_startpfn)
4788                                                                - start_pfn;
4789
4790                                kernelcore_remaining -= min(kernel_pages,
4791                                                        kernelcore_remaining);
4792                                required_kernelcore -= min(kernel_pages,
4793                                                        required_kernelcore);
4794
4795                                /* Continue if range is now fully accounted */
4796                                if (end_pfn <= usable_startpfn) {
4797
4798                                        /*
4799                                         * Push zone_movable_pfn to the end so
4800                                         * that if we have to rebalance
4801                                         * kernelcore across nodes, we will
4802                                         * not double account here
4803                                         */
4804                                        zone_movable_pfn[nid] = end_pfn;
4805                                        continue;
4806                                }
4807                                start_pfn = usable_startpfn;
4808                        }
4809
4810                        /*
4811                         * The usable PFN range for ZONE_MOVABLE is from
4812                         * start_pfn->end_pfn. Calculate size_pages as the
4813                         * number of pages used as kernelcore
4814                         */
4815                        size_pages = end_pfn - start_pfn;
4816                        if (size_pages > kernelcore_remaining)
4817                                size_pages = kernelcore_remaining;
4818                        zone_movable_pfn[nid] = start_pfn + size_pages;
4819
4820                        /*
4821                         * Some kernelcore has been met, update counts and
4822                         * break if the kernelcore for this node has been
4823                         * satisfied
4824                         */
4825                        required_kernelcore -= min(required_kernelcore,
4826                                                                size_pages);
4827                        kernelcore_remaining -= size_pages;
4828                        if (!kernelcore_remaining)
4829                                break;
4830                }
4831        }
4832
4833        /*
4834         * If there is still required_kernelcore, we do another pass with one
4835         * less node in the count. This will push zone_movable_pfn[nid] further
4836         * along on the nodes that still have memory until kernelcore is
4837 * satisfied
4838         */
4839        usable_nodes--;
4840        if (usable_nodes && required_kernelcore > usable_nodes)
4841                goto restart;
4842
4843        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4844        for (nid = 0; nid < MAX_NUMNODES; nid++)
4845                zone_movable_pfn[nid] =
4846                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4847
4848out:
4849        /* restore the node_state */
4850        node_states[N_HIGH_MEMORY] = saved_node_state;
4851}
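/*
 * Worked example (illustrative, assumes no memory holes and that both nodes
 * lie entirely at or above usable_startpfn): with two nodes of 524288 pages
 * each (2GiB at 4KiB pages) and kernelcore=1G, required_kernelcore is 262144
 * pages and usable_nodes is 2, so kernelcore_node is 131072 pages per node
 * and zone_movable_pfn[] for each node ends up 131072 pages (512MiB) past
 * that node's start pfn, before the final MAX_ORDER_NR_PAGES roundup.
 */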
4852
4853/* Any regular memory on that node? */
4854static void __init check_for_regular_memory(pg_data_t *pgdat)
4855{
4856#ifdef CONFIG_HIGHMEM
4857        enum zone_type zone_type;
4858
4859        for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4860                struct zone *zone = &pgdat->node_zones[zone_type];
4861                if (zone->present_pages) {
4862                        node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4863                        break;
4864                }
4865        }
4866#endif
4867}
4868
4869/**
4870 * free_area_init_nodes - Initialise all pg_data_t and zone data
4871 * @max_zone_pfn: an array of max PFNs for each zone
4872 *
4873 * This will call free_area_init_node() for each active node in the system.
4874 * Using the page ranges provided by add_active_range(), the size of each
4875 * zone in each node and their holes is calculated. If the maximum PFNs of
4876 * two adjacent zones match, the higher zone is assumed to be empty.
4877 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4878 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4879 * starts where the previous one ended. For example, ZONE_DMA32 starts
4880 * at arch_max_dma_pfn.
4881 */
4882void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4883{
4884        unsigned long start_pfn, end_pfn;
4885        int i, nid;
4886
4887        /* Record where the zone boundaries are */
4888        memset(arch_zone_lowest_possible_pfn, 0,
4889                                sizeof(arch_zone_lowest_possible_pfn));
4890        memset(arch_zone_highest_possible_pfn, 0,
4891                                sizeof(arch_zone_highest_possible_pfn));
4892        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4893        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4894        for (i = 1; i < MAX_NR_ZONES; i++) {
4895                if (i == ZONE_MOVABLE)
4896                        continue;
4897                arch_zone_lowest_possible_pfn[i] =
4898                        arch_zone_highest_possible_pfn[i-1];
4899                arch_zone_highest_possible_pfn[i] =
4900                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4901        }
4902        arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4903        arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4904
4905        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4906        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4907        find_zone_movable_pfns_for_nodes();
4908
4909        /* Print out the zone ranges */
4910        printk("Zone ranges:\n");
4911        for (i = 0; i < MAX_NR_ZONES; i++) {
4912                if (i == ZONE_MOVABLE)
4913                        continue;
4914                printk(KERN_CONT "  %-8s ", zone_names[i]);
4915                if (arch_zone_lowest_possible_pfn[i] ==
4916                                arch_zone_highest_possible_pfn[i])
4917                        printk(KERN_CONT "empty\n");
4918                else
4919                        printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
4920                                arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
4921                                (arch_zone_highest_possible_pfn[i]
4922                                        << PAGE_SHIFT) - 1);
4923        }
4924
4925        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4926        printk("Movable zone start for each node\n");
4927        for (i = 0; i < MAX_NUMNODES; i++) {
4928                if (zone_movable_pfn[i])
4929                        printk("  Node %d: %#010lx\n", i,
4930                               zone_movable_pfn[i] << PAGE_SHIFT);
4931        }
4932
4933        /* Print out the early node map */
4934        printk("Early memory node ranges\n");
4935        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4936                printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
4937                       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
4938
4939        /* Initialise every node */
4940        mminit_verify_pageflags_layout();
4941        setup_nr_node_ids();
4942        for_each_online_node(nid) {
4943                pg_data_t *pgdat = NODE_DATA(nid);
4944                free_area_init_node(nid, NULL,
4945                                find_min_pfn_for_node(nid), NULL);
4946
4947                /* Any memory on that node */
4948                if (pgdat->node_present_pages)
4949                        node_set_state(nid, N_HIGH_MEMORY);
4950                check_for_regular_memory(pgdat);
4951        }
4952}
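/*
 * Usage sketch (hypothetical values, assumes a DMA/NORMAL/HIGHMEM layout with
 * 4KiB pages): an architecture passing
 * max_zone_pfn = { [ZONE_DMA] = 4096, [ZONE_NORMAL] = 229376,
 * [ZONE_HIGHMEM] = 524288 } gets zone ranges DMA [min_pfn, 4096),
 * Normal [4096, 229376) and HighMem [229376, 524288), before any
 * ZONE_MOVABLE carve-out requested via kernelcore=/movablecore=.
 */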
4953
4954static int __init cmdline_parse_core(char *p, unsigned long *core)
4955{
4956        unsigned long long coremem;
4957        if (!p)
4958                return -EINVAL;
4959
4960        coremem = memparse(p, &p);
4961        *core = coremem >> PAGE_SHIFT;
4962
4963        /* Paranoid check that UL is enough for the coremem value */
4964        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4965
4966        return 0;
4967}
4968
4969/*
4970 * kernelcore=size sets the amount of memory used for allocations that
4971 * cannot be reclaimed or migrated.
4972 */
4973static int __init cmdline_parse_kernelcore(char *p)
4974{
4975        return cmdline_parse_core(p, &required_kernelcore);
4976}
4977
4978/*
4979 * movablecore=size sets the amount of memory used for allocations that
4980 * can be reclaimed or migrated.
4981 */
4982static int __init cmdline_parse_movablecore(char *p)
4983{
4984        return cmdline_parse_core(p, &required_movablecore);
4985}
4986
4987early_param("kernelcore", cmdline_parse_kernelcore);
4988early_param("movablecore", cmdline_parse_movablecore);
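/*
 * Usage example (hypothetical command line, assumes 4KiB pages): booting with
 * "kernelcore=512M" makes memparse() return 536870912, so required_kernelcore
 * becomes 536870912 >> PAGE_SHIFT = 131072 pages; "movablecore=" is handled
 * the same way via required_movablecore.
 */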
4989
4990#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4991
4992/**
4993 * set_dma_reserve - set the specified number of pages reserved in the first zone
4994 * @new_dma_reserve: The number of pages to mark reserved
4995 *
4996 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4997 * In the DMA zone, a significant percentage may be consumed by kernel image
4998 * and other unfreeable allocations which can skew the watermarks badly. This
4999 * function may optionally be used to account for unfreeable pages in the
5000 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
5001 * smaller per-cpu batchsize.
5002 */
5003void __init set_dma_reserve(unsigned long new_dma_reserve)
5004{
5005        dma_reserve = new_dma_reserve;
5006}
5007
5008void __init free_area_init(unsigned long *zones_size)
5009{
5010        free_area_init_node(0, zones_size,
5011                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
5012}
5013
5014static int page_alloc_cpu_notify(struct notifier_block *self,
5015                                 unsigned long action, void *hcpu)
5016{
5017        int cpu = (unsigned long)hcpu;
5018
5019        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
5020                lru_add_drain_cpu(cpu);
5021                drain_pages(cpu);
5022
5023                /*
5024                 * Spill the event counters of the dead processor
5025                 * into the current processors event counters.
5026                 * This artificially elevates the count of the current
5027                 * processor.
5028                 */
5029                vm_events_fold_cpu(cpu);
5030
5031                /*
5032                 * Zero the differential counters of the dead processor
5033                 * so that the vm statistics are consistent.
5034                 *
5035                 * This is only okay since the processor is dead and cannot
5036                 * race with what we are doing.
5037                 */
5038                refresh_cpu_vm_stats(cpu);
5039        }
5040        return NOTIFY_OK;
5041}
5042
5043void __init page_alloc_init(void)
5044{
5045        hotcpu_notifier(page_alloc_cpu_notify, 0);
5046}
5047
5048/*
5049 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5050 *      or min_free_kbytes changes.
5051 */
5052static void calculate_totalreserve_pages(void)
5053{
5054        struct pglist_data *pgdat;
5055        unsigned long reserve_pages = 0;
5056        enum zone_type i, j;
5057
5058        for_each_online_pgdat(pgdat) {
5059                for (i = 0; i < MAX_NR_ZONES; i++) {
5060                        struct zone *zone = pgdat->node_zones + i;
5061                        unsigned long max = 0;
5062
5063                        /* Find valid and maximum lowmem_reserve in the zone */
5064                        for (j = i; j < MAX_NR_ZONES; j++) {
5065                                if (zone->lowmem_reserve[j] > max)
5066                                        max = zone->lowmem_reserve[j];
5067                        }
5068
5069                        /* we treat the high watermark as reserved pages. */
5070                        max += high_wmark_pages(zone);
5071
5072                        if (max > zone->present_pages)
5073                                max = zone->present_pages;
5074                        reserve_pages += max;
5075                        /*
5076                         * Lowmem reserves are not available to
5077                         * GFP_HIGHUSER page cache allocations and
5078                         * kswapd tries to balance zones to their high
5079                         * watermark.  As a result, neither should be
5080                         * regarded as dirtyable memory, to prevent a
5081                         * situation where reclaim has to clean pages
5082                         * in order to balance the zones.
5083                         */
5084                        zone->dirty_balance_reserve = max;
5085                }
5086        }
5087        dirty_balance_reserve = reserve_pages;
5088        totalreserve_pages = reserve_pages;
5089}
5090
5091/*
5092 * setup_per_zone_lowmem_reserve - called whenever
5093 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
5094 *      has a correct lowmem_reserve value, so an adequate number of
5095 *      pages are left in the zone after a successful __alloc_pages().
5096 */
5097static void setup_per_zone_lowmem_reserve(void)
5098{
5099        struct pglist_data *pgdat;
5100        enum zone_type j, idx;
5101
5102        for_each_online_pgdat(pgdat) {
5103                for (j = 0; j < MAX_NR_ZONES; j++) {
5104                        struct zone *zone = pgdat->node_zones + j;
5105                        unsigned long present_pages = zone->present_pages;
5106
5107                        zone->lowmem_reserve[j] = 0;
5108
5109                        idx = j;
5110                        while (idx) {
5111                                struct zone *lower_zone;
5112
5113                                idx--;
5114
5115                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
5116                                        sysctl_lowmem_reserve_ratio[idx] = 1;
5117
5118                                lower_zone = pgdat->node_zones + idx;
5119                                lower_zone->lowmem_reserve[j] = present_pages /
5120                                        sysctl_lowmem_reserve_ratio[idx];
5121                                present_pages += lower_zone->present_pages;
5122                        }
5123                }
5124        }
5125
5126        /* update totalreserve_pages */
5127        calculate_totalreserve_pages();
5128}
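/*
 * Worked example (illustrative sizes, default sysctl_lowmem_reserve_ratio of
 * 256 for DMA and 32 for Normal): with ZONE_DMA = 4096, ZONE_NORMAL = 225280
 * and ZONE_HIGHMEM = 262144 present pages, the HighMem pass gives
 * Normal->lowmem_reserve[HIGHMEM] = 262144 / 32 = 8192 and, after adding
 * Normal's own pages, DMA->lowmem_reserve[HIGHMEM] = 487424 / 256 = 1904.
 */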
5129
5130static void __setup_per_zone_wmarks(void)
5131{
5132        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5133        unsigned long lowmem_pages = 0;
5134        struct zone *zone;
5135        unsigned long flags;
5136
5137        /* Calculate total number of !ZONE_HIGHMEM pages */
5138        for_each_zone(zone) {
5139                if (!is_highmem(zone))
5140                        lowmem_pages += zone->present_pages;
5141        }
5142
5143        for_each_zone(zone) {
5144                u64 tmp;
5145
5146                spin_lock_irqsave(&zone->lock, flags);
5147                tmp = (u64)pages_min * zone->present_pages;
5148                do_div(tmp, lowmem_pages);
5149                if (is_highmem(zone)) {
5150                        /*
5151                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5152                         * need highmem pages, so cap pages_min to a small
5153                         * value here.
5154                         *
5155                         * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
5156                         * deltas control async page reclaim, and so should
5157                         * not be capped for highmem.
5158                         */
5159                        int min_pages;
5160
5161                        min_pages = zone->present_pages / 1024;
5162                        if (min_pages < SWAP_CLUSTER_MAX)
5163                                min_pages = SWAP_CLUSTER_MAX;
5164                        if (min_pages > 128)
5165                                min_pages = 128;
5166                        zone->watermark[WMARK_MIN] = min_pages;
5167                } else {
5168                        /*
5169                         * If it's a lowmem zone, reserve a number of pages
5170                         * proportionate to the zone's size.
5171                         */
5172                        zone->watermark[WMARK_MIN] = tmp;
5173                }
5174
5175                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5176                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5177
5178                zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
5179                zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
5180                zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
5181
5182                setup_zone_migrate_reserve(zone);
5183                spin_unlock_irqrestore(&zone->lock, flags);
5184        }
5185
5186        /* update totalreserve_pages */
5187        calculate_totalreserve_pages();
5188}
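/*
 * Worked example (assumes min_free_kbytes == 4096 and 4KiB pages): pages_min
 * is 4096 >> (PAGE_SHIFT - 10) = 1024 pages.  A lowmem zone holding half of
 * all lowmem pages gets tmp = 512, so (ignoring cma_wmark_pages) its
 * watermarks become WMARK_MIN = 512, WMARK_LOW = 512 + 128 = 640 and
 * WMARK_HIGH = 512 + 256 = 768 pages.
 */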
5189
5190/**
5191 * setup_per_zone_wmarks - called when min_free_kbytes changes
5192 * or when memory is hot-{added|removed}
5193 *
5194 * Ensures that the watermark[min,low,high] values for each zone are set
5195 * correctly with respect to min_free_kbytes.
5196 */
5197void setup_per_zone_wmarks(void)
5198{
5199        mutex_lock(&zonelists_mutex);
5200        __setup_per_zone_wmarks();
5201        mutex_unlock(&zonelists_mutex);
5202}
5203
5204/*
5205 * The inactive anon list should be small enough that the VM never has to
5206 * do too much work, but large enough that each inactive page has a chance
5207 * to be referenced again before it is swapped out.
5208 *
5209 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5210 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5211 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5212 * the anonymous pages are kept on the inactive list.
5213 *
5214 * total     target    max
5215 * memory    ratio     inactive anon
5216 * -------------------------------------
5217 *   10MB       1         5MB
5218 *  100MB       1        50MB
5219 *    1GB       3       250MB
5220 *   10GB      10       0.9GB
5221 *  100GB      31         3GB
5222 *    1TB     101        10GB
5223 *   10TB     320        32GB
5224 */
5225static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5226{
5227        unsigned int gb, ratio;
5228
5229        /* Zone size in gigabytes */
5230        gb = zone->present_pages >> (30 - PAGE_SHIFT);
5231        if (gb)
5232                ratio = int_sqrt(10 * gb);
5233        else
5234                ratio = 1;
5235
5236        zone->inactive_ratio = ratio;
5237}
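/*
 * Worked example (assumes 4KiB pages): a 10GiB zone has
 * gb = present_pages >> (30 - PAGE_SHIFT) = 10, so inactive_ratio becomes
 * int_sqrt(100) = 10, matching the 10GB row of the table above.
 */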
5238
5239static void __meminit setup_per_zone_inactive_ratio(void)
5240{
5241        struct zone *zone;
5242
5243        for_each_zone(zone)
5244                calculate_zone_inactive_ratio(zone);
5245}
5246
5247/*
5248 * Initialise min_free_kbytes.
5249 *
5250 * For small machines we want it small (128k min).  For large machines
5251 * we want it large (64MB max).  But it is not linear, because network
5252 * bandwidth does not increase linearly with machine size.  We use
5253 *
5254 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5255 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
5256 *
5257 * which yields
5258 *
5259 * 16MB:        512k
5260 * 32MB:        724k
5261 * 64MB:        1024k
5262 * 128MB:       1448k
5263 * 256MB:       2048k
5264 * 512MB:       2896k
5265 * 1024MB:      4096k
5266 * 2048MB:      5792k
5267 * 4096MB:      8192k
5268 * 8192MB:      11584k
5269 * 16384MB:     16384k
5270 */
5271int __meminit init_per_zone_wmark_min(void)
5272{
5273        unsigned long lowmem_kbytes;
5274
5275        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5276
5277        min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5278        if (min_free_kbytes < 128)
5279                min_free_kbytes = 128;
5280        if (min_free_kbytes > 65536)
5281                min_free_kbytes = 65536;
5282        setup_per_zone_wmarks();
5283        refresh_zone_stat_thresholds();
5284        setup_per_zone_lowmem_reserve();
5285        setup_per_zone_inactive_ratio();
5286        return 0;
5287}
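/*
 * Worked example: with 2GiB of lowmem and 4KiB pages, nr_free_buffer_pages()
 * is roughly 524288, lowmem_kbytes is about 2097152, and
 * int_sqrt(2097152 * 16) = int_sqrt(33554432) = 5792, matching the 2048MB
 * row of the table above (the 128k/65536k clamps do not apply here).
 */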
5288module_init(init_per_zone_wmark_min)
5289
5290/*
5291 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
5292 *      that we can call two helper functions whenever min_free_kbytes
5293 *      changes.
5294 */
5295int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
5296        void __user *buffer, size_t *length, loff_t *ppos)
5297{
5298        proc_dointvec(table, write, buffer, length, ppos);
5299        if (write)
5300                setup_per_zone_wmarks();
5301        return 0;
5302}
5303
5304#ifdef CONFIG_NUMA
5305int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5306        void __user *buffer, size_t *length, loff_t *ppos)
5307{
5308        struct zone *zone;
5309        int rc;
5310
5311        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5312        if (rc)
5313                return rc;
5314
5315        for_each_zone(zone)
5316                zone->min_unmapped_pages = (zone->present_pages *
5317                                sysctl_min_unmapped_ratio) / 100;
5318        return 0;
5319}
5320
5321int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5322        void __user *buffer, size_t *length, loff_t *ppos)
5323{
5324        struct zone *zone;
5325        int rc;
5326
5327        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5328        if (rc)
5329                return rc;
5330
5331        for_each_zone(zone)
5332                zone->min_slab_pages = (zone->present_pages *
5333                                sysctl_min_slab_ratio) / 100;
5334        return 0;
5335}
5336#endif
5337
5338/*
5339 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5340 *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5341 *      whenever sysctl_lowmem_reserve_ratio changes.
5342 *
5343 * The reserve ratio obviously has absolutely no relation with the
5344 * minimum watermarks. The lowmem reserve ratio is only meaningful
5345 * as a function of the boot-time zone sizes.
5346 */
5347int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5348        void __user *buffer, size_t *length, loff_t *ppos)
5349{
5350        proc_dointvec_minmax(table, write, buffer, length, ppos);
5351        setup_per_zone_lowmem_reserve();
5352        return 0;
5353}
5354
5355/*
5356 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
5357 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
5358 * pagelist can have before it gets flushed back to the buddy allocator.
5359 */
5360
5361int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5362        void __user *buffer, size_t *length, loff_t *ppos)
5363{
5364        struct zone *zone;
5365        unsigned int cpu;
5366        int ret;
5367
5368        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5369        if (!write || (ret < 0))
5370                return ret;
5371        for_each_populated_zone(zone) {
5372                for_each_possible_cpu(cpu) {
5373                        unsigned long  high;
5374                        high = zone->present_pages / percpu_pagelist_fraction;
5375                        setup_pagelist_highmark(
5376                                per_cpu_ptr(zone->pageset, cpu), high);
5377                }
5378        }
5379        return 0;
5380}
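/*
 * Worked example (illustrative): writing 8 to percpu_pagelist_fraction gives
 * a zone with 262144 present pages a per-cpu pcp->high of 262144 / 8 = 32768
 * pages on every CPU; each per-cpu list may then hold up to that many pages
 * before pages are returned to the buddy lists.
 */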
5381
5382int hashdist = HASHDIST_DEFAULT;
5383
5384#ifdef CONFIG_NUMA
5385static int __init set_hashdist(char *str)
5386{
5387        if (!str)
5388                return 0;
5389        hashdist = simple_strtoul(str, &str, 0);
5390        return 1;
5391}
5392__setup("hashdist=", set_hashdist);
5393#endif
5394
5395/*
5396 * allocate a large system hash table from bootmem
5397 * - it is assumed that the hash table must contain an exact power-of-2
5398 *   quantity of entries
5399 * - limit is the number of hash buckets, not the total allocation size
5400 */
5401void *__init alloc_large_system_hash(const char *tablename,
5402                                     unsigned long bucketsize,
5403                                     unsigned long numentries,
5404                                     int scale,
5405                                     int flags,
5406                                     unsigned int *_hash_shift,
5407                                     unsigned int *_hash_mask,
5408                                     unsigned long low_limit,
5409                                     unsigned long high_limit)
5410{
5411        unsigned long long max = high_limit;
5412        unsigned long log2qty, size;
5413        void *table = NULL;
5414
5415        /* allow the kernel cmdline to have a say */
5416        if (!numentries) {
5417                /* round applicable memory size up to nearest megabyte */
5418                numentries = nr_kernel_pages;
5419                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5420                numentries >>= 20 - PAGE_SHIFT;
5421                numentries <<= 20 - PAGE_SHIFT;
5422
5423                /* limit to 1 bucket per 2^scale bytes of low memory */
5424                if (scale > PAGE_SHIFT)
5425                        numentries >>= (scale - PAGE_SHIFT);
5426                else
5427                        numentries <<= (PAGE_SHIFT - scale);
5428
5429                /* Make sure we've got at least a 0-order allocation.. */
5430                if (unlikely(flags & HASH_SMALL)) {
5431                        /* Makes no sense without HASH_EARLY */
5432                        WARN_ON(!(flags & HASH_EARLY));
5433                        if (!(numentries >> *_hash_shift)) {
5434                                numentries = 1UL << *_hash_shift;
5435                                BUG_ON(!numentries);
5436                        }
5437                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5438                        numentries = PAGE_SIZE / bucketsize;
5439        }
5440        numentries = roundup_pow_of_two(numentries);
5441
5442        /* limit allocation size to 1/16 total memory by default */
5443        if (max == 0) {
5444                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5445                do_div(max, bucketsize);
5446        }
5447        max = min(max, 0x80000000ULL);
5448
5449        if (numentries < low_limit)
5450                numentries = low_limit;
5451        if (numentries > max)
5452                numentries = max;
5453
5454        log2qty = ilog2(numentries);
5455
5456        do {
5457                size = bucketsize << log2qty;
5458                if (flags & HASH_EARLY)
5459                        table = alloc_bootmem_nopanic(size);
5460                else if (hashdist)
5461                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5462                else {
5463                        /*
5464                         * If bucketsize is not a power of two, we may free
5465                         * some pages at the end of the hash table, which
5466                         * alloc_pages_exact() does automatically.
5467                         */
5468                        if (get_order(size) < MAX_ORDER) {
5469                                table = alloc_pages_exact(size, GFP_ATOMIC);
5470                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5471                        }
5472                }
5473        } while (!table && size > PAGE_SIZE && --log2qty);
5474
5475        if (!table)
5476                panic("Failed to allocate %s hash table\n", tablename);
5477
5478        printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5479               tablename,
5480               (1UL << log2qty),
5481               ilog2(size) - PAGE_SHIFT,
5482               size);
5483
5484        if (_hash_shift)
5485                *_hash_shift = log2qty;
5486        if (_hash_mask)
5487                *_hash_mask = (1 << log2qty) - 1;
5488
5489        return table;
5490}
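/*
 * Worked example (illustrative values: nr_kernel_pages == 1 << 20, i.e. 4GiB
 * of lowmem with 4KiB pages, scale == 14, bucketsize == 8, no explicit
 * numentries and no limits): rounding to a megabyte leaves 1 << 20 entries,
 * the scale shift gives 1 << 18 buckets, which is already a power of two, so
 * log2qty is 18 and the table is bucketsize << 18 = 2MiB in size.
 */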
5491
5492/* Return a pointer to the bitmap storing bits affecting a block of pages */
5493static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5494                                                        unsigned long pfn)
5495{
5496#ifdef CONFIG_SPARSEMEM
5497        return __pfn_to_section(pfn)->pageblock_flags;
5498#else
5499        return zone->pageblock_flags;
5500#endif /* CONFIG_SPARSEMEM */
5501}
5502
5503static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5504{
5505#ifdef CONFIG_SPARSEMEM
5506        pfn &= (PAGES_PER_SECTION-1);
5507        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5508#else
5509        pfn = pfn - zone->zone_start_pfn;
5510        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5511#endif /* CONFIG_SPARSEMEM */
5512}
5513
5514/**
5515 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5516 * @page: The page within the block of interest
5517 * @start_bitidx: The first bit of interest to retrieve
5518 * @end_bitidx: The last bit of interest
5519 * returns pageblock_bits flags
5520 */
5521unsigned long get_pageblock_flags_group(struct page *page,
5522                                        int start_bitidx, int end_bitidx)
5523{
5524        struct zone *zone;
5525        unsigned long *bitmap;
5526        unsigned long pfn, bitidx;
5527        unsigned long flags = 0;
5528        unsigned long value = 1;
5529
5530        zone = page_zone(page);
5531        pfn = page_to_pfn(page);
5532        bitmap = get_pageblock_bitmap(zone, pfn);
5533        bitidx = pfn_to_bitidx(zone, pfn);
5534
5535        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5536                if (test_bit(bitidx + start_bitidx, bitmap))
5537                        flags |= value;
5538
5539        return flags;
5540}
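/*
 * Usage note: get_pageblock_migratetype() is the most common caller, reading
 * the migratetype bits of a pageblock via
 * get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); the setter
 * below is used the same way by set_pageblock_migratetype().
 */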
5541
5542/**
5543 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5544 * @page: The page within the block of interest
5545 * @start_bitidx: The first bit of interest
5546 * @end_bitidx: The last bit of interest
5547 * @flags: The flags to set
5548 */
5549void set_pageblock_flags_group(struct page *page, unsigned long flags,
5550                                        int start_bitidx, int end_bitidx)
5551{
5552        struct zone *zone;
5553        unsigned long *bitmap;
5554        unsigned long pfn, bitidx;
5555        unsigned long value = 1;
5556
5557        zone = page_zone(page);
5558        pfn = page_to_pfn(page);
5559        bitmap = get_pageblock_bitmap(zone, pfn);
5560        bitidx = pfn_to_bitidx(zone, pfn);
5561        VM_BUG_ON(pfn < zone->zone_start_pfn);
5562        VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5563
5564        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5565                if (flags & value)
5566                        __set_bit(bitidx + start_bitidx, bitmap);
5567                else
5568                        __clear_bit(bitidx + start_bitidx, bitmap);
5569}
5570
5571/*
5572 * This function checks whether the pageblock includes unmovable pages or not.
5573 * If @count is not zero, it is okay to include fewer than @count unmovable pages.
5574 *
5575 * The PageLRU check without isolation or lru_lock could race, so a
5576 * MIGRATE_MOVABLE block might include unmovable pages; the result of this
5577 * function therefore cannot be expected to be exact.
5578 */
5579bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
5580{
5581        unsigned long pfn, iter, found;
5582        int mt;
5583
5584        /*
5585         * To avoid noisy data, lru_add_drain_all() should be called first.
5586         * A ZONE_MOVABLE zone never contains unmovable pages.
5587         */
5588        if (zone_idx(zone) == ZONE_MOVABLE)
5589                return false;
5590        mt = get_pageblock_migratetype(page);
5591        if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
5592                return false;
5593
5594        pfn = page_to_pfn(page);
5595        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5596                unsigned long check = pfn + iter;
5597
5598                if (!pfn_valid_within(check))
5599                        continue;
5600
5601                page = pfn_to_page(check);
5602                /*
5603                 * We can't use page_count without pinning the page
5604                 * because another CPU could free a compound page.
5605                 * This check already skips compound tails of THP
5606                 * because their page->_count is zero at all times.
5607                 */
5608                if (!atomic_read(&page->_count)) {
5609                        if (PageBuddy(page))
5610                                iter += (1 << page_order(page)) - 1;
5611                        continue;
5612                }
5613
5614                if (!PageLRU(page))
5615                        found++;
5616                /*
5617                 * If there are RECLAIMABLE pages, we would need to check them,
5618                 * but memory offlining itself doesn't call shrink_slab() yet,
5619                 * so this still needs to be fixed.
5620                 */
5621                /*
5622                 * If the page is not RAM, page_count() should be 0 and no
5623                 * further check is needed. This is a _used_, non-movable page.
5624                 *
5625                 * The problematic thing here is PG_reserved pages. PG_reserved
5626                 * is set both on memory hole pages and on _used_ kernel
5627                 * pages at boot.
5628                 */
5629                if (found > count)
5630                        return true;
5631        }
5632        return false;
5633}
5634
5635bool is_pageblock_removable_nolock(struct page *page)
5636{
5637        struct zone *zone;
5638        unsigned long pfn;
5639
5640        /*
5641         * We have to be careful here because we are iterating over memory
5642         * sections which are not zone aware so we might end up outside of
5643         * the zone but still within the section.
5644         * We have to take care about the node as well. If the node is offline
5645         * its NODE_DATA will be NULL - see page_zone.
5646         */
5647        if (!node_online(page_to_nid(page)))
5648                return false;
5649
5650        zone = page_zone(page);
5651        pfn = page_to_pfn(page);
5652        if (zone->zone_start_pfn > pfn ||
5653                        zone->zone_start_pfn + zone->spanned_pages <= pfn)
5654                return false;
5655
5656        return !has_unmovable_pages(zone, page, 0);
5657}
5658
5659#ifdef CONFIG_CMA
5660
5661static unsigned long pfn_max_align_down(unsigned long pfn)
5662{
5663        return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
5664                             pageblock_nr_pages) - 1);
5665}
5666
5667static unsigned long pfn_max_align_up(unsigned long pfn)
5668{
5669        return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
5670                                pageblock_nr_pages));
5671}
5672
5673/* [start, end) must belong to a single zone. */
5674static int __alloc_contig_migrate_range(struct compact_control *cc,
5675                                        unsigned long start, unsigned long end)
5676{
5677        /* This function is based on compact_zone() from compaction.c. */
5678        unsigned long nr_reclaimed;
5679        unsigned long pfn = start;
5680        unsigned int tries = 0;
5681        int ret = 0;
5682
5683        migrate_prep_local();
5684
5685        while (pfn < end || !list_empty(&cc->migratepages)) {
5686                if (fatal_signal_pending(current)) {
5687                        ret = -EINTR;
5688                        break;
5689                }
5690
5691                if (list_empty(&cc->migratepages)) {
5692                        cc->nr_migratepages = 0;
5693                        pfn = isolate_migratepages_range(cc->zone, cc,
5694                                                         pfn, end, true);
5695                        if (!pfn) {
5696                                ret = -EINTR;
5697                                break;
5698                        }
5699                        tries = 0;
5700                } else if (++tries == 5) {
5701                        ret = ret < 0 ? ret : -EBUSY;
5702                        break;
5703                }
5704
5705                nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
5706                                                        &cc->migratepages);
5707                cc->nr_migratepages -= nr_reclaimed;
5708
5709                ret = migrate_pages(&cc->migratepages,
5710                                    alloc_migrate_target,
5711                                    0, false, MIGRATE_SYNC);
5712        }
5713
5714        putback_lru_pages(&cc->migratepages);
5715        return ret > 0 ? 0 : ret;
5716}
5717
5718/*
5719 * Update zone's cma pages counter used for watermark level calculation.
5720 */
5721static inline void __update_cma_watermarks(struct zone *zone, int count)
5722{
5723        unsigned long flags;
5724        spin_lock_irqsave(&zone->lock, flags);
5725        zone->min_cma_pages += count;
5726        spin_unlock_irqrestore(&zone->lock, flags);
5727        setup_per_zone_wmarks();
5728}
5729
5730/*
5731 * Trigger memory pressure bump to reclaim some pages in order to be able to
5732 * allocate 'count' pages in single page units. Does work similar to the
5733 * __alloc_pages_slowpath() function.
5734 */
5735static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
5736{
5737        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5738        struct zonelist *zonelist = node_zonelist(0, gfp_mask);
5739        int did_some_progress = 0;
5740        int order = 1;
5741
5742        /*
5743         * Increase the watermark levels to force kswapd to do its job
5744         * and stabilise at the new watermark level.
5745         */
5746        __update_cma_watermarks(zone, count);
5747