/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
        if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
                return false;
        return true;
}
#endif /* CONFIG_PM_SLEEP */
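
/*
 * Illustrative sketch (not part of the kernel source): the expected
 * calling pattern for the helpers above.  The actual call sites live
 * in the suspend/hibernate core under kernel/power/ and may differ in
 * detail; this is only an outline of the contract described in the
 * comment above:
 *
 *	mutex_lock(&pm_mutex);
 *	pm_restrict_gfp_mask();    // clear GFP_IOFS: no I/O allocations
 *	// ... suspend devices, enter the sleep state, resume ...
 *	pm_restore_gfp_mask();     // restore the saved gfp_allowed_mask
 *	mutex_unlock(&pm_mutex);
 *
 * While the mask is restricted, pm_suspended_storage() returns true,
 * letting callers avoid waiting on storage that is asleep.
 */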

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
         256,
#endif
#ifdef CONFIG_ZONE_DMA32
         256,
#endif
#ifdef CONFIG_HIGHMEM
         32,
#endif
         32,
};
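
/*
 * Worked example of the ratios above (illustration only), using the
 * 1G split from the comment: 16M DMA, 784M Normal, 224M HighMem.
 * For an allocation that could have been satisfied from HighMem but
 * falls back to ZONE_NORMAL, the reserve kept free in ZONE_NORMAL is
 *
 *	224M / 32 = 7M			(ratio 32 for HIGHMEM allocations)
 *
 * and if the same allocation falls all the way back to ZONE_DMA,
 *
 *	(224M + 784M) / 256 ~= 3.9M	(ratio 256 for ZONE_DMA)
 *
 * i.e. each lower zone protects itself from higher-zone-capable
 * allocations by (displaceable memory above it) / ratio.
 */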

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
         "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
         "DMA32",
#endif
         "Normal",
#ifdef CONFIG_HIGHMEM
         "HighMem",
#endif
         "Movable",
};

int min_free_kbytes = 1024;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

static void set_pageblock_migratetype(struct page *page, int migratetype)
{

        if (unlikely(page_group_by_mobility_disabled))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);

        do {
                seq = zone_span_seqbegin(zone);
                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
                        ret = 1;
                else if (pfn < zone->zone_start_pfn)
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /* Don't complain about poisoned pages */
        if (PageHWPoison(page)) {
                reset_page_mapcount(page); /* remove PageBuddy */
                return;
        }

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        printk(KERN_ALERT
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        dump_page(page);

        print_modules();
        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        reset_page_mapcount(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
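
/*
 * Illustrative layout (not from the source) for a hypothetical order-2
 * compound page, i.e. four struct pages with the head at page[0]:
 *
 *	page[0]: PG_head set
 *	page[1]: PG_tail set, ->first_page = &page[0],
 *	         ->lru.next = the destructor, ->lru.prev = (void *)2 (order)
 *	page[2]: PG_tail set, ->first_page = &page[0]
 *	page[3]: PG_tail set, ->first_page = &page[0]
 *
 * This is just the storage scheme the comment above describes: the
 * destructor and order live in the first tail page, which is why an
 * order-0 page (having no tail page) cannot be compound.
 */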

static void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, free_compound_page);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
                __SetPageTail(p);
                set_page_count(p, 0);
                p->first_page = page;
        }
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        int bad = 0;

        if (unlikely(compound_order(page) != order) ||
            unlikely(!PageHead(page))) {
                bad_page(page);
                bad++;
        }

        __ClearPageHead(page);

        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;

                if (unlikely(!PageTail(p) || (p->first_page != page))) {
                        bad_page(page);
                        bad++;
                }
                __ClearPageTail(p);
        }

        return bad;
}

static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        /*
         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
         * and __GFP_HIGHMEM from hard or soft interrupt context.
         */
        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
        for (i = 0; i < (1 << order); i++)
                clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
        unsigned long res;

        if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
                printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
                return 0;
        }
        _debug_guardpage_minorder = res;
        printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
        return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
        __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
        __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}
/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined order O+1 page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy1) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
        return page_idx ^ (1 << order);
}
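
/*
 * Worked example of the buddy index math above (illustration only).
 * For a 2-page block starting at page_idx 8 (order 1):
 *
 *	buddy_idx  = 8 ^ (1 << 1)  = 8 ^ 2  = 10
 *	parent_idx = 8 & ~(1 << 1) = 8 & ~2 = 8
 *
 * and symmetrically __find_buddy_index(10, 1) == 8, so each block of
 * a pair finds the other with the same XOR.
 */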

/*
 * This function checks whether a page is free && is the buddy,
 * i.e. we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to -2.  Setting, clearing, and testing _mapcount -2 is serialized by
 * zone->lock.
 *
 * For recording a page's order, we use the page_private(page) field.
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;

        if (page_is_guard(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free page runs of length (1 << order), marked with _mapcount -2.  A
 * page's order is recorded in the page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- wli
 */
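
/*
 * Walkthrough of the merge loop below, with made-up indices.  Suppose
 * an order-0 page at index 5 is freed while index 4 is a free order-0
 * block and indices 6-7 are a free order-1 block:
 *
 *	order 0: buddy of 5 is 5 ^ 1 = 4, free -> merge, idx = 5 & 4 = 4
 *	order 1: buddy of 4 is 4 ^ 2 = 6, free -> merge, idx = 4 & 6 = 4
 *	order 2: buddy of 4 is 4 ^ 4 = 0, busy -> stop
 *
 * The result is one order-2 block at index 4, put on the free list
 * with its order in page_private() and PageBuddy set on the head.
 */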

static inline void __free_one_page(struct page *page,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long page_idx;
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;

        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;

        VM_BUG_ON(migratetype == -1);

        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));

        while (order < MAX_ORDER-1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        break;
                /*
                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
                 * merge with it and move up one order.
                 */
                if (page_is_guard(buddy)) {
                        clear_page_guard_flag(buddy);
                        set_page_private(page, 0);
                        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
                        rmv_page_order(buddy);
                }
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free. If it is, it's possible
         * that pages are being freed that will coalesce soon. In case
         * that is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = buddy_idx & page_idx;
                higher_page = page + (combined_idx - page_idx);
                buddy_idx = __find_buddy_index(combined_idx, order + 1);
                higher_buddy = higher_page + (buddy_idx - combined_idx);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}

/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * Page should not be on lru, so no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
}

static inline int free_pages_check(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        return 0;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int to_free = count;

        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        while (to_free) {
                struct page *page;
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered.  This is so more pages are freed
                 * off fuller lists instead of spinning excessively around empty
                 * lists
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                /* This is the only non-empty list. Free them all. */
                if (batch_free == MIGRATE_PCPTYPES)
                        batch_free = to_free;

                do {
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, zone, 0, page_private(page));
                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
                } while (--to_free && --batch_free && !list_empty(list));
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
        spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
{
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;

        __free_one_page(page, zone, order, migratetype);
        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
        spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
        int i;
        int bad = 0;

        trace_mm_page_free(page, order);
        kmemcheck_free_shadow(page, order);

        if (PageAnon(page))
                page->mapping = NULL;
        for (i = 0; i < (1 << order); i++)
                bad += free_pages_check(page + i);
        if (bad)
                return false;

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),
                                           PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);

        return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, order))
                return;

        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
                                        get_pageblock_migratetype(page));
        local_irq_restore(flags);
}

void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
{
        unsigned int nr_pages = 1 << order;
        unsigned int loop;

        prefetchw(page);
        for (loop = 0; loop < nr_pages; loop++) {
                struct page *p = &page[loop];

                if (loop + 1 < nr_pages)
                        prefetchw(p + 1);
                __ClearPageReserved(p);
                set_page_count(p, 0);
        }

        set_page_refcounted(page);
        __free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
        unsigned i = pageblock_nr_pages;
        struct page *p = page;

        do {
                __ClearPageReserved(p);
                set_page_count(p, 0);
        } while (++p, --i);

        set_page_refcounted(page);
        set_pageblock_migratetype(page, MIGRATE_CMA);
        __free_pages(page, pageblock_order);
        totalram_pages += pageblock_nr_pages;
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- wli
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON(bad_range(zone, &page[size]));

#ifdef CONFIG_DEBUG_PAGEALLOC
                if (high < debug_guardpage_minorder()) {
                        /*
                         * Mark as guard pages (or page); this allows the
                         * block to merge back into the allocator when the
                         * buddy is freed.  The corresponding page table
                         * entries are not touched; the pages stay not
                         * present in the virtual address space.
                         */
                        INIT_LIST_HEAD(&page[size].lru);
                        set_page_guard_flag(&page[size]);
                        set_page_private(&page[size], high);
                        /* Guard pages are not available for any usage */
                        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
                        continue;
                }
#endif
                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}
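
/*
 * Example of expand() at work (illustration only).  Say an order-3
 * block (eight pages at offsets 0-7) was removed to satisfy an
 * order-0 request, so low = 0 and high = 3:
 *
 *	1st pass: size 4, pages [4..7] queued as a free order-2 block
 *	2nd pass: size 2, pages [2..3] queued as a free order-1 block
 *	3rd pass: size 1, page  [1]    queued as a free order-0 block
 *
 * leaving page [0] for the caller.  The unused halves go back
 * highest-order first, which is the delivery order the comment above
 * says the IO subsystem depends on.
 */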

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
                (atomic_read(&page->_count) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
                (mem_cgroup_bad_page_check(page)))) {
                bad_page(page);
                return 1;
        }
        return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;
                if (unlikely(check_new_page(p)))
                        return 1;
        }

        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);

        if (gfp_flags & __GFP_ZERO)
                prep_zero_page(page, order, gfp_flags);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area *area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                if (list_empty(&area->free_list[migratetype]))
                        continue;

                page = list_entry(area->free_list[migratetype].next,
                                                        struct page, lru);
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }

        return NULL;
}


/*
 * This array describes the order in which free lists are fallen back on
 * when the free lists for the desired migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
        [MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
        [MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
        [MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
};
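
/*
 * Reading the table above: __rmqueue_fallback() walks one row left to
 * right.  For example, a MIGRATE_UNMOVABLE request whose own lists are
 * empty tries, in order,
 *
 *	MIGRATE_RECLAIMABLE -> MIGRATE_MOVABLE -> MIGRATE_RESERVE
 *
 * and stops at MIGRATE_RESERVE, which is not stolen here but retried
 * explicitly by __rmqueue().
 */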

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype)
{
        struct page *page;
        unsigned long order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

        for (page = start_page; page <= end_page;) {
                /* Make sure we are not inadvertently changing nodes */
                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));

                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                if (!PageBuddy(page)) {
                        page++;
                        continue;
                }

                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}

static int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (start_pfn < zone->zone_start_pfn)
                start_page = page;
        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype);
}
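
/*
 * Example of the alignment above (illustration only), assuming a
 * hypothetical pageblock_nr_pages of 512: for a page at pfn 1000,
 *
 *	start_pfn = 1000 & ~511   = 512
 *	end_pfn   = 512 + 512 - 1 = 1023
 *
 * so the whole pageblock [512, 1023] is moved as a unit, or nothing
 * at all if that range would cross a zone boundary.
 */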

static void change_pageblock_range(struct page *pageblock_page,
                                        int start_order, int migratetype)
{
        int nr_pageblocks = 1 << (start_order - pageblock_order);

        while (nr_pageblocks--) {
                set_pageblock_migratetype(pageblock_page, migratetype);
                pageblock_page += pageblock_nr_pages;
        }
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
        struct free_area *area;
        int current_order;
        struct page *page;
        int migratetype, i;

        /* Find the largest possible block of pages in the other list */
        for (current_order = MAX_ORDER-1; current_order >= order;
                                                --current_order) {
                for (i = 0;; i++) {
                        migratetype = fallbacks[start_migratetype][i];

                        /* MIGRATE_RESERVE handled later if necessary */
                        if (migratetype == MIGRATE_RESERVE)
                                break;

                        area = &(zone->free_area[current_order]);
                        if (list_empty(&area->free_list[migratetype]))
                                continue;

                        page = list_entry(area->free_list[migratetype].next,
                                        struct page, lru);
                        area->nr_free--;

                        /*
                         * If breaking a large block of pages, move all free
                         * pages to the preferred allocation list. If falling
                         * back for a reclaimable kernel allocation, be more
                         * aggressive about taking ownership of free pages
                         *
                         * On the other hand, never change migration
                         * type of MIGRATE_CMA pageblocks nor move CMA
                         * pages on different free lists. We don't
                         * want unmovable pages to be allocated from
                         * MIGRATE_CMA areas.
                         */
                        if (!is_migrate_cma(migratetype) &&
                            (unlikely(current_order >= pageblock_order / 2) ||
                             start_migratetype == MIGRATE_RECLAIMABLE ||
                             page_group_by_mobility_disabled)) {
                                int pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);

                                /* Claim the whole block if over half of it is free */
                                if (pages >= (1 << (pageblock_order-1)) ||
                                                page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);

                                migratetype = start_migratetype;
                        }

                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);

                        /* Take ownership for orders >= pageblock_order */
                        if (current_order >= pageblock_order &&
                            !is_migrate_cma(migratetype))
                                change_pageblock_range(page, current_order,
                                                        start_migratetype);

                        expand(zone, page, order, current_order, area,
                               is_migrate_cma(migratetype)
                             ? migratetype : start_migratetype);

                        trace_mm_page_alloc_extfrag(page, order, current_order,
                                start_migratetype, migratetype);

                        return page;
                }
        }

        return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        struct page *page;

retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);

        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);

                /*
                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
                 * is used because __rmqueue_smallest is an inline function
                 * and we want just one call site
                 */
                if (!page) {
                        migratetype = MIGRATE_RESERVE;
                        goto retry_reserve;
                }
        }

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype, int cold)
{
        int mt = migratetype, i;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;

                /*
                 * Split buddy pages returned by expand() are received here
                 * in physical page order. The page is added to the caller's
                 * list and the list head then moves forward. From the caller's
                 * perspective, the linked list is ordered by page number in
                 * some conditions. This is useful for IO devices that can
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
                if (likely(cold == 0))
                        list_add(&page->lru, list);
                else
                        list_add_tail(&page->lru, list);
                if (IS_ENABLED(CONFIG_CMA)) {
                        mt = get_pageblock_migratetype(page);
                        if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
                                mt = migratetype;
                }
                set_page_private(page, mt);
                list = &page->lru;
        }
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
        unsigned long flags;
        int to_drain;

        local_irq_save(flags);
        if (pcp->count >= pcp->batch)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
        free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
        unsigned long flags;
        struct zone *zone;

        for_each_populated_zone(zone) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;

                local_irq_save(flags);
                pset = per_cpu_ptr(zone->pageset, cpu);

                pcp = &pset->pcp;
                if (pcp->count) {
                        free_pcppages_bulk(zone, pcp->count, pcp);
                        pcp->count = 0;
                }
                local_irq_restore(flags);
        }
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
        drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * Note that this code is protected against sending an IPI to an offline
 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
 * nothing keeps CPUs from showing up after we populated the cpumask and
 * before the call to on_each_cpu_mask().
 */
void drain_all_pages(void)
{
        int cpu;
        struct per_cpu_pageset *pcp;
        struct zone *zone;

        /*
         * Allocate in the BSS so we won't require allocation in
         * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
         */
        static cpumask_t cpus_with_pcps;

        /*
         * We don't care about racing with CPU hotplug event
         * as offline notification will cause the notified
         * cpu to drain that CPU pcps and on_each_cpu_mask
         * disables preemption as part of its processing
         */
        for_each_online_cpu(cpu) {
                bool has_pcps = false;
                for_each_populated_zone(zone) {
                        pcp = per_cpu_ptr(zone->pageset, cpu);
                        if (pcp->pcp.count) {
                                has_pcps = true;
                                break;
                        }
                }
                if (has_pcps)
                        cpumask_set_cpu(cpu, &cpus_with_pcps);
                else
                        cpumask_clear_cpu(cpu, &cpus_with_pcps);
        }
        on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
        unsigned long pfn, max_zone_pfn;
        unsigned long flags;
        int order, t;
        struct list_head *curr;

        if (!zone->spanned_pages)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        if (!swsusp_page_is_forbidden(page))
                                swsusp_unset_page_free(page);
                }

        for_each_migratetype_order(order, t) {
                list_for_each(curr, &zone->free_area[order].free_list[t]) {
                        unsigned long i;

                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
                        for (i = 0; i < (1UL << order); i++)
                                swsusp_set_page_free(pfn_to_page(pfn + i));
                }
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);

        if (!free_pages_prepare(page, 0))
                return;

        migratetype = get_pageblock_migratetype(page);
        set_page_private(page, migratetype);
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);

        /*
         * We only track unmovable, reclaimable and movable on pcp lists.
         * Free ISOLATE pages back to the allocator because they are being
         * offlined but treat RESERVE as movable pages so we can get those
         * areas back if necessary. Otherwise, we may have to free
         * excessively into the page allocator
         */
        if (migratetype >= MIGRATE_PCPTYPES) {
                if (unlikely(migratetype == MIGRATE_ISOLATE)) {
                        free_one_page(zone, page, 0, migratetype);
                        goto out;
                }
                migratetype = MIGRATE_MOVABLE;
        }

        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (cold)
                list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
                list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pcppages_bulk(zone, pcp->batch, pcp);
                pcp->count -= pcp->batch;
        }

out:
        local_irq_restore(flags);
}

/*
 * Free a list of 0-order pages
 */
void free_hot_cold_page_list(struct list_head *list, int cold)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, list, lru) {
                trace_mm_page_free_batched(page, cold);
                free_hot_cold_page(page, cold);
        }
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
        int i;

        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
        /*
         * Split shadow pages too, because free(page[0]) would
         * otherwise free the whole shadow.
         */
        if (kmemcheck_page_is_tracked(page))
                split_page(virt_to_page(page[0].shadow), order);
#endif

        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
}
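
/*
 * Sketch of the intended calling pattern (hypothetical usage, not
 * from the tree; see the note above about consulting lkml first):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);  // 4 pages
 *	if (page) {
 *		split_page(page, 2);
 *		// page[0..3] each now hold their own reference and
 *		// must be freed one by one:
 *		__free_page(page + 3);           // keep pages 0..2
 *	}
 */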

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
1386int split_free_page(struct page *page)
1387{
1388        unsigned int order;
1389        unsigned long watermark;
1390        struct zone *zone;
1391
1392        BUG_ON(!PageBuddy(page));
1393
1394        zone = page_zone(page);
1395        order = page_order(page);
1396
1397        /* Obey watermarks as if the page was being allocated */
1398        watermark = low_wmark_pages(zone) + (1 << order);
1399        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1400                return 0;
1401
1402        /* Remove page from free list */
1403        list_del(&page->lru);
1404        zone->free_area[order].nr_free--;
1405        rmv_page_order(page);
1406        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1407
1408        /* Split into individual pages */
1409        set_page_refcounted(page);
1410        split_page(page, order);
1411
1412        if (order >= pageblock_order - 1) {
1413                struct page *endpage = page + (1 << order) - 1;
1414                for (; page < endpage; page += pageblock_nr_pages) {
1415                        int mt = get_pageblock_migratetype(page);
1416                        if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
1417                                set_pageblock_migratetype(page,
1418                                                          MIGRATE_MOVABLE);
1419                }
1420        }
1421
1422        return 1 << order;
1423}
1424
1425/*
1426 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1427 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1428 * or two.
1429 */
1430static inline
1431struct page *buffered_rmqueue(struct zone *preferred_zone,
1432                        struct zone *zone, int order, gfp_t gfp_flags,
1433                        int migratetype)
1434{
1435        unsigned long flags;
1436        struct page *page;
1437        int cold = !!(gfp_flags & __GFP_COLD);
1438
1439again:
1440        if (likely(order == 0)) {
1441                struct per_cpu_pages *pcp;
1442                struct list_head *list;
1443
1444                local_irq_save(flags);
1445                pcp = &this_cpu_ptr(zone->pageset)->pcp;
1446                list = &pcp->lists[migratetype];
1447                if (list_empty(list)) {
1448                        pcp->count += rmqueue_bulk(zone, 0,
1449                                        pcp->batch, list,
1450                                        migratetype, cold);
1451                        if (unlikely(list_empty(list)))
1452                                goto failed;
1453                }
1454
1455                if (cold)
1456                        page = list_entry(list->prev, struct page, lru);
1457                else
1458                        page = list_entry(list->next, struct page, lru);
1459
1460                list_del(&page->lru);
1461                pcp->count--;
1462        } else {
1463                if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1464                        /*
1465                         * __GFP_NOFAIL is not to be used in new code.
1466                         *
1467                         * All __GFP_NOFAIL callers should be fixed so that they
1468                         * properly detect and handle allocation failures.
1469                         *
1470                         * We most definitely don't want callers attempting to
1471                         * allocate greater than order-1 page units with
1472                         * __GFP_NOFAIL.
1473                         */
1474                        WARN_ON_ONCE(order > 1);
1475                }
1476                spin_lock_irqsave(&zone->lock, flags);
1477                page = __rmqueue(zone, order, migratetype);
1478                spin_unlock(&zone->lock);
1479                if (!page)
1480                        goto failed;
1481                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1482        }
1483
1484        __count_zone_vm_events(PGALLOC, zone, 1 << order);
1485        zone_statistics(preferred_zone, zone, gfp_flags);
1486        local_irq_restore(flags);
1487
1488        VM_BUG_ON(bad_range(zone, page));
1489        if (prep_new_page(page, order, gfp_flags))
1490                goto again;
1491        return page;
1492
1493failed:
1494        local_irq_restore(flags);
1495        return NULL;
1496}
1497
1498/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1499#define ALLOC_WMARK_MIN         WMARK_MIN
1500#define ALLOC_WMARK_LOW         WMARK_LOW
1501#define ALLOC_WMARK_HIGH        WMARK_HIGH
1502#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */
1503
1504/* Mask to get the watermark bits */
1505#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
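/*
 * Example: ALLOC_NO_WATERMARKS is 0x04, so ALLOC_WMARK_MASK is 0x03 and
 * extracts the WMARK_MIN/WMARK_LOW/WMARK_HIGH index (0, 1 or 2) used in
 * zone->watermark[alloc_flags & ALLOC_WMARK_MASK].
 */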
1506
1507#define ALLOC_HARDER            0x10 /* try to alloc harder */
1508#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
1509#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
1510
1511#ifdef CONFIG_FAIL_PAGE_ALLOC
1512
1513static struct {
1514        struct fault_attr attr;
1515
1516        u32 ignore_gfp_highmem;
1517        u32 ignore_gfp_wait;
1518        u32 min_order;
1519} fail_page_alloc = {
1520        .attr = FAULT_ATTR_INITIALIZER,
1521        .ignore_gfp_wait = 1,
1522        .ignore_gfp_highmem = 1,
1523        .min_order = 1,
1524};
1525
1526static int __init setup_fail_page_alloc(char *str)
1527{
1528        return setup_fault_attr(&fail_page_alloc.attr, str);
1529}
1530__setup("fail_page_alloc=", setup_fail_page_alloc);
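/*
 * Example boot parameter, using the common fault_attr format (see
 * Documentation/fault-injection/fault-injection.txt):
 * "fail_page_alloc=<interval>,<probability>,<space>,<times>"
 */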
1531
1532static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1533{
1534        if (order < fail_page_alloc.min_order)
1535                return 0;
1536        if (gfp_mask & __GFP_NOFAIL)
1537                return 0;
1538        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1539                return 0;
1540        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1541                return 0;
1542
1543        return should_fail(&fail_page_alloc.attr, 1 << order);
1544}
1545
1546#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1547
1548static int __init fail_page_alloc_debugfs(void)
1549{
1550        umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1551        struct dentry *dir;
1552
1553        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1554                                        &fail_page_alloc.attr);
1555        if (IS_ERR(dir))
1556                return PTR_ERR(dir);
1557
1558        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1559                                &fail_page_alloc.ignore_gfp_wait))
1560                goto fail;
1561        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1562                                &fail_page_alloc.ignore_gfp_highmem))
1563                goto fail;
1564        if (!debugfs_create_u32("min-order", mode, dir,
1565                                &fail_page_alloc.min_order))
1566                goto fail;
1567
1568        return 0;
1569fail:
1570        debugfs_remove_recursive(dir);
1571
1572        return -ENOMEM;
1573}
1574
1575late_initcall(fail_page_alloc_debugfs);
1576
1577#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1578
1579#else /* CONFIG_FAIL_PAGE_ALLOC */
1580
1581static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1582{
1583        return 0;
1584}
1585
1586#endif /* CONFIG_FAIL_PAGE_ALLOC */
1587
1588/*
1589 * Return true if free pages are above 'mark'. This takes into account the order
1590 * of the allocation.
1591 */
1592static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1593                      int classzone_idx, int alloc_flags, long free_pages)
1594{
1595        /* free_pages may go negative - that's OK */
1596        long min = mark;
1597        int o;
1598
1599        free_pages -= (1 << order) - 1;
1600        if (alloc_flags & ALLOC_HIGH)
1601                min -= min / 2;
1602        if (alloc_flags & ALLOC_HARDER)
1603                min -= min / 4;
1604
1605        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1606                return false;
1607        for (o = 0; o < order; o++) {
1608                /* At the next order, this order's pages become unavailable */
1609                free_pages -= z->free_area[o].nr_free << o;
1610
1611                /* Require fewer higher order pages to be free */
1612                min >>= 1;
1613
1614                if (free_pages <= min)
1615                        return false;
1616        }
1617        return true;
1618}
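/*
 * Illustrative walk-through with made-up numbers: for an order-2 request
 * with mark = 128 and neither ALLOC_HIGH nor ALLOC_HARDER, free_pages - 3
 * must first exceed 128 plus the lowmem reserve.  The loop then discounts
 * the order-0 and order-1 free pages (unusable for this request) while
 * halving the requirement to 64 and then 32, so a reasonable number of
 * pages of order >= 2 must actually be free.
 */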
1619
1620bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1621                      int classzone_idx, int alloc_flags)
1622{
1623        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1624                                        zone_page_state(z, NR_FREE_PAGES));
1625}
1626
1627bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1628                      int classzone_idx, int alloc_flags)
1629{
1630        long free_pages = zone_page_state(z, NR_FREE_PAGES);
1631
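        /*
         * NR_FREE_PAGES is maintained with per-cpu deltas that can drift
         * from the zone-wide counter.  Near the drift mark, fold in the
         * per-cpu counts for an exact snapshot before deciding.
         */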
1632        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1633                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1634
1635        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1636                                                                free_pages);
1637}
1638
1639#ifdef CONFIG_NUMA
1640/*
1641 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1642 * skip over zones that are not allowed by the cpuset, or that have
1643 * been recently (in the last second) found to be nearly full.  See further
1644 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1645 * that have to skip over a lot of full or unallowed zones.
1646 *
1647 * If the zonelist cache is present in the passed in zonelist, then
1648 * returns a pointer to the allowed node mask (either the current
1649 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1650 *
1651 * If the zonelist cache is not available for this zonelist, does
1652 * nothing and returns NULL.
1653 *
1654 * If the fullzones BITMAP in the zonelist cache is stale (more than
1655 * a second since last zap'd) then we zap it out (clear its bits.)
1656 *
1657 * We hold off even calling zlc_setup, until after we've checked the
1658 * first zone in the zonelist, on the theory that most allocations will
1659 * be satisfied from that first zone, so best to examine that zone as
1660 * quickly as we can.
1661 */
1662static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1663{
1664        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1665        nodemask_t *allowednodes;       /* zonelist_cache approximation */
1666
1667        zlc = zonelist->zlcache_ptr;
1668        if (!zlc)
1669                return NULL;
1670
1671        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1672                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1673                zlc->last_full_zap = jiffies;
1674        }
1675
1676        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1677                                        &cpuset_current_mems_allowed :
1678                                        &node_states[N_HIGH_MEMORY];
1679        return allowednodes;
1680}
1681
1682/*
1683 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1684 * if it is worth looking at further for free memory:
1685 *  1) Check that the zone isn't thought to be full (doesn't have its
1686 *     bit set in the zonelist_cache fullzones BITMAP).
1687 *  2) Check that the zone's node (obtained from the zonelist_cache
1688 *     z_to_n[] mapping) is allowed in the passed-in allowednodes mask.
1689 * Return true (non-zero) if zone is worth looking at further, or
1690 * else return false (zero) if it is not.
1691 *
1692 * This check -ignores- the distinction between various watermarks,
1693 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1694 * found to be full for any variation of these watermarks, it will
1695 * be considered full for up to one second by all requests, unless
1696 * we are so low on memory on all allowed nodes that we are forced
1697 * into the second scan of the zonelist.
1698 *
1699 * In the second scan we ignore this zonelist cache and exactly
1700 * apply the watermarks to all zones, even if it is slower to do so.
1701 * We are low on memory in the second scan, and should leave no stone
1702 * unturned looking for a free page.
1703 */
1704static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1705                                                nodemask_t *allowednodes)
1706{
1707        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1708        int i;                          /* index of *z in zonelist zones */
1709        int n;                          /* node that zone *z is on */
1710
1711        zlc = zonelist->zlcache_ptr;
1712        if (!zlc)
1713                return 1;
1714
1715        i = z - zonelist->_zonerefs;
1716        n = zlc->z_to_n[i];
1717
1718        /* This zone is worth trying if it is allowed but not full */
1719        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1720}
1721
1722/*
1723 * Given 'z' scanning a zonelist, set the corresponding bit in
1724 * zlc->fullzones, so that subsequent attempts to allocate a page
1725 * from that zone don't waste time re-examining it.
1726 */
1727static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1728{
1729        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1730        int i;                          /* index of *z in zonelist zones */
1731
1732        zlc = zonelist->zlcache_ptr;
1733        if (!zlc)
1734                return;
1735
1736        i = z - zonelist->_zonerefs;
1737
1738        set_bit(i, zlc->fullzones);
1739}
1740
1741/*
1742 * clear all zones full, called after direct reclaim makes progress so that
1743 * a zone that was recently full is not skipped over for up to a second
1744 */
1745static void zlc_clear_zones_full(struct zonelist *zonelist)
1746{
1747        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1748
1749        zlc = zonelist->zlcache_ptr;
1750        if (!zlc)
1751                return;
1752
1753        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1754}
1755
1756#else   /* CONFIG_NUMA */
1757
1758static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1759{
1760        return NULL;
1761}
1762
1763static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1764                                nodemask_t *allowednodes)
1765{
1766        return 1;
1767}
1768
1769static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1770{
1771}
1772
1773static void zlc_clear_zones_full(struct zonelist *zonelist)
1774{
1775}
1776#endif  /* CONFIG_NUMA */
1777
1778/*
1779 * get_page_from_freelist goes through the zonelist trying to allocate
1780 * a page.
1781 */
1782static struct page *
1783get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1784                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1785                struct zone *preferred_zone, int migratetype)
1786{
1787        struct zoneref *z;
1788        struct page *page = NULL;
1789        int classzone_idx;
1790        struct zone *zone;
1791        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1792        int zlc_active = 0;             /* set if using zonelist_cache */
1793        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
1794
1795        classzone_idx = zone_idx(preferred_zone);
1796zonelist_scan:
1797        /*
1798         * Scan zonelist, looking for a zone with enough free.
1799         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1800         */
1801        for_each_zone_zonelist_nodemask(zone, z, zonelist,
1802                                                high_zoneidx, nodemask) {
1803                if (NUMA_BUILD && zlc_active &&
1804                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
1805                                continue;
1806                if ((alloc_flags & ALLOC_CPUSET) &&
1807                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
1808                                continue;
1809                /*
1810                 * When allocating a page cache page for writing, we
1811                 * want to get it from a zone that is within its dirty
1812                 * limit, such that no single zone holds more than its
1813                 * proportional share of globally allowed dirty pages.
1814                 * The dirty limits take into account the zone's
1815                 * lowmem reserves and high watermark so that kswapd
1816                 * should be able to balance it without having to
1817                 * write pages from its LRU list.
1818                 *
1819                 * This may look like it could increase pressure on
1820                 * lower zones by failing allocations in higher zones
1821                 * before they are full.  But the pages that do spill
1822                 * over are limited as the lower zones are protected
1823                 * by this very same mechanism.  It should not become
1824                 * a practical burden to them.
1825                 *
1826                 * XXX: For now, allow allocations to potentially
1827                 * exceed the per-zone dirty limit in the slowpath
1828                 * (ALLOC_WMARK_LOW unset) before going into reclaim,
1829                 * which is important when on a NUMA setup the allowed
1830                 * zones are together not big enough to reach the
1831                 * global limit.  The proper fix for these situations
1832                 * will require awareness of zones in the
1833                 * dirty-throttling and the flusher threads.
1834                 */
1835                if ((alloc_flags & ALLOC_WMARK_LOW) &&
1836                    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1837                        goto this_zone_full;
1838
1839                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1840                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1841                        unsigned long mark;
1842                        int ret;
1843
1844                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1845                        if (zone_watermark_ok(zone, order, mark,
1846                                    classzone_idx, alloc_flags))
1847                                goto try_this_zone;
1848
1849                        if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1850                                /*
1851                                 * we do zlc_setup only if there are multiple
1852                                 * nodes, and only before considering the first
1853                                 * zone allowed by the cpuset.
1854                                 */
1855                                allowednodes = zlc_setup(zonelist, alloc_flags);
1856                                zlc_active = 1;
1857                                did_zlc_setup = 1;
1858                        }
1859
1860                        if (zone_reclaim_mode == 0)
1861                                goto this_zone_full;
1862
1863                        /*
1864                         * As we may have just activated ZLC, check if the first
1865                         * eligible zone has failed zone_reclaim recently.
1866                         */
1867                        if (NUMA_BUILD && zlc_active &&
1868                                !zlc_zone_worth_trying(zonelist, z, allowednodes))
1869                                continue;
1870
1871                        ret = zone_reclaim(zone, gfp_mask, order);
1872                        switch (ret) {
1873                        case ZONE_RECLAIM_NOSCAN:
1874                                /* did not scan */
1875                                continue;
1876                        case ZONE_RECLAIM_FULL:
1877                                /* scanned but unreclaimable */
1878                                continue;
1879                        default:
1880                                /* did we reclaim enough */
1881                                if (!zone_watermark_ok(zone, order, mark,
1882                                                classzone_idx, alloc_flags))
1883                                        goto this_zone_full;
1884                        }
1885                }
1886
1887try_this_zone:
1888                page = buffered_rmqueue(preferred_zone, zone, order,
1889                                                gfp_mask, migratetype);
1890                if (page)
1891                        break;
1892this_zone_full:
1893                if (NUMA_BUILD)
1894                        zlc_mark_zone_full(zonelist, z);
1895        }
1896
1897        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1898                /* Disable zlc cache for second zonelist scan */
1899                zlc_active = 0;
1900                goto zonelist_scan;
1901        }
1902        return page;
1903}
1904
1905/*
1906 * Large machines with many possible nodes should not always dump per-node
1907 * meminfo in irq context.
1908 */
1909static inline bool should_suppress_show_mem(void)
1910{
1911        bool ret = false;
1912
1913#if NODES_SHIFT > 8
1914        ret = in_interrupt();
1915#endif
1916        return ret;
1917}
1918
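/*
 * Rate-limit allocation failure warnings; with the default parameters this
 * allows roughly a burst of 10 messages per 5 second interval.
 */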
1919static DEFINE_RATELIMIT_STATE(nopage_rs,
1920                DEFAULT_RATELIMIT_INTERVAL,
1921                DEFAULT_RATELIMIT_BURST);
1922
1923void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1924{
1925        unsigned int filter = SHOW_MEM_FILTER_NODES;
1926
1927        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
1928            debug_guardpage_minorder() > 0)
1929                return;
1930
1931        /*
1932         * This documents exceptions given to allocations in certain
1933         * contexts that are allowed to allocate outside current's set
1934         * of allowed nodes.
1935         */
1936        if (!(gfp_mask & __GFP_NOMEMALLOC))
1937                if (test_thread_flag(TIF_MEMDIE) ||
1938                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
1939                        filter &= ~SHOW_MEM_FILTER_NODES;
1940        if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
1941                filter &= ~SHOW_MEM_FILTER_NODES;
1942
1943        if (fmt) {
1944                struct va_format vaf;
1945                va_list args;
1946
1947                va_start(args, fmt);
1948
1949                vaf.fmt = fmt;
1950                vaf.va = &args;
1951
1952                pr_warn("%pV", &vaf);
1953
1954                va_end(args);
1955        }
1956
1957        pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
1958                current->comm, order, gfp_mask);
1959
1960        dump_stack();
1961        if (!should_suppress_show_mem())
1962                show_mem(filter);
1963}
1964
1965static inline int
1966should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1967                                unsigned long did_some_progress,
1968                                unsigned long pages_reclaimed)
1969{
1970        /* Do not loop if specifically requested */
1971        if (gfp_mask & __GFP_NORETRY)
1972                return 0;
1973
1974        /* Always retry if specifically requested */
1975        if (gfp_mask & __GFP_NOFAIL)
1976                return 1;
1977
1978        /*
1979         * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
1980         * making forward progress without invoking OOM. Suspend also disables
1981         * storage devices so kswapd will not help. Bail if we are suspending.
1982         */
1983        if (!did_some_progress && pm_suspended_storage())
1984                return 0;
1985
1986        /*
1987         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1988         * means __GFP_NOFAIL, but that may not be true in other
1989         * implementations.
1990         */
1991        if (order <= PAGE_ALLOC_COSTLY_ORDER)
1992                return 1;
1993
1994        /*
1995         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1996         * specified, then we retry until we no longer reclaim any pages
1997         * (above), or we've reclaimed an order of pages at least as
1998         * large as the allocation's order. In both cases, if the
1999         * allocation still fails, we stop retrying.
2000         */
2001        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
2002                return 1;
2003
2004        return 0;
2005}
2006
2007static inline struct page *
2008__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2009        struct zonelist *zonelist, enum zone_type high_zoneidx,
2010        nodemask_t *nodemask, struct zone *preferred_zone,
2011        int migratetype)
2012{
2013        struct page *page;
2014
2015        /* Acquire the OOM killer lock for the zones in zonelist */
2016        if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
2017                schedule_timeout_uninterruptible(1);
2018                return NULL;
2019        }
2020
2021        /*
2022         * Go through the zonelist yet one more time, keeping a very high
2023         * watermark here; this is only to catch a parallel oom killing, and
2024         * we must fail if we're still under heavy pressure.
2025         */
2026        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
2027                order, zonelist, high_zoneidx,
2028                ALLOC_WMARK_HIGH|ALLOC_CPUSET,
2029                preferred_zone, migratetype);
2030        if (page)
2031                goto out;
2032
2033        if (!(gfp_mask & __GFP_NOFAIL)) {
2034                /* The OOM killer will not help higher order allocs */
2035                if (order > PAGE_ALLOC_COSTLY_ORDER)
2036                        goto out;
2037                /* The OOM killer does not needlessly kill tasks for lowmem */
2038                if (high_zoneidx < ZONE_NORMAL)
2039                        goto out;
2040                /*
2041                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
2042                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
2043                 * The caller should handle page allocation failure by itself if
2044                 * it specifies __GFP_THISNODE.
2045                 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
2046                 */
2047                if (gfp_mask & __GFP_THISNODE)
2048                        goto out;
2049        }
2050        /* Exhausted what can be done so it's blamo time */
2051        out_of_memory(zonelist, gfp_mask, order, nodemask, false);
2052
2053out:
2054        clear_zonelist_oom(zonelist, gfp_mask);
2055        return page;
2056}
2057
2058#ifdef CONFIG_COMPACTION
2059/* Try memory compaction for high-order allocations before reclaim */
2060static struct page *
2061__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2062        struct zonelist *zonelist, enum zone_type high_zoneidx,
2063        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2064        int migratetype, bool sync_migration,
2065        bool *deferred_compaction,
2066        unsigned long *did_some_progress)
2067{
2068        struct page *page;
2069
2070        if (!order)
2071                return NULL;
2072
2073        if (compaction_deferred(preferred_zone, order)) {
2074                *deferred_compaction = true;
2075                return NULL;
2076        }
2077
2078        current->flags |= PF_MEMALLOC;
2079        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2080                                                nodemask, sync_migration);
2081        current->flags &= ~PF_MEMALLOC;
2082        if (*did_some_progress != COMPACT_SKIPPED) {
2083
2084                /* Page migration frees to the PCP lists but we want merging */
2085                drain_pages(get_cpu());
2086                put_cpu();
2087
2088                page = get_page_from_freelist(gfp_mask, nodemask,
2089                                order, zonelist, high_zoneidx,
2090                                alloc_flags, preferred_zone,
2091                                migratetype);
2092                if (page) {
2093                        preferred_zone->compact_considered = 0;
2094                        preferred_zone->compact_defer_shift = 0;
2095                        if (order >= preferred_zone->compact_order_failed)
2096                                preferred_zone->compact_order_failed = order + 1;
2097                        count_vm_event(COMPACTSUCCESS);
2098                        return page;
2099                }
2100
2101                /*
2102                 * It's bad if a compaction run occurs and fails.
2103                 * The most likely reason is that pages exist,
2104                 * but not enough to satisfy the watermarks.
2105                 */
2106                count_vm_event(COMPACTFAIL);
2107
2108                /*
2109                 * As async compaction considers a subset of pageblocks, only
2110                 * defer if the failure was a sync compaction failure.
2111                 */
2112                if (sync_migration)
2113                        defer_compaction(preferred_zone, order);
2114
2115                cond_resched();
2116        }
2117
2118        return NULL;
2119}
2120#else
2121static inline struct page *
2122__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2123        struct zonelist *zonelist, enum zone_type high_zoneidx,
2124        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2125        int migratetype, bool sync_migration,
2126        bool *deferred_compaction,
2127        unsigned long *did_some_progress)
2128{
2129        return NULL;
2130}
2131#endif /* CONFIG_COMPACTION */
2132
2133/* Perform direct synchronous page reclaim */
2134static int
2135__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2136                  nodemask_t *nodemask)
2137{
2138        struct reclaim_state reclaim_state;
2139        int progress;
2140
2141        cond_resched();
2142
2143        /* We now go into synchronous reclaim */
2144        cpuset_memory_pressure_bump();
2145        current->flags |= PF_MEMALLOC;
2146        lockdep_set_current_reclaim_state(gfp_mask);
2147        reclaim_state.reclaimed_slab = 0;
2148        current->reclaim_state = &reclaim_state;
2149
2150        progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2151
2152        current->reclaim_state = NULL;
2153        lockdep_clear_current_reclaim_state();
2154        current->flags &= ~PF_MEMALLOC;
2155
2156        cond_resched();
2157
2158        return progress;
2159}
2160
2161/* The really slow allocator path where we enter direct reclaim */
2162static inline struct page *
2163__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2164        struct zonelist *zonelist, enum zone_type high_zoneidx,
2165        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2166        int migratetype, unsigned long *did_some_progress)
2167{
2168        struct page *page = NULL;
2169        bool drained = false;
2170
2171        *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2172                                               nodemask);
2173        if (unlikely(!(*did_some_progress)))
2174                return NULL;
2175
2176        /* After successful reclaim, reconsider all zones for allocation */
2177        if (NUMA_BUILD)
2178                zlc_clear_zones_full(zonelist);
2179
2180retry:
2181        page = get_page_from_freelist(gfp_mask, nodemask, order,
2182                                        zonelist, high_zoneidx,
2183                                        alloc_flags, preferred_zone,
2184                                        migratetype);
2185
2186        /*
2187         * If an allocation failed after direct reclaim, it could be because
2188         * pages are pinned on the per-cpu lists. Drain them and try again
2189         */
2190        if (!page && !drained) {
2191                drain_all_pages();
2192                drained = true;
2193                goto retry;
2194        }
2195
2196        return page;
2197}
2198
2199/*
2200 * This is called in the allocator slow-path if the allocation request is of
2201 * sufficient urgency to ignore watermarks and take other desperate measures
2202 */
2203static inline struct page *
2204__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2205        struct zonelist *zonelist, enum zone_type high_zoneidx,
2206        nodemask_t *nodemask, struct zone *preferred_zone,
2207        int migratetype)
2208{
2209        struct page *page;
2210
2211        do {
2212                page = get_page_from_freelist(gfp_mask, nodemask, order,
2213                        zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2214                        preferred_zone, migratetype);
2215
2216                if (!page && (gfp_mask & __GFP_NOFAIL))
2217                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2218        } while (!page && (gfp_mask & __GFP_NOFAIL));
2219
2220        return page;
2221}
2222
2223static inline
2224void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2225                                                enum zone_type high_zoneidx,
2226                                                enum zone_type classzone_idx)
2227{
2228        struct zoneref *z;
2229        struct zone *zone;
2230
2231        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2232                wakeup_kswapd(zone, order, classzone_idx);
2233}
2234
2235static inline int
2236gfp_to_alloc_flags(gfp_t gfp_mask)
2237{
2238        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2239        const gfp_t wait = gfp_mask & __GFP_WAIT;
2240
2241        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2242        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2243
2244        /*
2245         * The caller may dip into page reserves a bit more if the caller
2246         * cannot run direct reclaim, or if the caller has realtime scheduling
2247         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2248         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2249         */
2250        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2251
2252        if (!wait) {
2253                /*
2254                 * Not worth trying to allocate harder for
2255                 * __GFP_NOMEMALLOC even if it can't schedule.
2256                 */
2257                if  (!(gfp_mask & __GFP_NOMEMALLOC))
2258                        alloc_flags |= ALLOC_HARDER;
2259                /*
2260                 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2261                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2262                 */
2263                alloc_flags &= ~ALLOC_CPUSET;
2264        } else if (unlikely(rt_task(current)) && !in_interrupt())
2265                alloc_flags |= ALLOC_HARDER;
2266
2267        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2268                if (!in_interrupt() &&
2269                    ((current->flags & PF_MEMALLOC) ||
2270                     unlikely(test_thread_flag(TIF_MEMDIE))))
2271                        alloc_flags |= ALLOC_NO_WATERMARKS;
2272        }
2273
2274        return alloc_flags;
2275}
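/*
 * Example: GFP_ATOMIC sets __GFP_HIGH but not __GFP_WAIT, yielding
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER with ALLOC_CPUSET cleared:
 * atomic contexts may dip below the min watermark and ignore cpuset
 * constraints rather than fail.
 */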
2276
2277static inline struct page *
2278__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2279        struct zonelist *zonelist, enum zone_type high_zoneidx,
2280        nodemask_t *nodemask, struct zone *preferred_zone,
2281        int migratetype)
2282{
2283        const gfp_t wait = gfp_mask & __GFP_WAIT;
2284        struct page *page = NULL;
2285        int alloc_flags;
2286        unsigned long pages_reclaimed = 0;
2287        unsigned long did_some_progress;
2288        bool sync_migration = false;
2289        bool deferred_compaction = false;
2290
2291        /*
2292         * In the slowpath, we sanity check order to avoid ever trying to
2293         * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2294         * be using allocators in order of preference for an area that is
2295         * too large.
2296         */
2297        if (order >= MAX_ORDER) {
2298                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2299                return NULL;
2300        }
2301
2302        /*
2303         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2304         * __GFP_NOWARN set) should not cause reclaim since the subsystem
2305         * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
2306         * using a larger set of nodes after it has established that the
2307         * allowed per node queues are empty and that nodes are
2308         * over allocated.
2309         */
2310        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2311                goto nopage;
2312
2313restart:
2314        if (!(gfp_mask & __GFP_NO_KSWAPD))
2315                wake_all_kswapd(order, zonelist, high_zoneidx,
2316                                                zone_idx(preferred_zone));
2317
2318        /*
2319         * OK, we're below the kswapd watermark and have kicked background
2320         * reclaim. Now things get more complex, so set up alloc_flags according
2321         * to how we want to proceed.
2322         */
2323        alloc_flags = gfp_to_alloc_flags(gfp_mask);
2324
2325        /*
2326         * Find the true preferred zone if the allocation is unconstrained by
2327         * cpusets.
2328         */
2329        if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2330                first_zones_zonelist(zonelist, high_zoneidx, NULL,
2331                                        &preferred_zone);
2332
2333rebalance:
2334        /* This is the last chance, in general, before the goto nopage. */
2335        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2336                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2337                        preferred_zone, migratetype);
2338        if (page)
2339                goto got_pg;
2340
2341        /* Allocate without watermarks if the context allows */
2342        if (alloc_flags & ALLOC_NO_WATERMARKS) {
2343                page = __alloc_pages_high_priority(gfp_mask, order,
2344                                zonelist, high_zoneidx, nodemask,
2345                                preferred_zone, migratetype);
2346                if (page)
2347                        goto got_pg;
2348        }
2349
2350        /* Atomic allocations - we can't balance anything */
2351        if (!wait)
2352                goto nopage;
2353
2354        /* Avoid recursion of direct reclaim */
2355        if (current->flags & PF_MEMALLOC)
2356                goto nopage;
2357
2358        /* Avoid allocations with no watermarks from looping endlessly */
2359        if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2360                goto nopage;
2361
2362        /*
2363         * Try direct compaction. The first pass is asynchronous. Subsequent
2364         * attempts after direct reclaim are synchronous
2365         */
2366        page = __alloc_pages_direct_compact(gfp_mask, order,
2367                                        zonelist, high_zoneidx,
2368                                        nodemask,
2369                                        alloc_flags, preferred_zone,
2370                                        migratetype, sync_migration,
2371                                        &deferred_compaction,
2372                                        &did_some_progress);
2373        if (page)
2374                goto got_pg;
2375        sync_migration = true;
2376
2377        /*
2378         * If compaction is deferred for high-order allocations, it is because
2379         * sync compaction recently failed. If this is the case and the caller
2380         * has requested that the system not be heavily disrupted, fail the
2381         * allocation now instead of entering direct reclaim.
2382         */
2383        if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
2384                goto nopage;
2385
2386        /* Try direct reclaim and then allocating */
2387        page = __alloc_pages_direct_reclaim(gfp_mask, order,
2388                                        zonelist, high_zoneidx,
2389                                        nodemask,
2390                                        alloc_flags, preferred_zone,
2391                                        migratetype, &did_some_progress);
2392        if (page)
2393                goto got_pg;
2394
2395        /*
2396         * If we failed to make any progress reclaiming, then we are
2397         * running out of options and have to consider going OOM
2398         */
2399        if (!did_some_progress) {
2400                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2401                        if (oom_killer_disabled)
2402                                goto nopage;
2403                        /* Coredumps can quickly deplete all memory reserves */
2404                        if ((current->flags & PF_DUMPCORE) &&
2405                            !(gfp_mask & __GFP_NOFAIL))
2406                                goto nopage;
2407                        page = __alloc_pages_may_oom(gfp_mask, order,
2408                                        zonelist, high_zoneidx,
2409                                        nodemask, preferred_zone,
2410                                        migratetype);
2411                        if (page)
2412                                goto got_pg;
2413
2414                        if (!(gfp_mask & __GFP_NOFAIL)) {
2415                                /*
2416                                 * The oom killer is not called for high-order
2417                                 * allocations that may fail, so if no progress
2418                                 * is being made, there are no other options and
2419                                 * retrying is unlikely to help.
2420                                 */
2421                                if (order > PAGE_ALLOC_COSTLY_ORDER)
2422                                        goto nopage;
2423                                /*
2424                                 * The oom killer is not called for lowmem
2425                                 * allocations to prevent needlessly killing
2426                                 * innocent tasks.
2427                                 */
2428                                if (high_zoneidx < ZONE_NORMAL)
2429                                        goto nopage;
2430                        }
2431
2432                        goto restart;
2433                }
2434        }
2435
2436        /* Check if we should retry the allocation */
2437        pages_reclaimed += did_some_progress;
2438        if (should_alloc_retry(gfp_mask, order, did_some_progress,
2439                                                pages_reclaimed)) {
2440                /* Wait for some write requests to complete then retry */
2441                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2442                goto rebalance;
2443        } else {
2444                /*
2445                 * High-order allocations do not necessarily loop after
2446                 * direct reclaim, and reclaim/compaction depends on compaction
2447                 * being called after reclaim, so call it directly if necessary.
2448                 */
2449                page = __alloc_pages_direct_compact(gfp_mask, order,
2450                                        zonelist, high_zoneidx,
2451                                        nodemask,
2452                                        alloc_flags, preferred_zone,
2453                                        migratetype, sync_migration,
2454                                        &deferred_compaction,
2455                                        &did_some_progress);
2456                if (page)
2457                        goto got_pg;
2458        }
2459
2460nopage:
2461        warn_alloc_failed(gfp_mask, order, NULL);
2462        return page;
2463got_pg:
2464        if (kmemcheck_enabled)
2465                kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2466        return page;
2467
2468}
2469
2470/*
2471 * This is the 'heart' of the zoned buddy allocator.
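 *
 * Wrappers such as alloc_pages() and __get_free_pages() eventually funnel
 * into this function with an appropriate zonelist and nodemask.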
2472 */
2473struct page *
2474__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2475                        struct zonelist *zonelist, nodemask_t *nodemask)
2476{
2477        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2478        struct zone *preferred_zone;
2479        struct page *page = NULL;
2480        int migratetype = allocflags_to_migratetype(gfp_mask);
2481        unsigned int cpuset_mems_cookie;
2482
2483        gfp_mask &= gfp_allowed_mask;
2484
2485        lockdep_trace_alloc(gfp_mask);
2486
2487        might_sleep_if(gfp_mask & __GFP_WAIT);
2488
2489        if (should_fail_alloc_page(gfp_mask, order))
2490                return NULL;
2491
2492        /*
2493         * Check the zones suitable for the gfp_mask contain at least one
2494         * valid zone. It's possible to have an empty zonelist as a result
2495         * of GFP_THISNODE and a memoryless node
2496         */
2497        if (unlikely(!zonelist->_zonerefs->zone))
2498                return NULL;
2499
2500retry_cpuset:
2501        cpuset_mems_cookie = get_mems_allowed();
2502
2503        /* The preferred zone is used for statistics later */
2504        first_zones_zonelist(zonelist, high_zoneidx,
2505                                nodemask ? : &cpuset_current_mems_allowed,
2506                                &preferred_zone);
2507        if (!preferred_zone)
2508                goto out;
2509
2510        /* First allocation attempt */
2511        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2512                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2513                        preferred_zone, migratetype);
2514        if (unlikely(!page))
2515                page = __alloc_pages_slowpath(gfp_mask, order,
2516                                zonelist, high_zoneidx, nodemask,
2517                                preferred_zone, migratetype);
2518
2519        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2520
2521out:
2522        /*
2523         * When updating a task's mems_allowed, it is possible to race with
2524         * parallel threads in such a way that an allocation can fail while
2525         * the mask is being updated. If a page allocation is about to fail,
2526         * check if the cpuset changed during allocation and if so, retry.
2527         */
2528        if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
2529                goto retry_cpuset;
2530
2531        return page;
2532}
2533EXPORT_SYMBOL(__alloc_pages_nodemask);
2534
2535/*
2536 * Common helper functions.
2537 */
2538unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2539{
2540        struct page *page;
2541
2542        /*
2543         * __get_free_pages() returns a 32-bit address, which cannot represent
2544         * a highmem page
2545         */
2546        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2547
2548        page = alloc_pages(gfp_mask, order);
2549        if (!page)
2550                return 0;
2551        return (unsigned long) page_address(page);
2552}
2553EXPORT_SYMBOL(__get_free_pages);
2554
2555unsigned long get_zeroed_page(gfp_t gfp_mask)
2556{
2557        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2558}
2559EXPORT_SYMBOL(get_zeroed_page);
2560
2561void __free_pages(struct page *page, unsigned int order)
2562{
2563        if (put_page_testzero(page)) {
2564                if (order == 0)
2565                        free_hot_cold_page(page, 0);
2566                else
2567                        __free_pages_ok(page, order);
2568        }
2569}
2570
2571EXPORT_SYMBOL(__free_pages);
2572
2573void free_pages(unsigned long addr, unsigned int order)
2574{
2575        if (addr != 0) {
2576                VM_BUG_ON(!virt_addr_valid((void *)addr));
2577                __free_pages(virt_to_page((void *)addr), order);
2578        }
2579}
2580
2581EXPORT_SYMBOL(free_pages);
2582
2583static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2584{
2585        if (addr) {
2586                unsigned long alloc_end = addr + (PAGE_SIZE << order);
2587                unsigned long used = addr + PAGE_ALIGN(size);
2588
2589                split_page(virt_to_page((void *)addr), order);
2590                while (used < alloc_end) {
2591                        free_page(used);
2592                        used += PAGE_SIZE;
2593                }
2594        }
2595        return (void *)addr;
2596}
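/*
 * Illustrative example: alloc_pages_exact(5 * PAGE_SIZE, ...) rounds up to
 * an order-3 (8-page) block; make_alloc_exact() then split_page()s it into
 * order-0 pages and frees the trailing three, leaving exactly the five
 * pages requested.
 */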
2597
2598/**
2599 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2600 * @size: the number of bytes to allocate
2601 * @gfp_mask: GFP flags for the allocation
2602 *
2603 * This function is similar to alloc_pages(), except that it allocates the
2604 * minimum number of pages to satisfy the request.  alloc_pages() can only
2605 * allocate memory in power-of-two pages.
2606 *
2607 * This function is also limited by MAX_ORDER.
2608 *
2609 * Memory allocated by this function must be released by free_pages_exact().
2610 */
2611void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2612{
2613        unsigned int order = get_order(size);
2614        unsigned long addr;
2615
2616        addr = __get_free_pages(gfp_mask, order);
2617        return make_alloc_exact(addr, order, size);
2618}
2619EXPORT_SYMBOL(alloc_pages_exact);
2620
2621/**
2622 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2623 *                         pages on a node.
2624 * @nid: the preferred node ID where memory should be allocated
2625 * @size: the number of bytes to allocate
2626 * @gfp_mask: GFP flags for the allocation
2627 *
2628 * Like alloc_pages_exact(), but tries to allocate on node nid first before
2629 * falling back.
2630 * Note this is not alloc_pages_exact_node(), which allocates on a specific
2631 * node but is not exact.
2632 */
2633void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2634{
2635        unsigned order = get_order(size);
2636        struct page *p = alloc_pages_node(nid, gfp_mask, order);
2637        if (!p)
2638                return NULL;
2639        return make_alloc_exact((unsigned long)page_address(p), order, size);
2640}
2641EXPORT_SYMBOL(alloc_pages_exact_nid);
2642
2643/**
2644 * free_pages_exact - release memory allocated via alloc_pages_exact()
2645 * @virt: the value returned by alloc_pages_exact.
2646 * @size: size of allocation, same value as passed to alloc_pages_exact().
2647 *
2648 * Release the memory allocated by a previous call to alloc_pages_exact.
2649 */
2650void free_pages_exact(void *virt, size_t size)
2651{
2652        unsigned long addr = (unsigned long)virt;
2653        unsigned long end = addr + PAGE_ALIGN(size);
2654
2655        while (addr < end) {
2656                free_page(addr);
2657                addr += PAGE_SIZE;
2658        }
2659}
2660EXPORT_SYMBOL(free_pages_exact);
2661
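/*
 * Sum, over each zone in the local node's zonelist up to 'offset', the
 * present pages above the zone's high watermark; pages below that mark are
 * treated as reserved for the zone rather than generally allocatable.
 */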
2662static unsigned int nr_free_zone_pages(int offset)
2663{
2664        struct zoneref *z;
2665        struct zone *zone;
2666
2667        /* Just pick one node, since fallback list is circular */
2668        unsigned int sum = 0;
2669
2670        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2671
2672        for_each_zone_zonelist(zone, z, zonelist, offset) {
2673                unsigned long size = zone->present_pages;
2674                unsigned long high = high_wmark_pages(zone);
2675                if (size > high)
2676                        sum += size - high;
2677        }
2678
2679        return sum;
2680}
2681
2682/*
2683 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2684 */
2685unsigned int nr_free_buffer_pages(void)
2686{
2687        return nr_free_zone_pages(gfp_zone(GFP_USER));
2688}
2689EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2690
2691/*
2692 * Amount of free RAM allocatable within all zones
2693 */
2694unsigned int nr_free_pagecache_pages(void)
2695{
2696        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2697}
2698
2699static inline void show_node(struct zone *zone)
2700{
2701        if (NUMA_BUILD)
2702                printk("Node %d ", zone_to_nid(zone));
2703}
2704
2705void si_meminfo(struct sysinfo *val)
2706{
2707        val->totalram = totalram_pages;
2708        val->sharedram = 0;
2709        val->freeram = global_page_state(NR_FREE_PAGES);
2710        val->bufferram = nr_blockdev_pages();
2711        val->totalhigh = totalhigh_pages;
2712        val->freehigh = nr_free_highpages();
2713        val->mem_unit = PAGE_SIZE;
2714}
2715
2716EXPORT_SYMBOL(si_meminfo);
2717
2718#ifdef CONFIG_NUMA
2719void si_meminfo_node(struct sysinfo *val, int nid)
2720{
2721        pg_data_t *pgdat = NODE_DATA(nid);
2722
2723        val->totalram = pgdat->node_present_pages;
2724        val->freeram = node_page_state(nid, NR_FREE_PAGES);
2725#ifdef CONFIG_HIGHMEM
2726        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2727        val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2728                        NR_FREE_PAGES);
2729#else
2730        val->totalhigh = 0;
2731        val->freehigh = 0;
2732#endif
2733        val->mem_unit = PAGE_SIZE;
2734}
2735#endif
2736
2737/*
2738 * Determine whether the node should be displayed or not, depending on whether
2739 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2740 */
2741bool skip_free_areas_node(unsigned int flags, int nid)
2742{
2743        bool ret = false;
2744        unsigned int cpuset_mems_cookie;
2745
2746        if (!(flags & SHOW_MEM_FILTER_NODES))
2747                goto out;
2748
2749        do {
2750                cpuset_mems_cookie = get_mems_allowed();
2751                ret = !node_isset(nid, cpuset_current_mems_allowed);
2752        } while (!put_mems_allowed(cpuset_mems_cookie));
2753out:
2754        return ret;
2755}
2756
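/* Convert a page count to kilobytes: PAGE_SHIFT - 10, as 1KB = 2^10 bytes. */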
2757#define K(x) ((x) << (PAGE_SHIFT-10))
2758
2759/*
2760 * Show free area list (used e.g. by the Shift+Scroll Lock show-mem handler).
2761 * We also calculate the percentage fragmentation. We do this by counting the
2762 * memory on each free list with the exception of the first item on the list.
2763 * Suppresses nodes that are not allowed by current's cpuset if
2764 * SHOW_MEM_FILTER_NODES is passed.
2765 */
2766void show_free_areas(unsigned int filter)
2767{
2768        int cpu;
2769        struct zone *zone;
2770
2771        for_each_populated_zone(zone) {
2772                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2773                        continue;
2774                show_node(zone);
2775                printk("%s per-cpu:\n", zone->name);
2776
2777                for_each_online_cpu(cpu) {
2778                        struct per_cpu_pageset *pageset;
2779
2780                        pageset = per_cpu_ptr(zone->pageset, cpu);
2781
2782                        printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2783                               cpu, pageset->pcp.high,
2784                               pageset->pcp.batch, pageset->pcp.count);
2785                }
2786        }
2787
2788        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2789                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2790                " unevictable:%lu"
2791                " dirty:%lu writeback:%lu unstable:%lu\n"
2792                " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2793                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2794                global_page_state(NR_ACTIVE_ANON),
2795                global_page_state(NR_INACTIVE_ANON),
2796                global_page_state(NR_ISOLATED_ANON),
2797                global_page_state(NR_ACTIVE_FILE),
2798                global_page_state(NR_INACTIVE_FILE),
2799                global_page_state(NR_ISOLATED_FILE),
2800                global_page_state(NR_UNEVICTABLE),
2801                global_page_state(NR_FILE_DIRTY),
2802                global_page_state(NR_WRITEBACK),
2803                global_page_state(NR_UNSTABLE_NFS),
2804                global_page_state(NR_FREE_PAGES),
2805                global_page_state(NR_SLAB_RECLAIMABLE),
2806                global_page_state(NR_SLAB_UNRECLAIMABLE),
2807                global_page_state(NR_FILE_MAPPED),
2808                global_page_state(NR_SHMEM),
2809                global_page_state(NR_PAGETABLE),
2810                global_page_state(NR_BOUNCE));
2811
2812        for_each_populated_zone(zone) {
2813                int i;
2814
2815                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2816                        continue;
2817                show_node(zone);
2818                printk("%s"
2819                        " free:%lukB"
2820                        " min:%lukB"
2821                        " low:%lukB"
2822                        " high:%lukB"
2823                        " active_anon:%lukB"
2824                        " inactive_anon:%lukB"
2825                        " active_file:%lukB"
2826                        " inactive_file:%lukB"
2827                        " unevictable:%lukB"
2828                        " isolated(anon):%lukB"
2829                        " isolated(file):%lukB"
2830                        " present:%lukB"
2831                        " mlocked:%lukB"
2832                        " dirty:%lukB"
2833                        " writeback:%lukB"
2834                        " mapped:%lukB"
2835                        " shmem:%lukB"
2836                        " slab_reclaimable:%lukB"
2837                        " slab_unreclaimable:%lukB"
2838                        " kernel_stack:%lukB"
2839                        " pagetables:%lukB"
2840                        " unstable:%lukB"
2841                        " bounce:%lukB"
2842                        " writeback_tmp:%lukB"
2843                        " pages_scanned:%lu"
2844                        " all_unreclaimable? %s"
2845                        "\n",
2846                        zone->name,
2847                        K(zone_page_state(zone, NR_FREE_PAGES)),
2848                        K(min_wmark_pages(zone)),
2849                        K(low_wmark_pages(zone)),
2850                        K(high_wmark_pages(zone)),
2851                        K(zone_page_state(zone, NR_ACTIVE_ANON)),
2852                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
2853                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
2854                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
2855                        K(zone_page_state(zone, NR_UNEVICTABLE)),
2856                        K(zone_page_state(zone, NR_ISOLATED_ANON)),
2857                        K(zone_page_state(zone, NR_ISOLATED_FILE)),
2858                        K(zone->present_pages),
2859                        K(zone_page_state(zone, NR_MLOCK)),
2860                        K(zone_page_state(zone, NR_FILE_DIRTY)),
2861                        K(zone_page_state(zone, NR_WRITEBACK)),
2862                        K(zone_page_state(zone, NR_FILE_MAPPED)),
2863                        K(zone_page_state(zone, NR_SHMEM)),
2864                        K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2865                        K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2866                        zone_page_state(zone, NR_KERNEL_STACK) *
2867                                THREAD_SIZE / 1024,
2868                        K(zone_page_state(zone, NR_PAGETABLE)),
2869                        K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2870                        K(zone_page_state(zone, NR_BOUNCE)),
2871                        K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2872                        zone->pages_scanned,
2873                        (zone->all_unreclaimable ? "yes" : "no")
2874                        );
2875                printk("lowmem_reserve[]:");
2876                for (i = 0; i < MAX_NR_ZONES; i++)
2877                        printk(" %lu", zone->lowmem_reserve[i]);
2878                printk("\n");
2879        }
2880
2881        for_each_populated_zone(zone) {
2882                unsigned long nr[MAX_ORDER], flags, order, total = 0;
2883
2884                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2885                        continue;
2886                show_node(zone);
2887                printk("%s: ", zone->name);
2888
2889                spin_lock_irqsave(&zone->lock, flags);
2890                for (order = 0; order < MAX_ORDER; order++) {
2891                        nr[order] = zone->free_area[order].nr_free;
2892                        total += nr[order] << order;
2893                }
2894                spin_unlock_irqrestore(&zone->lock, flags);
2895                for (order = 0; order < MAX_ORDER; order++)
2896                        printk("%lu*%lukB ", nr[order], K(1UL) << order);
2897                printk("= %lukB\n", K(total));
2898        }
2899
2900        printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2901
2902        show_swap_cache_info();
2903}
2904
2905static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2906{
2907        zoneref->zone = zone;
2908        zoneref->zone_idx = zone_idx(zone);
2909}
2910
2911/*
2912 * Builds allocation fallback zone lists.
2913 *
2914 * Add all populated zones of a node to the zonelist.
2915 */
2916static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2917                                int nr_zones, enum zone_type zone_type)
2918{
2919        struct zone *zone;
2920
2921        BUG_ON(zone_type >= MAX_NR_ZONES);
2922        zone_type++;
2923
2924        do {
2925                zone_type--;
2926                zone = pgdat->node_zones + zone_type;
2927                if (populated_zone(zone)) {
2928                        zoneref_set_zone(zone,
2929                                &zonelist->_zonerefs[nr_zones++]);
2930                        check_highest_zone(zone_type);
2931                }
2932
2933        } while (zone_type);
2934        return nr_zones;
2935}
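/*
 * Usage sketch (mirroring build_thisnode_zonelists() below): callers walk
 * zone types from the highest down, then NULL-terminate the list:
 *
 *	j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
 *	zonelist->_zonerefs[j].zone = NULL;
 *	zonelist->_zonerefs[j].zone_idx = 0;
 */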
2936
2937
2938/*
2939 *  zonelist_order:
2940 *  0 = automatic detection of better ordering.
2941 *  1 = order by ([node] distance, -zonetype)
2942 *  2 = order by (-zonetype, [node] distance)
2943 *
2944 *  On non-NUMA systems, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE create
2945 *  the same zonelist, so only NUMA can configure this parameter.
2946 */
2947#define ZONELIST_ORDER_DEFAULT  0
2948#define ZONELIST_ORDER_NODE     1
2949#define ZONELIST_ORDER_ZONE     2
2950
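/*
 * For illustration, on a hypothetical two-node machine whose nodes each
 * have only DMA and Normal zones, node 0's fallback list would read:
 *
 *	ZONELIST_ORDER_NODE: N0/Normal, N0/DMA, N1/Normal, N1/DMA
 *	ZONELIST_ORDER_ZONE: N0/Normal, N1/Normal, N0/DMA, N1/DMA
 */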
2951/* zonelist order in the kernel.
2952 * set_zonelist_order() will set this to NODE or ZONE.
2953 */
2954static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2955static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2956
2957
2958#ifdef CONFIG_NUMA
2959/* The value the user specified; may be changed via sysctl or boot option */
2960static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2961/* string for sysctl */
2962#define NUMA_ZONELIST_ORDER_LEN 16
2963char numa_zonelist_order[NUMA_ZONELIST_ORDER_LEN] = "default";
2964
2965/*
2966 * Interface for configuring zonelist ordering.
2967 * Command line option "numa_zonelist_order"
2968 *      = "[dD]efault"  - default, automatic configuration.
2969 *      = "[nN]ode"     - order by node locality, then by zone within node
2970 *      = "[zZ]one"     - order by zone, then by locality within zone
2971 */
2972
2973static int __parse_numa_zonelist_order(char *s)
2974{
2975        if (*s == 'd' || *s == 'D') {
2976                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2977        } else if (*s == 'n' || *s == 'N') {
2978                user_zonelist_order = ZONELIST_ORDER_NODE;
2979        } else if (*s == 'z' || *s == 'Z') {
2980                user_zonelist_order = ZONELIST_ORDER_ZONE;
2981        } else {
2982                printk(KERN_WARNING
2983                        "Ignoring invalid numa_zonelist_order value:  "
2984                        "%s\n", s);
2985                return -EINVAL;
2986        }
2987        return 0;
2988}
2989
2990static __init int setup_numa_zonelist_order(char *s)
2991{
2992        int ret;
2993
2994        if (!s)
2995                return 0;
2996
2997        ret = __parse_numa_zonelist_order(s);
2998        if (ret == 0)
2999                strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3000
3001        return ret;
3002}
3003early_param("numa_zonelist_order", setup_numa_zonelist_order);
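
/*
 * Usage sketch: the order can be chosen at boot or at runtime via the
 * vm sysctl (proc path assumed from the sysctl table elsewhere in the tree):
 *
 *	boot:    numa_zonelist_order=zone
 *	runtime: echo node > /proc/sys/vm/numa_zonelist_order
 */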
3004
3005/*
3006 * sysctl handler for numa_zonelist_order
3007 */
3008int numa_zonelist_order_handler(ctl_table *table, int write,
3009                void __user *buffer, size_t *length,
3010                loff_t *ppos)
3011{
3012        char saved_string[NUMA_ZONELIST_ORDER_LEN];
3013        int ret;
3014        static DEFINE_MUTEX(zl_order_mutex);
3015
3016        mutex_lock(&zl_order_mutex);
3017        if (write)
3018                strcpy(saved_string, (char *)table->data);
3019        ret = proc_dostring(table, write, buffer, length, ppos);
3020        if (ret)
3021                goto out;
3022        if (write) {
3023                int oldval = user_zonelist_order;
3024                if (__parse_numa_zonelist_order((char *)table->data)) {
3025                        /*
3026                         * bogus value.  restore saved string
3027                         */
3028                        strncpy((char *)table->data, saved_string,
3029                                NUMA_ZONELIST_ORDER_LEN);
3030                        user_zonelist_order = oldval;
3031                } else if (oldval != user_zonelist_order) {
3032                        mutex_lock(&zonelists_mutex);
3033                        build_all_zonelists(NULL);
3034                        mutex_unlock(&zonelists_mutex);
3035                }
3036        }
3037out:
3038        mutex_unlock(&zl_order_mutex);
3039        return ret;
3040}
3041
3042
3043#define MAX_NODE_LOAD (nr_online_nodes)
3044static int node_load[MAX_NUMNODES];
3045
3046/**
3047 * find_next_best_node - find the next node that should appear in a given node's fallback list
3048 * @node: node whose fallback list we're appending
3049 * @used_node_mask: nodemask_t of already used nodes
3050 *
3051 * We use a number of factors to determine the next node that should
3052 * appear on a given node's fallback list.  The node must not already
3053 * appear in @node's fallback list; it should be the next closest node
3054 * according to the distance array (which contains arbitrary distance values
3055 * from each node to each node in the system); and nodes with no CPUs are
3056 * preferred, since presumably they'll see very little allocation pressure
3057 * otherwise.
3058 * It returns -1 if no node is found.
3059 */
3060static int find_next_best_node(int node, nodemask_t *used_node_mask)
3061{
3062        int n, val;
3063        int min_val = INT_MAX;
3064        int best_node = -1;
3065        const struct cpumask *tmp = cpumask_of_node(0);
3066
3067        /* Use the local node if we haven't already */
3068        if (!node_isset(node, *used_node_mask)) {
3069                node_set(node, *used_node_mask);
3070                return node;
3071        }
3072
3073        for_each_node_state(n, N_HIGH_MEMORY) {
3074
3075                /* Don't want a node to appear more than once */
3076                if (node_isset(n, *used_node_mask))
3077                        continue;
3078
3079                /* Use the distance array to find the distance */
3080                val = node_distance(node, n);
3081
3082                /* Penalize nodes under us ("prefer the next node") */
3083                val += (n < node);
3084
3085                /* Give preference to headless and unused nodes */
3086                tmp = cpumask_of_node(n);
3087                if (!cpumask_empty(tmp))
3088                        val += PENALTY_FOR_NODE_WITH_CPUS;
3089
3090                /* Slight preference for less loaded node */
3091                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
3092                val += node_load[n];
3093
3094                if (val < min_val) {
3095                        min_val = val;
3096                        best_node = n;
3097                }
3098        }
3099
3100        if (best_node >= 0)
3101                node_set(best_node, *used_node_mask);
3102
3103        return best_node;
3104}
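/*
 * Worked example with made-up numbers: from node 0, a candidate node 1 at
 * node_distance() 20 that has CPUs and node_load[1] == 3 scores
 *
 *	val = (20 + (1 < 0) + PENALTY_FOR_NODE_WITH_CPUS)
 *			* MAX_NODE_LOAD * MAX_NUMNODES + 3
 *
 * so distance and the CPU penalty dominate, and node_load breaks ties.
 */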
3105
3106
3107/*
3108 * Build zonelists ordered by node and zones within node.
3109 * This results in maximum locality--normal zone overflows into local
3110 * DMA zone, if any--but risks exhausting DMA zone.
3111 */
3112static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3113{
3114        int j;
3115        struct zonelist *zonelist;
3116
3117        zonelist = &pgdat->node_zonelists[0];
3118        for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3119                ;
3120        j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3121                                                        MAX_NR_ZONES - 1);
3122        zonelist->_zonerefs[j].zone = NULL;
3123        zonelist->_zonerefs[j].zone_idx = 0;
3124}
3125
3126/*
3127 * Build gfp_thisnode zonelists
3128 */
3129static void build_thisnode_zonelists(pg_data_t *pgdat)
3130{
3131        int j;
3132        struct zonelist *zonelist;
3133
3134        zonelist = &pgdat->node_zonelists[1];
3135        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3136        zonelist->_zonerefs[j].zone = NULL;
3137        zonelist->_zonerefs[j].zone_idx = 0;
3138}
3139
3140/*
3141 * Build zonelists ordered by zone and nodes within zones.
3142 * This conserves the DMA zone[s] until all Normal memory is
3143 * exhausted, but may overflow to a remote node while memory
3144 * still exists in the local DMA zone.
3145 */
3146static int node_order[MAX_NUMNODES];
3147
3148static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3149{
3150        int pos, j, node;
3151        int zone_type;          /* needs to be signed */
3152        struct zone *z;
3153        struct zonelist *zonelist;
3154
3155        zonelist = &pgdat->node_zonelists[0];
3156        pos = 0;
3157        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3158                for (j = 0; j < nr_nodes; j++) {
3159                        node = node_order[j];
3160                        z = &NODE_DATA(node)->node_zones[zone_type];
3161                        if (populated_zone(z)) {
3162                                zoneref_set_zone(z,
3163                                        &zonelist->_zonerefs[pos++]);
3164                                check_highest_zone(zone_type);
3165                        }
3166                }
3167        }
3168        zonelist->_zonerefs[pos].zone = NULL;
3169        zonelist->_zonerefs[pos].zone_idx = 0;
3170}
3171
3172static int default_zonelist_order(void)
3173{
3174        int nid, zone_type;
3175        unsigned long low_kmem_size, total_size;
3176        struct zone *z;
3177        int average_size;
3178        /*
3179         * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
3180         * If they are really small and used heavily, the system can fall
3181         * into OOM very easily.
3182         * This function detects the ZONE_DMA/DMA32 size and configures zone order.
3183         */
3184        /* Is there a ZONE_NORMAL? (e.g. ppc has only a DMA zone..) */
3185        low_kmem_size = 0;
3186        total_size = 0;
3187        for_each_online_node(nid) {
3188                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3189                        z = &NODE_DATA(nid)->node_zones[zone_type];
3190                        if (populated_zone(z)) {
3191                                if (zone_type < ZONE_NORMAL)
3192                                        low_kmem_size += z->present_pages;
3193                                total_size += z->present_pages;
3194                        } else if (zone_type == ZONE_NORMAL) {
3195                                /*
3196                                 * If any node has only lowmem, then node order
3197                                 * is preferred to allow kernel allocations
3198                                 * locally; otherwise, they can easily infringe
3199                                 * on other nodes when there is an abundance of
3200                                 * lowmem available to allocate from.
3201                                 */
3202                                return ZONELIST_ORDER_NODE;
3203                        }
3204                }
3205        }
3206        if (!low_kmem_size ||  /* there is no DMA area. */
3207            low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
3208                return ZONELIST_ORDER_NODE;
3209        /*
3210         * Look into each node's config.
3211         * If there is a node whose DMA/DMA32 memory makes up a very large
3212         * share of its local memory, node order may be suitable.
3213         */
3214        average_size = total_size /
3215                                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
3216        for_each_online_node(nid) {
3217                low_kmem_size = 0;
3218                total_size = 0;
3219                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3220                        z = &NODE_DATA(nid)->node_zones[zone_type];
3221                        if (populated_zone(z)) {
3222                                if (zone_type < ZONE_NORMAL)
3223                                        low_kmem_size += z->present_pages;
3224                                total_size += z->present_pages;
3225                        }
3226                }
3227                if (low_kmem_size &&
3228                    total_size > average_size && /* ignore small node */
3229                    low_kmem_size > total_size * 70/100)
3230                        return ZONELIST_ORDER_NODE;
3231        }
3232        return ZONELIST_ORDER_ZONE;
3233}
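/*
 * Example with made-up sizes: a node with 512MB of DMA out of 640MB total
 * (and not dismissed as a small node) has low_kmem_size above 70% of
 * total_size, so node order is chosen to keep kernel allocations local.
 * A machine whose DMA area is tiny relative to the rest falls through to
 * zone order, protecting the DMA area instead.
 */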
3234
3235static void set_zonelist_order(void)
3236{
3237        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3238                current_zonelist_order = default_zonelist_order();
3239        else
3240                current_zonelist_order = user_zonelist_order;
3241}
3242
3243static void build_zonelists(pg_data_t *pgdat)
3244{
3245        int j, node, load;
3246        enum zone_type i;
3247        nodemask_t used_mask;
3248        int local_node, prev_node;
3249        struct zonelist *zonelist;
3250        int order = current_zonelist_order;
3251
3252        /* initialize zonelists */
3253        for (i = 0; i < MAX_ZONELISTS; i++) {
3254                zonelist = pgdat->node_zonelists + i;
3255                zonelist->_zonerefs[0].zone = NULL;
3256                zonelist->_zonerefs[0].zone_idx = 0;
3257        }
3258
3259        /* NUMA-aware ordering of nodes */
3260        local_node = pgdat->node_id;
3261        load = nr_online_nodes;
3262        prev_node = local_node;
3263        nodes_clear(used_mask);
3264
3265        memset(node_order, 0, sizeof(node_order));
3266        j = 0;
3267
3268        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3269                int distance = node_distance(local_node, node);
3270
3271                /*
3272                 * If another node is sufficiently far away then it is better
3273                 * to reclaim pages in a zone before going off node.
3274                 */
3275                if (distance > RECLAIM_DISTANCE)
3276                        zone_reclaim_mode = 1;
3277
3278                /*
3279                 * We don't want to pressure a particular node,
3280                 * so add a penalty to the first node in the same
3281                 * distance group to make the selection round-robin.
3282                 */
3283                if (distance != node_distance(local_node, prev_node))
3284                        node_load[node] = load;
3285
3286                prev_node = node;
3287                load--;
3288                if (order == ZONELIST_ORDER_NODE)
3289                        build_zonelists_in_node_order(pgdat, node);
3290                else
3291                        node_order[j++] = node; /* remember order */
3292        }
3293
3294        if (order == ZONELIST_ORDER_ZONE) {
3295                /* calculate node order -- i.e., DMA last! */
3296                build_zonelists_in_zone_order(pgdat, j);
3297        }
3298
3299        build_thisnode_zonelists(pgdat);
3300}
3301
3302/* Construct the zonelist performance cache - see mmzone.h for details */
3303static void build_zonelist_cache(pg_data_t *pgdat)
3304{
3305        struct zonelist *zonelist;
3306        struct zonelist_cache *zlc;
3307        struct zoneref *z;
3308
3309        zonelist = &pgdat->node_zonelists[0];
3310        zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3311        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3312        for (z = zonelist->_zonerefs; z->zone; z++)
3313                zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3314}
3315
3316#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3317/*
3318 * Return the node id of the node used for "local" allocations,
3319 * i.e., the node id of the first zone in the given node's generic zonelist.
3320 * Used for initializing percpu 'numa_mem', which is used primarily
3321 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3322 */
3323int local_memory_node(int node)
3324{
3325        struct zone *zone;
3326
3327        (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3328                                   gfp_zone(GFP_KERNEL),
3329                                   NULL,
3330                                   &zone);
3331        return zone->node;
3332}
3333#endif
3334
3335#else   /* CONFIG_NUMA */
3336
3337static void set_zonelist_order(void)
3338{
3339        current_zonelist_order = ZONELIST_ORDER_ZONE;
3340}
3341
3342static void build_zonelists(pg_data_t *pgdat)
3343{
3344        int node, local_node;
3345        enum zone_type j;
3346        struct zonelist *zonelist;
3347
3348        local_node = pgdat->node_id;
3349
3350        zonelist = &pgdat->node_zonelists[0];
3351        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3352
3353        /*
3354         * Now we build the zonelist so that it contains the zones
3355         * of all the other nodes.
3356         * We don't want to pressure a particular node, so when
3357         * building the zones for node N, we make sure that the
3358         * zones coming right after the local ones are those from
3359         * node N+1 (modulo N)
3360         */
3361        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3362                if (!node_online(node))
3363                        continue;
3364                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3365                                                        MAX_NR_ZONES - 1);
3366        }
3367        for (node = 0; node < local_node; node++) {
3368                if (!node_online(node))
3369                        continue;
3370                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3371                                                        MAX_NR_ZONES - 1);
3372        }
3373
3374        zonelist->_zonerefs[j].zone = NULL;
3375        zonelist->_zonerefs[j].zone_idx = 0;
3376}
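/*
 * E.g. with four online nodes, node 2's zonelist visits nodes in the
 * order 2, 3, 0, 1, so no single node heads every other node's list.
 */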
3377
3378/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3379static void build_zonelist_cache(pg_data_t *pgdat)
3380{
3381        pgdat->node_zonelists[0].zlcache_ptr = NULL;
3382}
3383
3384#endif  /* CONFIG_NUMA */
3385
3386/*
3387 * Boot pageset table. One per cpu which is going to be used for all
3388 * zones and all nodes. The parameters will be set in such a way
3389 * that an item put on a list will immediately be handed over to
3390 * the buddy list. This is safe since pageset manipulation is done
3391 * with interrupts disabled.
3392 *
3393 * The boot_pagesets must be kept even after bootup is complete for
3394 * unused processors and/or zones. They do play a role for bootstrapping
3395 * hotplugged processors.
3396 *
3397 * zoneinfo_show() and maybe other functions do
3398 * not check if the processor is online before following the pageset pointer.
3399 * Other parts of the kernel may not check if the zone is available.
3400 */
3401static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3402static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3403static void setup_zone_pageset(struct zone *zone);
3404
3405/*
3406 * Global mutex to protect against size modification of zonelists
3407 * as well as to serialize pageset setup for the new populated zone.
3408 */
3409DEFINE_MUTEX(zonelists_mutex);
3410
3411/* The int return value is just for stop_machine() */
3412static __init_refok int __build_all_zonelists(void *data)
3413{
3414        int nid;
3415        int cpu;
3416
3417#ifdef CONFIG_NUMA
3418        memset(node_load, 0, sizeof(node_load));
3419#endif
3420        for_each_online_node(nid) {
3421                pg_data_t *pgdat = NODE_DATA(nid);
3422
3423                build_zonelists(pgdat);
3424                build_zonelist_cache(pgdat);
3425        }
3426
3427        /*
3428         * Initialize the boot_pagesets that are going to be used
3429         * for bootstrapping processors. The real pagesets for
3430         * each zone will be allocated later when the per cpu
3431         * allocator is available.
3432         *
3433         * boot_pagesets are used also for bootstrapping offline
3434         * cpus if the system is already booted because the pagesets
3435         * are needed to initialize allocators on a specific cpu too.
3436         * E.g. the percpu allocator needs the page allocator which
3437         * needs the percpu allocator in order to allocate its pagesets
3438         * (a chicken-egg dilemma).
3439         */
3440        for_each_possible_cpu(cpu) {
3441                setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3442
3443#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3444                /*
3445                 * We now know the "local memory node" for each node--
3446                 * i.e., the node of the first zone in the generic zonelist.
3447                 * Set up numa_mem percpu variable for on-line cpus.  During
3448                 * boot, only the boot cpu should be on-line;  we'll init the
3449                 * secondary cpus' numa_mem as they come on-line.  During
3450                 * node/memory hotplug, we'll fixup all on-line cpus.
3451                 */
3452                if (cpu_online(cpu))
3453                        set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3454#endif
3455        }
3456
3457        return 0;
3458}
3459
3460/*
3461 * Always called with zonelists_mutex held,
3462 * unless system_state == SYSTEM_BOOTING.
3463 */
3464void __ref build_all_zonelists(void *data)
3465{
3466        set_zonelist_order();
3467
3468        if (system_state == SYSTEM_BOOTING) {
3469                __build_all_zonelists(NULL);
3470                mminit_verify_zonelist();
3471                cpuset_init_current_mems_allowed();
3472        } else {
3473                /* We have to stop all cpus to guarantee there is
3474                 * no user of the zonelists. */
3475#ifdef CONFIG_MEMORY_HOTPLUG
3476                if (data)
3477                        setup_zone_pageset((struct zone *)data);
3478#endif
3479                stop_machine(__build_all_zonelists, NULL, NULL);
3480                /* cpuset refresh routine should be here */
3481        }
3482        vm_total_pages = nr_free_pagecache_pages();
3483        /*
3484         * Disable grouping by mobility if the number of pages in the
3485         * system is too low to allow the mechanism to work. It would be
3486         * more accurate, but expensive to check per-zone. This check is
3487         * made on memory-hotadd so a system can start with mobility
3488         * disabled and enable it later
3489         */
3490        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3491                page_group_by_mobility_disabled = 1;
3492        else
3493                page_group_by_mobility_disabled = 0;
3494
3495        printk("Built %i zonelists in %s order, mobility grouping %s.  "
3496                "Total pages: %ld\n",
3497                        nr_online_nodes,
3498                        zonelist_order_name[current_zonelist_order],
3499                        page_group_by_mobility_disabled ? "off" : "on",
3500                        vm_total_pages);
3501#ifdef CONFIG_NUMA
3502        printk("Policy zone: %s\n", zone_names[policy_zone]);
3503#endif
3504}
3505
3506/*
3507 * Helper functions to size the waitqueue hash table.
3508 * Essentially these want to choose hash table sizes sufficiently
3509 * large so that collisions trying to wait on pages are rare.
3510 * But in fact, the number of active page waitqueues on typical
3511 * systems is ridiculously low, less than 200. So even this is
3512 * conservative, though it seems large.
3513 *
3514 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3515 * waitqueues, i.e. the size of the waitq table given the number of pages.
3516 */
3517#define PAGES_PER_WAITQUEUE     256
3518
3519#ifndef CONFIG_MEMORY_HOTPLUG
3520static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3521{
3522        unsigned long size = 1;
3523
3524        pages /= PAGES_PER_WAITQUEUE;
3525
3526        while (size < pages)
3527                size <<= 1;
3528
3529        /*
3530         * Once we have dozens or even hundreds of threads sleeping
3531         * on IO we've got bigger problems than wait queue collision.
3532         * Limit the size of the wait table to a reasonable size.
3533         */
3534        size = min(size, 4096UL);
3535
3536        return max(size, 4UL);
3537}
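/*
 * Worked example: a 4GB zone with 4K pages has pages = 1 << 20, so
 * pages / PAGES_PER_WAITQUEUE = 4096, which is exactly the clamp;
 * the table tops out at 4096 wait queues.
 */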
3538#else
3539/*
3540 * A zone's size might be changed by hot-add, so it is not possible to determine
3541 * a suitable size for its wait_table in advance; we use the maximum size now.
3542 *
3543 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
3544 *
3545 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3546 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3547 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3548 *
3549 * The maximum number of entries is reached once a zone's memory is (512K + 256)
3550 * pages or more, computed the traditional way (see above).  It equals:
3551 *
3552 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3553 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3554 *    powerpc (64K page size)             : =  (32G +16M)byte.
3555 */
3556static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3557{
3558        return 4096UL;
3559}
3560#endif
3561
3562/*
3563 * This is an integer logarithm so that shifts can be used later
3564 * to extract the more random high bits from the multiplicative
3565 * hash function before the remainder is taken.
3566 */
3567static inline unsigned long wait_table_bits(unsigned long size)
3568{
3569        return ffz(~size);
3570}
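/*
 * E.g. wait_table_bits(4096) = ffz(~4096) = 12: since size is always a
 * power of two here, this is simply log2 of the table size.
 */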
3571
3572#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3573
3574/*
3575 * Check if a pageblock contains reserved pages
3576 */
3577static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3578{
3579        unsigned long pfn;
3580
3581        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3582                if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3583                        return 1;
3584        }
3585        return 0;
3586}
3587
3588/*
3589 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3590 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3591 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3592 * higher will lead to a bigger reserve which will get freed as contiguous
3593 * blocks as reclaim kicks in.
3594 */
3595static void setup_zone_migrate_reserve(struct zone *zone)
3596{
3597        unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3598        struct page *page;
3599        unsigned long block_migratetype;
3600        int reserve;
3601
3602        /*
3603         * Get the start pfn, end pfn and the number of blocks to reserve.
3604         * We have to be careful to be aligned to pageblock_nr_pages to
3605         * make sure that we always check pfn_valid for the first page in
3606         * the block.
3607         */
3608        start_pfn = zone->zone_start_pfn;
3609        end_pfn = start_pfn + zone->spanned_pages;
3610        start_pfn = roundup(start_pfn, pageblock_nr_pages);
3611        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3612                                                        pageblock_order;
3613
3614        /*
3615         * Reserve blocks are generally in place to help high-order atomic
3616         * allocations that are short-lived. A min_free_kbytes value that
3617         * would result in more than 2 reserve blocks for atomic allocations
3618         * is assumed to be in place to help anti-fragmentation for the
3619         * future allocation of hugepages at runtime.
3620         */
3621        reserve = min(2, reserve);
3622
3623        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3624                if (!pfn_valid(pfn))
3625                        continue;
3626                page = pfn_to_page(pfn);
3627
3628                /* Watch out for overlapping nodes */
3629                if (page_to_nid(page) != zone_to_nid(zone))
3630                        continue;
3631
3632                block_migratetype = get_pageblock_migratetype(page);
3633
3634                /* Only test what is necessary when the reserves are not met */
3635                if (reserve > 0) {
3636                        /*
3637                         * Blocks with reserved pages will never be
3638                         * freed; skip them.
3639                         */
3640                        block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3641                        if (pageblock_is_reserved(pfn, block_end_pfn))
3642                                continue;
3643
3644                        /* If this block is reserved, account for it */
3645                        if (block_migratetype == MIGRATE_RESERVE) {
3646                                reserve--;
3647                                continue;
3648                        }
3649
3650                        /* Suitable for reserving if this block is movable */
3651                        if (block_migratetype == MIGRATE_MOVABLE) {
3652                                set_pageblock_migratetype(page,
3653                                                        MIGRATE_RESERVE);
3654                                move_freepages_block(zone, page,
3655                                                        MIGRATE_RESERVE);
3656                                reserve--;
3657                                continue;
3658                        }
3659                }
3660
3661                /*
3662                 * If the reserve is met and this is a previous reserved block,
3663                 * take it back
3664                 */
3665                if (block_migratetype == MIGRATE_RESERVE) {
3666                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3667                        move_freepages_block(zone, page, MIGRATE_MOVABLE);
3668                }
3669        }
3670}
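/*
 * Rough example with a made-up watermark: with pageblock_order = 10
 * (1024-page blocks) and min_wmark_pages(zone) = 1500,
 * roundup(1500, 1024) >> 10 = 2 blocks, which is also the cap above.
 */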
3671
3672/*
3673 * Initially all pages are reserved - free ones are freed
3674 * up by free_all_bootmem() once the early boot process is
3675 * done. Non-atomic initialization, single-pass.
3676 */
3677void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3678                unsigned long start_pfn, enum memmap_context context)
3679{
3680        struct page *page;
3681        unsigned long end_pfn = start_pfn + size;
3682        unsigned long pfn;
3683        struct zone *z;
3684
3685        if (highest_memmap_pfn < end_pfn - 1)
3686                highest_memmap_pfn = end_pfn - 1;
3687
3688        z = &NODE_DATA(nid)->node_zones[zone];
3689        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3690                /*
3691                 * There can be holes in boot-time mem_map[]s
3692                 * handed to this function.  They do not
3693                 * exist on hotplugged memory.
3694                 */
3695                if (context == MEMMAP_EARLY) {
3696                        if (!early_pfn_valid(pfn))
3697                                continue;
3698                        if (!early_pfn_in_nid(pfn, nid))
3699                                continue;
3700                }
3701                page = pfn_to_page(pfn);
3702                set_page_links(page, zone, nid, pfn);
3703                mminit_verify_page_links(page, zone, nid, pfn);
3704                init_page_count(page);
3705                reset_page_mapcount(page);
3706                SetPageReserved(page);
3707                /*
3708                 * Mark the block movable so that blocks are reserved for
3709                 * movable at startup. This will force kernel allocations
3710                 * to reserve their blocks rather than leaking throughout
3711                 * the address space during boot when many long-lived
3712                 * kernel allocations are made. Later some blocks near
3713                 * the start are marked MIGRATE_RESERVE by
3714                 * setup_zone_migrate_reserve()
3715                 *
3716                 * The pageblock bitmap covers the zone's valid pfn range, but
3717                 * the memmap can be created for invalid pages (for alignment),
3718                 * so check here that set_pageblock_migratetype() is not called
3719                 * for a pfn outside the zone.
3720                 */
3721                if ((z->zone_start_pfn <= pfn)
3722                    && (pfn < z->zone_start_pfn + z->spanned_pages)
3723                    && !(pfn & (pageblock_nr_pages - 1)))
3724                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3725
3726                INIT_LIST_HEAD(&page->lru);
3727#ifdef WANT_PAGE_VIRTUAL
3728                /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3729                if (!is_highmem_idx(zone))
3730                        set_page_address(page, __va(pfn << PAGE_SHIFT));
3731#endif
3732        }
3733}
3734
3735static void __meminit zone_init_free_lists(struct zone *zone)
3736{
3737        int order, t;
3738        for_each_migratetype_order(order, t) {
3739                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3740                zone->free_area[order].nr_free = 0;
3741        }
3742}
3743
3744#ifndef __HAVE_ARCH_MEMMAP_INIT
3745#define memmap_init(size, nid, zone, start_pfn) \
3746        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3747#endif
3748
3749static int zone_batchsize(struct zone *zone)
3750{
3751#ifdef CONFIG_MMU
3752        int batch;
3753
3754        /*
3755         * The per-cpu-pages pools are set to around 1/1000th of the
3756         * size of the zone, but to no more than half a megabyte.
3757         *
3758         * OK, so we don't know how big the cache is.  So guess.
3759         */
3760        batch = zone->present_pages / 1024;
3761        if (batch * PAGE_SIZE > 512 * 1024)
3762                batch = (512 * 1024) / PAGE_SIZE;
3763        batch /= 4;             /* We effectively *= 4 below */
3764        if (batch < 1)
3765                batch = 1;
3766
3767        /*
3768         * Clamp the batch to a 2^n - 1 value. Having a power
3769         * of 2 value was found to be more likely to have
3770         * suboptimal cache aliasing properties in some cases.
3771         *
3772         * For example if 2 tasks are alternately allocating
3773         * batches of pages, one task can end up with a lot
3774         * of pages of one half of the possible page colors
3775         * and the other with pages of the other colors.
3776         */
3777        batch = rounddown_pow_of_two(batch + batch/2) - 1;
3778
3779        return batch;
3780
3781#else
3782        /* The deferral and batching of frees should be suppressed under NOMMU
3783         * conditions.
3784         *
3785         * The problem is that NOMMU needs to be able to allocate large chunks
3786         * of contiguous memory as there's no hardware page translation to
3787         * assemble apparent contiguous memory from discontiguous pages.
3788         *
3789         * Queueing large contiguous runs of pages for batching, however,
3790         * causes the pages to actually be freed in smaller chunks.  As there
3791         * can be a significant delay between the individual batches being
3792         * recycled, this leads to the once large chunks of space being
3793         * fragmented and becoming unavailable for high-order allocations.
3794         */
3795        return 0;
3796#endif
3797}
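/*
 * Worked example for the MMU case (4K pages, 1GB zone, so
 * present_pages = 262144):
 *
 *	batch = 262144 / 1024 = 256;	256 * 4096 > 512K, so batch = 128
 *	batch /= 4;			now 32
 *	rounddown_pow_of_two(32 + 16) - 1 = 31
 */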
3798
3799static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3800{
3801        struct per_cpu_pages *pcp;
3802        int migratetype;
3803
3804        memset(p, 0, sizeof(*p));
3805
3806        pcp = &p->pcp;
3807        pcp->count = 0;
3808        pcp->high = 6 * batch;
3809        pcp->batch = max(1UL, 1 * batch);
3810        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3811                INIT_LIST_HEAD(&pcp->lists[migratetype]);
3812}
3813
3814/*
3815 * setup_pagelist_highmark() sets the high water mark of the hot
3816 * per_cpu_pagelist for pageset @p to the value @high.
3817 */
3818
3819static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3820                                unsigned long high)
3821{
3822        struct per_cpu_pages *pcp;
3823
3824        pcp = &p->pcp;
3825        pcp->high = high;
3826        pcp->batch = max(1UL, high/4);
3827        if ((high/4) > (PAGE_SHIFT * 8))
3828                pcp->batch = PAGE_SHIFT * 8;
3829}
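/*
 * E.g. setup_pagelist_highmark(p, 1000) would set pcp->batch = 250,
 * which the check above then clamps to PAGE_SHIFT * 8 (96 with 4K pages).
 */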
3830
3831static void setup_zone_pageset(struct zone *zone)
3832{
3833        int cpu;
3834
3835        zone->pageset = alloc_percpu(struct per_cpu_pageset);
3836
3837        for_each_possible_cpu(cpu) {
3838                struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3839
3840                setup_pageset(pcp, zone_batchsize(zone));
3841
3842                if (percpu_pagelist_fraction)
3843                        setup_pagelist_highmark(pcp,
3844                                (zone->present_pages /
3845                                        percpu_pagelist_fraction));
3846        }
3847}
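/*
 * E.g. with the vm sysctl percpu_pagelist_fraction set to 8, each CPU's
 * pcp->high for a zone becomes present_pages / 8 (with batch = high / 4,
 * clamped as above).
 */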
3848
3849/*
3850 * Allocate per cpu pagesets and initialize them.
3851 * Before this call only boot pagesets were available.
3852 */
3853void __init setup_per_cpu_pageset(void)
3854{
3855        struct zone *zone;
3856
3857        for_each_populated_zone(zone)
3858                setup_zone_pageset(zone);
3859}
3860
3861static noinline __init_refok
3862int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3863{
3864        int i;
3865        struct pglist_data *pgdat = zone->zone_pgdat;
3866        size_t alloc_size;
3867
3868        /*
3869         * The per-page waitqueue mechanism uses hashed waitqueues
3870         * per zone.
3871         */
3872        zone->wait_table_hash_nr_entries =
3873                 wait_table_hash_nr_entries(zone_size_pages);
3874        zone->wait_table_bits =
3875                wait_table_bits(zone->wait_table_hash_nr_entries);
3876        alloc_size = zone->wait_table_hash_nr_entries
3877                                        * sizeof(wait_queue_head_t);
3878
3879        if (!slab_is_available()) {
3880                zone->wait_table = (wait_queue_head_t *)
3881                        alloc_bootmem_node_nopanic(pgdat, alloc_size);
3882        } else {
3883                /*
3884                 * This case means that a zone whose size was 0 gets new memory
3885                 * via memory hot-add.
3886                 * It may also be that a whole new node was hot-added.  In
3887                 * that case vmalloc() cannot yet allocate from the new node's
3888                 * memory, even though this wait_table would ideally live on
3889                 * the new node itself.
3890                 * Actually placing it in the new node's memory will require
3891                 * further work.
3892                 */
3893                zone->wait_table = vmalloc(alloc_size);
3894        }
3895        if (!zone->wait_table)
3896                return -ENOMEM;
3897
3898        for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3899                init_waitqueue_head(zone->wait_table + i);
3900
3901        return 0;
3902}
3903
3904static int __zone_pcp_update(void *data)
3905{
3906        struct zone *zone = data;
3907        int cpu;
3908        unsigned long batch = zone_batchsize(zone), flags;
3909
3910        for_each_possible_cpu(cpu) {
3911                struct per_cpu_pageset *pset;
3912                struct per_cpu_pages *pcp;
3913
3914                pset = per_cpu_ptr(zone->pageset, cpu);
3915                pcp = &pset->pcp;
3916
3917                local_irq_save(flags);
3918                free_pcppages_bulk(zone, pcp->count, pcp);
3919                setup_pageset(pset, batch);
3920                local_irq_restore(flags);
3921        }
3922        return 0;
3923}
3924
3925void zone_pcp_update(struct zone *zone)
3926{
3927        stop_machine(__zone_pcp_update, zone, NULL);
3928}
3929
3930static __meminit void zone_pcp_init(struct zone *zone)
3931{
3932        /*
3933         * per cpu subsystem is not up at this point. The following code
3934         * relies on the ability of the linker to provide the
3935         * offset of a (static) per cpu variable into the per cpu area.
3936         */
3937        zone->pageset = &boot_pageset;
3938
3939        if (zone->present_pages)
3940                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3941                        zone->name, zone->present_pages,
3942                                         zone_batchsize(zone));
3943}
3944
3945__meminit int init_currently_empty_zone(struct zone *zone,
3946                                        unsigned long zone_start_pfn,
3947                                        unsigned long size,
3948                                        enum memmap_context context)
3949{
3950        struct pglist_data *pgdat = zone->zone_pgdat;
3951        int ret;
3952        ret = zone_wait_table_init(zone, size);
3953        if (ret)
3954                return ret;
3955        pgdat->nr_zones = zone_idx(zone) + 1;
3956
3957        zone->zone_start_pfn = zone_start_pfn;
3958
3959        mminit_dprintk(MMINIT_TRACE, "memmap_init",
3960                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3961                        pgdat->node_id,
3962                        (unsigned long)zone_idx(zone),
3963                        zone_start_pfn, (zone_start_pfn + size));
3964
3965        zone_init_free_lists(zone);
3966
3967        return 0;
3968}
3969
3970#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
3971#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3972/*
3973 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3974 * Architectures may implement their own version but if add_active_range()
3975 * was used and there are no special requirements, this is a convenient
3976 * alternative
3977 */
3978int __meminit __early_pfn_to_nid(unsigned long pfn)
3979{
3980        unsigned long start_pfn, end_pfn;
3981        int i, nid;
3982
3983        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
3984                if (start_pfn <= pfn && pfn < end_pfn)
3985                        return nid;
3986        /* This is a memory hole */
3987        return -1;
3988}
3989#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3990
3991int __meminit early_pfn_to_nid(unsigned long pfn)
3992{
3993        int nid;
3994
3995        nid = __early_pfn_to_nid(pfn);
3996        if (nid >= 0)
3997                return nid;
3998        /* pfn falls in a memory hole; just return node 0 */
3999        return 0;
4000}
4001
4002#ifdef CONFIG_NODES_SPAN_OTHER_NODES
4003bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
4004{
4005        int nid;
4006
4007        nid = __early_pfn_to_nid(pfn);
4008        if (nid >= 0 && nid != node)
4009                return false;
4010        return true;
4011}
4012#endif
4013
4014/**
4015 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
4016 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
4017 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
4018 *
4019 * If an architecture guarantees that all ranges registered with
4020 * add_active_ranges() contain no holes and may be freed, this
4021 * function may be used instead of calling free_bootmem() manually.
4022 */
4023void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
4024{
4025        unsigned long start_pfn, end_pfn;
4026        int i, this_nid;
4027
4028        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
4029                start_pfn = min(start_pfn, max_low_pfn);
4030                end_pfn = min(end_pfn, max_low_pfn);
4031
4032                if (start_pfn < end_pfn)
4033                        free_bootmem_node(NODE_DATA(this_nid),
4034                                          PFN_PHYS(start_pfn),
4035                                          (end_pfn - start_pfn) << PAGE_SHIFT);
4036        }
4037}
4038
4039/**
4040 * sparse_memory_present_with_active_regions - Call memory_present for each active range
4041 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
4042 *
4043 * If an architecture guarantees that all ranges registered with
4044 * add_active_ranges() contain no holes and may be freed, this
4045 * function may be used instead of calling memory_present() manually.
4046 */
4047void __init sparse_memory_present_with_active_regions(int nid)
4048{
4049        unsigned long start_pfn, end_pfn;
4050        int i, this_nid;
4051
4052        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
4053                memory_present(this_nid, start_pfn, end_pfn);
4054}
4055
4056/**
4057 * get_pfn_range_for_nid - Return the start and end page frames for a node
4058 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
4059 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
4060 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
4061 *
4062 * It returns the start and end page frame of a node based on information
4063 * provided by an arch calling add_active_range(). If called for a node
4064 * with no available memory, a warning is printed and the start and end
4065 * PFNs will be 0.
4066 */
4067void __meminit get_pfn_range_for_nid(unsigned int nid,
4068                        unsigned long *start_pfn, unsigned long *end_pfn)
4069{
4070        unsigned long this_start_pfn, this_end_pfn;
4071        int i;
4072
4073        *start_pfn = -1UL;
4074        *end_pfn = 0;
4075
4076        for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
4077                *start_pfn = min(*start_pfn, this_start_pfn);
4078                *end_pfn = max(*end_pfn, this_end_pfn);
4079        }
4080
4081        if (*start_pfn == -1UL)
4082                *start_pfn = 0;
4083}
4084
4085/*
4086 * This finds a zone that can be used for ZONE_MOVABLE pages. The
4087 * assumption is made that zones within a node are ordered by monotonically
4088 * increasing memory addresses, so that the "highest" populated zone is used.
4089 */
4090static void __init find_usable_zone_for_movable(void)
4091{
4092        int zone_index;
4093        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
4094                if (zone_index == ZONE_MOVABLE)
4095                        continue;
4096
4097                if (arch_zone_highest_possible_pfn[zone_index] >
4098                                arch_zone_lowest_possible_pfn[zone_index])
4099                        break;
4100        }
4101
4102        VM_BUG_ON(zone_index == -1);
4103        movable_zone = zone_index;
4104}
4105
4106/*
4107 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4108 * because it is sized independent of architecture. Unlike the other zones,
4109 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4110 * in each node depending on the size of each node and how evenly kernelcore
4111 * is distributed. This helper function adjusts the zone ranges
4112 * provided by the architecture for a given node by using the end of the
4113 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4114 * zones within a node are ordered by monotonically increasing memory addresses.
4115 */
4116static void __meminit adjust_zone_range_for_zone_movable(int nid,
4117                                        unsigned long zone_type,
4118                                        unsigned long node_start_pfn,
4119                                        unsigned long node_end_pfn,
4120                                        unsigned long *zone_start_pfn,
4121                                        unsigned long *zone_end_pfn)
4122{
4123        /* Only adjust if ZONE_MOVABLE is on this node */
4124        if (zone_movable_pfn[nid]) {
4125                /* Size ZONE_MOVABLE */
4126                if (zone_type == ZONE_MOVABLE) {
4127                        *zone_start_pfn = zone_movable_pfn[nid];
4128                        *zone_end_pfn = min(node_end_pfn,
4129                                arch_zone_highest_possible_pfn[movable_zone]);
4130
4131                /* Adjust for ZONE_MOVABLE starting within this range */
4132                } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4133                                *zone_end_pfn > zone_movable_pfn[nid]) {
4134                        *zone_end_pfn = zone_movable_pfn[nid];
4135
4136                /* Check if this whole range is within ZONE_MOVABLE */
4137                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4138                        *zone_start_pfn = *zone_end_pfn;
4139        }
4140}
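/*
 * Example: if zone_movable_pfn[nid] lands in the middle of this node's
 * Normal zone, Normal is truncated to end there, and ZONE_MOVABLE spans
 * from that pfn to the end of the node.
 */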
4141
4142/*
4143 * Return the number of pages a zone spans in a node, including holes:
4144 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4145 */
4146static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4147                                        unsigned long zone_type,
4148                                        unsigned long *ignored)
4149{
4150        unsigned long node_start_pfn, node_end_pfn;
4151        unsigned long zone_start_pfn, zone_end_pfn;
4152
4153        /* Get the start and end of the node and zone */
4154        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4155        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4156        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4157        adjust_zone_range_for_zone_movable(nid, zone_type,
4158                                node_start_pfn, node_end_pfn,
4159                                &zone_start_pfn, &zone_end_pfn);
4160
4161        /* Check that this node has pages within the zone's required range */
4162        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4163                return 0;
4164
4165        /* Move the zone boundaries inside the node if necessary */
4166        zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4167        zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4168
4169        /* Return the spanned pages */
4170        return zone_end_pfn - zone_start_pfn;
4171}
4172
4173/*
4174 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4175 * then all holes in the requested range will be accounted for.
4176 */
4177unsigned long __meminit __absent_pages_in_range(int nid,
4178                                unsigned long range_start_pfn,
4179                                unsigned long range_end_pfn)
4180{
4181        unsigned long nr_absent = range_end_pfn - range_start_pfn;
4182        unsigned long start_pfn, end_pfn;
4183        int i;
4184
4185        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4186                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4187                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4188                nr_absent -= end_pfn - start_pfn;
4189        }
4190        return nr_absent;
4191}
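/*
 * E.g. for a range of pfns 0..1000 with a single registered range
 * 100..900, nr_absent = 1000 - (900 - 100) = 200 pages of holes.
 */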
4192
4193/**
4194 * absent_pages_in_range - Return number of page frames in holes within a range
4195 * @start_pfn: The start PFN to start searching for holes
4196 * @end_pfn: The end PFN to stop searching for holes
4197 *
4198 * It returns the number of page frames in memory holes within a range.
4199 */
4200unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4201                                                        unsigned long end_pfn)
4202{
4203        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4204}
4205
4206/* Return the number of page frames in holes in a zone on a node */
4207static unsigned long __meminit zone_absent_pages_in_node(int nid,
4208                                        unsigned long zone_type,
4209                                        unsigned long *ignored)
4210{
4211        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4212        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4213        unsigned long node_start_pfn, node_end_pfn;
4214        unsigned long zone_start_pfn, zone_end_pfn;
4215
4216        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4217        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4218        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4219
4220        adjust_zone_range_for_zone_movable(nid, zone_type,
4221                        node_start_pfn, node_end_pfn,
4222                        &zone_start_pfn, &zone_end_pfn);
4223        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4224}
4225
4226#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4227static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4228                                        unsigned long zone_type,
4229                                        unsigned long *zones_size)
4230{
4231        return zones_size[zone_type];
4232}
4233
4234static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4235                                                unsigned long zone_type,
4236                                                unsigned long *zholes_size)
4237{
4238        if (!zholes_size)
4239                return 0;
4240
4241        return zholes_size[zone_type];
4242}
4243
4244#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4245
4246static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4247                unsigned long *zones_size, unsigned long *zholes_size)
4248{
4249        unsigned long realtotalpages, totalpages = 0;
4250        enum zone_type i;
4251
4252        for (i = 0; i < MAX_NR_ZONES; i++)
4253                totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4254                                                                zones_size);
4255        pgdat->node_spanned_pages = totalpages;
4256
4257        realtotalpages = totalpages;
4258        for (i = 0; i < MAX_NR_ZONES; i++)
4259                realtotalpages -=
4260                        zone_absent_pages_in_node(pgdat->node_id, i,
4261                                                                zholes_size);
4262        pgdat->node_present_pages = realtotalpages;
4263        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4264                                                        realtotalpages);
4265}
4266
4267#ifndef CONFIG_SPARSEMEM
4268/*
4269 * Calculate the size of zone->pageblock_flags rounded up to an unsigned long.
4270 * Start by making sure zonesize is a multiple of pageblock_nr_pages by
4271 * rounding up, then use NR_PAGEBLOCK_BITS worth of bits per pageblock;
4272 * finally round what is now in bits up to the nearest long, and return
4273 * the result in bytes.
4274 */
4275static unsigned long __init usemap_size(unsigned long zonesize)
4276{
4277        unsigned long usemapsize;
4278
4279        usemapsize = roundup(zonesize, pageblock_nr_pages);
4280        usemapsize = usemapsize >> pageblock_order;
4281        usemapsize *= NR_PAGEBLOCK_BITS;
4282        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4283
4284        return usemapsize / 8;
4285}
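/*
 * Worked example (assuming pageblock_order = 9 and NR_PAGEBLOCK_BITS = 4):
 * a 1GB zone with 4K pages spans 262144 pages = 512 pageblocks, needing
 * 2048 bits, which already rounds to 64-bit longs, i.e. 256 bytes.
 */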
4286
4287static void __init setup_usemap(struct pglist_data *pgdat,
4288                                struct zone *zone, unsigned long zonesize)
4289{
4290        unsigned long usemapsize = usemap_size(zonesize);
4291        zone->pageblock_flags = NULL;
4292        if (usemapsize)
4293                zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4294                                                                   usemapsize);
4295}
4296#else
4297static inline void setup_usemap(struct pglist_data *pgdat,
4298                                struct zone *zone, unsigned long zonesize) {}
4299#endif /* CONFIG_SPARSEMEM */
4300
4301#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4302
4303/* Initialise pageblock_order, the page granularity that NR_PAGEBLOCK_BITS describes */
4304void __init set_pageblock_order(void)
4305{
4306        unsigned int order;
4307
4308        /* Check that pageblock_order has not already been set up */
4309        if (pageblock_order)
4310                return;
4311
4312        if (HPAGE_SHIFT > PAGE_SHIFT)
4313                order = HUGETLB_PAGE_ORDER;
4314        else
4315                order = MAX_ORDER - 1;
4316
4317        /*
4318         * Assume the largest contiguous order of interest is a huge page.
4319         * This value may be variable depending on boot parameters on IA64 and
4320         * powerpc.
4321         */
4322        pageblock_order = order;
4323}
4324#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4325
4326/*
4327 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4328 * is unused as pageblock_order is set at compile-time. See
4329 * include/linux/pageblock-flags.h for the values of pageblock_order based on
4330 * the kernel config.
4331 */
4332void __init set_pageblock_order(void)
4333{
4334}
4335
4336#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4337
4338/*
4339 * Set up the zone data structures:
4340 *   - mark all pages reserved
4341 *   - mark all memory queues empty
4342 *   - clear the memory bitmaps
4343 */
4344static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4345                unsigned long *zones_size, unsigned long *zholes_size)
4346{
4347        enum zone_type j;
4348        int nid = pgdat->node_id;
4349        unsigned long zone_start_pfn = pgdat->node_start_pfn;
4350        int ret;
4351
4352        pgdat_resize_init(pgdat);
4353        pgdat->nr_zones = 0;
4354        init_waitqueue_head(&pgdat->kswapd_wait);
4355        pgdat->kswapd_max_order = 0;
4356        pgdat_page_cgroup_init(pgdat);
4357
4358        for (j = 0; j < MAX_NR_ZONES; j++) {
4359                struct zone *zone = pgdat->node_zones + j;
4360                unsigned long size, realsize, memmap_pages;
4361
4362                size = zone_spanned_pages_in_node(nid, j, zones_size);
4363                realsize = size - zone_absent_pages_in_node(nid, j,
4364                                                                zholes_size);
4365
4366                /*
4367                 * Adjust realsize so that it accounts for how much memory
4368                 * is used by this zone for memmap. This affects the watermark
4369                 * and per-cpu initialisations
4370                 */
4371                memmap_pages =
4372                        PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4373                if (realsize >= memmap_pages) {
4374                        realsize -= memmap_pages;
4375                        if (memmap_pages)
4376                                printk(KERN_DEBUG
4377                                       "  %s zone: %lu pages used for memmap\n",
4378                                       zone_names[j], memmap_pages);
4379                } else
4380                        printk(KERN_WARNING
4381                                "  %s zone: %lu pages exceed realsize %lu\n",
4382                                zone_names[j], memmap_pages, realsize);
4383
4384                /* Account for reserved pages */
4385                if (j == 0 && realsize > dma_reserve) {
4386                        realsize -= dma_reserve;
4387                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4388                                        zone_names[0], dma_reserve);
4389                }
4390
4391                if (!is_highmem_idx(j))
4392                        nr_kernel_pages += realsize;
4393                nr_all_pages += realsize;
4394
4395                zone->spanned_pages = size;
4396                zone->present_pages = realsize;
4397#ifdef CONFIG_NUMA
4398                zone->node = nid;
4399                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4400                                                / 100;
4401                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4402#endif
4403                zone->name = zone_names[j];
4404                spin_lock_init(&zone->lock);
4405                spin_lock_init(&zone->lru_lock);
4406                zone_seqlock_init(zone);
4407                zone->zone_pgdat = pgdat;
4408
4409                zone_pcp_init(zone);
4410                lruvec_init(&zone->lruvec, zone);
4411                zap_zone_vm_stats(zone);
4412                zone->flags = 0;
4413                if (!size)
4414                        continue;
4415
4416                set_pageblock_order();
4417                setup_usemap(pgdat, zone, size);
4418                ret = init_currently_empty_zone(zone, zone_start_pfn,
4419                                                size, MEMMAP_EARLY);
4420                BUG_ON(ret);
4421                memmap_init(size, nid, j, zone_start_pfn);
4422                zone_start_pfn += size;
4423        }
4424}
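
/*
 * Illustrative sketch (not part of the kernel build): the memmap
 * accounting in the loop above, assuming 4KiB pages and a hypothetical
 * 64-byte struct page. A 1GiB zone (262144 pages) then loses 4096 pages
 * (16MiB, about 1.6%) of its realsize to the memmap.
 */
static unsigned long example_memmap_pages(unsigned long spanned_pages)
{
	const unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	const unsigned long sp_size = 64;	/* assumed sizeof(struct page) */

	/* PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT */
	return (spanned_pages * sp_size + page_size - 1) / page_size;
}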
4425
4426static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4427{
4428        /* Skip empty nodes */
4429        if (!pgdat->node_spanned_pages)
4430                return;
4431
4432#ifdef CONFIG_FLAT_NODE_MEM_MAP
4433        /* ia64 gets its own node_mem_map, before this, without bootmem */
4434        if (!pgdat->node_mem_map) {
4435                unsigned long size, start, end;
4436                struct page *map;
4437
4438                /*
4439                 * The zone's endpoints aren't required to be MAX_ORDER
4440                 * aligned but the node_mem_map endpoints must be, in order
4441                 * for the buddy allocator to function correctly.
4442                 */
4443                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4444                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4445                end = ALIGN(end, MAX_ORDER_NR_PAGES);
4446                size =  (end - start) * sizeof(struct page);
4447                map = alloc_remap(pgdat->node_id, size);
4448                if (!map)
4449                        map = alloc_bootmem_node_nopanic(pgdat, size);
4450                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
4451        }
4452#ifndef CONFIG_NEED_MULTIPLE_NODES
4453        /*
4454         * With no DISCONTIG, the global mem_map is just set as node 0's
4455         */
4456        if (pgdat == NODE_DATA(0)) {
4457                mem_map = NODE_DATA(0)->node_mem_map;
4458#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4459                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4460                        mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4461#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4462        }
4463#endif
4464#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4465}
4466
4467void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4468                unsigned long node_start_pfn, unsigned long *zholes_size)
4469{
4470        pg_data_t *pgdat = NODE_DATA(nid);
4471
4472        pgdat->node_id = nid;
4473        pgdat->node_start_pfn = node_start_pfn;
4474        calculate_node_totalpages(pgdat, zones_size, zholes_size);
4475
4476        alloc_node_mem_map(pgdat);
4477#ifdef CONFIG_FLAT_NODE_MEM_MAP
4478        printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4479                nid, (unsigned long)pgdat,
4480                (unsigned long)pgdat->node_mem_map);
4481#endif
4482
4483        free_area_init_core(pgdat, zones_size, zholes_size);
4484}
4485
4486#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4487
4488#if MAX_NUMNODES > 1
4489/*
4490 * Figure out the number of possible node ids.
4491 */
4492static void __init setup_nr_node_ids(void)
4493{
4494        unsigned int node;
4495        unsigned int highest = 0;
4496
4497        for_each_node_mask(node, node_possible_map)
4498                highest = node;
4499        nr_node_ids = highest + 1;
4500}
4501#else
4502static inline void setup_nr_node_ids(void)
4503{
4504}
4505#endif
4506
4507/**
4508 * node_map_pfn_alignment - determine the maximum internode alignment
4509 *
4510 * This function should be called after node map is populated and sorted.
4511 * It calculates the maximum power of two alignment which can distinguish
4512 * all the nodes.
4513 *
4514 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4515 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
4516 * nodes are shifted by 256MiB, it would indicate 256MiB.  Note that if only
4517 * the last node is shifted, 1GiB is enough and the function indicates so.
4518 *
4519 * This is used to test whether pfn -> nid mapping of the chosen memory
4520 * model has fine enough granularity to avoid incorrect mapping for the
4521 * populated node map.
4522 *
4523 * Returns the determined alignment in PFNs, or 0 if there is no alignment
4524 * requirement (single node).
4525 */
4526unsigned long __init node_map_pfn_alignment(void)
4527{
4528        unsigned long accl_mask = 0, last_end = 0;
4529        unsigned long start, end, mask;
4530        int last_nid = -1;
4531        int i, nid;
4532
4533        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
4534                if (!start || last_nid < 0 || last_nid == nid) {
4535                        last_nid = nid;
4536                        last_end = end;
4537                        continue;
4538                }
4539
4540                /*
4541                 * Start with a mask granular enough to pinpoint the
4542                 * start pfn and tick off bits one by one until it becomes
4543                 * too coarse to separate the current node from the last.
4544                 */
4545                mask = ~((1 << __ffs(start)) - 1);
4546                while (mask && last_end <= (start & (mask << 1)))
4547                        mask <<= 1;
4548
4549                /* accumulate all internode masks */
4550                accl_mask |= mask;
4551        }
4552
4553        /* convert mask to number of pages */
4554        return ~accl_mask + 1;
4555}
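
/*
 * Illustrative sketch (not part of the kernel build): the mask walk above
 * for two hypothetical nodes, node 0 = [0, 0x40000) and node 1 starting
 * at pfn 0x40000 (1GiB boundaries with 4KiB pages).
 */
static unsigned long example_pfn_alignment(void)
{
	unsigned long last_end = 0x40000;	/* end of node 0 */
	unsigned long start = 0x40000;		/* start of node 1 */
	unsigned long mask, accl_mask = 0;

	/* __ffs(0x40000) = 18, so the initial mask keeps bits 18 and up */
	mask = ~((1UL << 18) - 1);
	while (mask && last_end <= (start & (mask << 1)))
		mask <<= 1;			/* never coarsens here */
	accl_mask |= mask;

	return ~accl_mask + 1;			/* 0x40000 pfns = 1GiB */
}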
4556
4557/* Find the lowest pfn for a node */
4558static unsigned long __init find_min_pfn_for_node(int nid)
4559{
4560        unsigned long min_pfn = ULONG_MAX;
4561        unsigned long start_pfn;
4562        int i;
4563
4564        for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4565                min_pfn = min(min_pfn, start_pfn);
4566
4567        if (min_pfn == ULONG_MAX) {
4568                printk(KERN_WARNING
4569                        "Could not find start_pfn for node %d\n", nid);
4570                return 0;
4571        }
4572
4573        return min_pfn;
4574}
4575
4576/**
4577 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4578 *
4579 * It returns the minimum PFN based on information provided via
4580 * add_active_range().
4581 */
4582unsigned long __init find_min_pfn_with_active_regions(void)
4583{
4584        return find_min_pfn_for_node(MAX_NUMNODES);
4585}
4586
4587/*
4588 * early_calculate_totalpages()
4589 * Sum pages in active regions for movable zone.
4590 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4591 */
4592static unsigned long __init early_calculate_totalpages(void)
4593{
4594        unsigned long totalpages = 0;
4595        unsigned long start_pfn, end_pfn;
4596        int i, nid;
4597
4598        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4599                unsigned long pages = end_pfn - start_pfn;
4600
4601                totalpages += pages;
4602                if (pages)
4603                        node_set_state(nid, N_HIGH_MEMORY);
4604        }
4605        return totalpages;
4606}
4607
4608/*
4609 * Find the PFN at which the Movable zone begins in each node. Kernel memory
4610 * is spread evenly between nodes as long as the nodes have enough
4611 * memory. When they don't, some nodes will have more kernelcore than
4612 * others.
4613 */
4614static void __init find_zone_movable_pfns_for_nodes(void)
4615{
4616        int i, nid;
4617        unsigned long usable_startpfn;
4618        unsigned long kernelcore_node, kernelcore_remaining;
4619        /* save the state before borrowing the nodemask */
4620        nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4621        unsigned long totalpages = early_calculate_totalpages();
4622        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4623
4624        /*
4625         * If movablecore was specified, calculate the corresponding
4626         * size of kernelcore so that memory usable for
4627         * any allocation type is evenly spread. If both kernelcore
4628         * and movablecore are specified, then the value of kernelcore
4629         * will be used for required_kernelcore if it's greater than
4630         * what movablecore would have allowed.
4631         */
4632        if (required_movablecore) {
4633                unsigned long corepages;
4634
4635                /*
4636                 * Round up so that ZONE_MOVABLE is at least as large as what
4637                 * was requested by the user
4638                 */
4639                required_movablecore =
4640                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4641                corepages = totalpages - required_movablecore;
4642
4643                required_kernelcore = max(required_kernelcore, corepages);
4644        }
4645
4646        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4647        if (!required_kernelcore)
4648                goto out;
4649
4650        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4651        find_usable_zone_for_movable();
4652        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4653
4654restart:
4655        /* Spread kernelcore memory as evenly as possible throughout nodes */
4656        kernelcore_node = required_kernelcore / usable_nodes;
4657        for_each_node_state(nid, N_HIGH_MEMORY) {
4658                unsigned long start_pfn, end_pfn;
4659
4660                /*
4661                 * Recalculate kernelcore_node if the division per node
4662                 * now exceeds what is necessary to satisfy the requested
4663                 * amount of memory for the kernel
4664                 */
4665                if (required_kernelcore < kernelcore_node)
4666                        kernelcore_node = required_kernelcore / usable_nodes;
4667
4668                /*
4669                 * As the map is walked, we track how much memory is usable
4670                 * by the kernel using kernelcore_remaining. When it is
4671                 * 0, the rest of the node is usable by ZONE_MOVABLE
4672                 */
4673                kernelcore_remaining = kernelcore_node;
4674
4675                /* Go through each range of PFNs within this node */
4676                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4677                        unsigned long size_pages;
4678
4679                        start_pfn = max(start_pfn, zone_movable_pfn[nid]);
4680                        if (start_pfn >= end_pfn)
4681                                continue;
4682
4683                        /* Account for what is only usable for kernelcore */
4684                        if (start_pfn < usable_startpfn) {
4685                                unsigned long kernel_pages;
4686                                kernel_pages = min(end_pfn, usable_startpfn)
4687                                                                - start_pfn;
4688
4689                                kernelcore_remaining -= min(kernel_pages,
4690                                                        kernelcore_remaining);
4691                                required_kernelcore -= min(kernel_pages,
4692                                                        required_kernelcore);
4693
4694                                /* Continue if range is now fully accounted */
4695                                if (end_pfn <= usable_startpfn) {
4696
4697                                        /*
4698                                         * Push zone_movable_pfn to the end so
4699                                         * that if we have to rebalance
4700                                         * kernelcore across nodes, we will
4701                                         * not double account here
4702                                         */
4703                                        zone_movable_pfn[nid] = end_pfn;
4704                                        continue;
4705                                }
4706                                start_pfn = usable_startpfn;
4707                        }
4708
4709                        /*
4710                         * The usable PFN range for ZONE_MOVABLE is from
4711                         * start_pfn->end_pfn. Calculate size_pages as the
4712                         * number of pages used as kernelcore
4713                         */
4714                        size_pages = end_pfn - start_pfn;
4715                        if (size_pages > kernelcore_remaining)
4716                                size_pages = kernelcore_remaining;
4717                        zone_movable_pfn[nid] = start_pfn + size_pages;
4718
4719                        /*
4720                         * Some kernelcore has been met, update counts and
4721                         * break if the kernelcore for this node has been
4722                         * satisfied
4723                         */
4724                        required_kernelcore -= min(required_kernelcore,
4725                                                                size_pages);
4726                        kernelcore_remaining -= size_pages;
4727                        if (!kernelcore_remaining)
4728                                break;
4729                }
4730        }
4731
4732        /*
4733         * If there is still required_kernelcore, we do another pass with one
4734         * less node in the count. This will push zone_movable_pfn[nid] further
4735         * along on the nodes that still have memory until kernelcore is
4736 * satisfied
4737         */
4738        usable_nodes--;
4739        if (usable_nodes && required_kernelcore > usable_nodes)
4740                goto restart;
4741
4742        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4743        for (nid = 0; nid < MAX_NUMNODES; nid++)
4744                zone_movable_pfn[nid] =
4745                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4746
4747out:
4748        /* restore the node_state */
4749        node_states[N_HIGH_MEMORY] = saved_node_state;
4750}
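
/*
 * Illustrative sketch (not part of the kernel build): the even-spreading
 * arithmetic above, heavily simplified, for a hypothetical two-node
 * machine where each node spans 262144 pages (1GiB at 4KiB pages) with
 * no holes, booted with kernelcore equal to 1GiB.
 */
static unsigned long example_spread_kernelcore(void)
{
	unsigned long required_kernelcore = 262144;	/* 1GiB in pages */
	const int usable_nodes = 2;
	unsigned long kernelcore_node = required_kernelcore / usable_nodes;
	unsigned long node_start_pfn[2] = { 0, 262144 };
	unsigned long zone_movable_pfn[2];
	int nid;

	/* each node keeps kernelcore_node pages before ZONE_MOVABLE begins */
	for (nid = 0; nid < usable_nodes; nid++)
		zone_movable_pfn[nid] = node_start_pfn[nid] + kernelcore_node;

	return zone_movable_pfn[1];	/* 0x20000 on node 0, 0x60000 here */
}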
4751
4752/* Any regular memory on that node? */
4753static void check_for_regular_memory(pg_data_t *pgdat)
4754{
4755#ifdef CONFIG_HIGHMEM
4756        enum zone_type zone_type;
4757
4758        for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4759                struct zone *zone = &pgdat->node_zones[zone_type];
4760                if (zone->present_pages) {
4761                        node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4762                        break;
4763                }
4764        }
4765#endif
4766}
4767
4768/**
4769 * free_area_init_nodes - Initialise all pg_data_t and zone data
4770 * @max_zone_pfn: an array of max PFNs for each zone
4771 *
4772 * This will call free_area_init_node() for each active node in the system.
4773 * Using the page ranges provided by add_active_range(), the size of each
4774 * zone in each node and their holes is calculated. If the maximum PFNs of
4775 * two adjacent zones match, the higher zone is assumed to be empty.
4776 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4777 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4778 * starts where the previous one ended. For example, ZONE_DMA32 starts
4779 * at arch_max_dma_pfn.
4780 */
4781void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4782{
4783        unsigned long start_pfn, end_pfn;
4784        int i, nid;
4785
4786        /* Record where the zone boundaries are */
4787        memset(arch_zone_lowest_possible_pfn, 0,
4788                                sizeof(arch_zone_lowest_possible_pfn));
4789        memset(arch_zone_highest_possible_pfn, 0,
4790                                sizeof(arch_zone_highest_possible_pfn));
4791        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4792        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4793        for (i = 1; i < MAX_NR_ZONES; i++) {
4794                if (i == ZONE_MOVABLE)
4795                        continue;
4796                arch_zone_lowest_possible_pfn[i] =
4797                        arch_zone_highest_possible_pfn[i-1];
4798                arch_zone_highest_possible_pfn[i] =
4799                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4800        }
4801        arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4802        arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4803
4804        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4805        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4806        find_zone_movable_pfns_for_nodes();
4807
4808        /* Print out the zone ranges */
4809        printk("Zone ranges:\n");
4810        for (i = 0; i < MAX_NR_ZONES; i++) {
4811                if (i == ZONE_MOVABLE)
4812                        continue;
4813                printk(KERN_CONT "  %-8s ", zone_names[i]);
4814                if (arch_zone_lowest_possible_pfn[i] ==
4815                                arch_zone_highest_possible_pfn[i])
4816                        printk(KERN_CONT "empty\n");
4817                else
4818                        printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
4819                                arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
4820                                (arch_zone_highest_possible_pfn[i]
4821                                        << PAGE_SHIFT) - 1);
4822        }
4823
4824        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4825        printk("Movable zone start for each node\n");
4826        for (i = 0; i < MAX_NUMNODES; i++) {
4827                if (zone_movable_pfn[i])
4828                        printk("  Node %d: %#010lx\n", i,
4829                               zone_movable_pfn[i] << PAGE_SHIFT);
4830        }
4831
4832        /* Print out the early_node_map[] */
4833        printk("Early memory node ranges\n");
4834        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4835                printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
4836                       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
4837
4838        /* Initialise every node */
4839        mminit_verify_pageflags_layout();
4840        setup_nr_node_ids();
4841        for_each_online_node(nid) {
4842                pg_data_t *pgdat = NODE_DATA(nid);
4843                free_area_init_node(nid, NULL,
4844                                find_min_pfn_for_node(nid), NULL);
4845
4846                /* Any memory on that node */
4847                if (pgdat->node_present_pages)
4848                        node_set_state(nid, N_HIGH_MEMORY);
4849                check_for_regular_memory(pgdat);
4850        }
4851}
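
/*
 * Illustrative sketch (not part of the kernel build): the boundary
 * derivation above for two hypothetical zones with
 * max_zone_pfn = { 4096, 262144 } and memory starting at pfn 0.
 */
static unsigned long example_zone_bounds(void)
{
	unsigned long max_zone_pfn[2] = { 4096, 262144 };
	unsigned long lo[2], hi[2];
	int i;

	lo[0] = 0;			/* find_min_pfn_with_active_regions() */
	hi[0] = max_zone_pfn[0];
	for (i = 1; i < 2; i++) {
		lo[i] = hi[i - 1];	/* each zone starts where the last ended */
		hi[i] = max_zone_pfn[i] > lo[i] ? max_zone_pfn[i] : lo[i];
	}
	/* zone 0 = [0, 4096), zone 1 = [4096, 262144) */
	return hi[1];
}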
4852
4853static int __init cmdline_parse_core(char *p, unsigned long *core)
4854{
4855        unsigned long long coremem;
4856        if (!p)
4857                return -EINVAL;
4858
4859        coremem = memparse(p, &p);
4860        *core = coremem >> PAGE_SHIFT;
4861
4862        /* Paranoid check that UL is enough for the coremem value */
4863        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4864
4865        return 0;
4866}
4867
4868/*
4869 * kernelcore=size sets the amount of memory for use by allocations that
4870 * cannot be reclaimed or migrated.
4871 */
4872static int __init cmdline_parse_kernelcore(char *p)
4873{
4874        return cmdline_parse_core(p, &required_kernelcore);
4875}
4876
4877/*
4878 * movablecore=size sets the amount of memory for use by allocations that
4879 * can be reclaimed or migrated.
4880 */
4881static int __init cmdline_parse_movablecore(char *p)
4882{
4883        return cmdline_parse_core(p, &required_movablecore);
4884}
4885
4886early_param("kernelcore", cmdline_parse_kernelcore);
4887early_param("movablecore", cmdline_parse_movablecore);
4888
4889#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4890
4891/**
4892 * set_dma_reserve - set the specified number of pages reserved in the first zone
4893 * @new_dma_reserve: The number of pages to mark reserved
4894 *
4895 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4896 * In the DMA zone, a significant percentage may be consumed by kernel image
4897 * and other unfreeable allocations which can skew the watermarks badly. This
4898 * function may optionally be used to account for unfreeable pages in the
4899 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4900 * smaller per-cpu batchsize.
4901 */
4902void __init set_dma_reserve(unsigned long new_dma_reserve)
4903{
4904        dma_reserve = new_dma_reserve;
4905}
4906
4907void __init free_area_init(unsigned long *zones_size)
4908{
4909        free_area_init_node(0, zones_size,
4910                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4911}
4912
4913static int page_alloc_cpu_notify(struct notifier_block *self,
4914                                 unsigned long action, void *hcpu)
4915{
4916        int cpu = (unsigned long)hcpu;
4917
4918        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4919                lru_add_drain_cpu(cpu);
4920                drain_pages(cpu);
4921
4922                /*
4923                 * Spill the event counters of the dead processor
4924                 * into the current processor's event counters.
4925                 * This artificially elevates the count of the current
4926                 * processor.
4927                 */
4928                vm_events_fold_cpu(cpu);
4929
4930                /*
4931                 * Zero the differential counters of the dead processor
4932                 * so that the vm statistics are consistent.
4933                 *
4934                 * This is only okay since the processor is dead and cannot
4935                 * race with what we are doing.
4936                 */
4937                refresh_cpu_vm_stats(cpu);
4938        }
4939        return NOTIFY_OK;
4940}
4941
4942void __init page_alloc_init(void)
4943{
4944        hotcpu_notifier(page_alloc_cpu_notify, 0);
4945}
4946
4947/*
4948 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4949 *      or min_free_kbytes changes.
4950 */
4951static void calculate_totalreserve_pages(void)
4952{
4953        struct pglist_data *pgdat;
4954        unsigned long reserve_pages = 0;
4955        enum zone_type i, j;
4956
4957        for_each_online_pgdat(pgdat) {
4958                for (i = 0; i < MAX_NR_ZONES; i++) {
4959                        struct zone *zone = pgdat->node_zones + i;
4960                        unsigned long max = 0;
4961
4962                        /* Find valid and maximum lowmem_reserve in the zone */
4963                        for (j = i; j < MAX_NR_ZONES; j++) {
4964                                if (zone->lowmem_reserve[j] > max)
4965                                        max = zone->lowmem_reserve[j];
4966                        }
4967
4968                        /* we treat the high watermark as reserved pages. */
4969                        max += high_wmark_pages(zone);
4970
4971                        if (max > zone->present_pages)
4972                                max = zone->present_pages;
4973                        reserve_pages += max;
4974                        /*
4975                         * Lowmem reserves are not available to
4976                         * GFP_HIGHUSER page cache allocations and
4977                         * kswapd tries to balance zones to their high
4978                         * watermark.  As a result, neither should be
4979                         * regarded as dirtyable memory, to prevent a
4980                         * situation where reclaim has to clean pages
4981                         * in order to balance the zones.
4982                         */
4983                        zone->dirty_balance_reserve = max;
4984                }
4985        }
4986        dirty_balance_reserve = reserve_pages;
4987        totalreserve_pages = reserve_pages;
4988}
4989
4990/*
4991 * setup_per_zone_lowmem_reserve - called whenever
4992 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4993 *      has a correct pages reserved value, so an adequate number of
4994 *      pages are left in the zone after a successful __alloc_pages().
4995 */
4996static void setup_per_zone_lowmem_reserve(void)
4997{
4998        struct pglist_data *pgdat;
4999        enum zone_type j, idx;
5000
5001        for_each_online_pgdat(pgdat) {
5002                for (j = 0; j < MAX_NR_ZONES; j++) {
5003                        struct zone *zone = pgdat->node_zones + j;
5004                        unsigned long present_pages = zone->present_pages;
5005
5006                        zone->lowmem_reserve[j] = 0;
5007
5008                        idx = j;
5009                        while (idx) {
5010                                struct zone *lower_zone;
5011
5012                                idx--;
5013
5014                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
5015                                        sysctl_lowmem_reserve_ratio[idx] = 1;
5016
5017                                lower_zone = pgdat->node_zones + idx;
5018                                lower_zone->lowmem_reserve[j] = present_pages /
5019                                        sysctl_lowmem_reserve_ratio[idx];
5020                                present_pages += lower_zone->present_pages;
5021                        }
5022                }
5023        }
5024
5025        /* update totalreserve_pages */
5026        calculate_totalreserve_pages();
5027}
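
/*
 * Illustrative sketch (not part of the kernel build): the reserve
 * calculation above for a hypothetical node with a 4096-page DMA zone,
 * a 258048-page Normal zone and sysctl_lowmem_reserve_ratio[DMA] = 256.
 */
static unsigned long example_lowmem_reserve(void)
{
	unsigned long normal_present = 258048;
	unsigned long ratio_dma = 256;		/* assumed sysctl value */

	/* pages DMA holds back from allocations that could use Normal */
	return normal_present / ratio_dma;	/* 1008 pages */
}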
5028
5029static void __setup_per_zone_wmarks(void)
5030{
5031        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
5032        unsigned long lowmem_pages = 0;
5033        struct zone *zone;
5034        unsigned long flags;
5035
5036        /* Calculate total number of !ZONE_HIGHMEM pages */
5037        for_each_zone(zone) {
5038                if (!is_highmem(zone))
5039                        lowmem_pages += zone->present_pages;
5040        }
5041
5042        for_each_zone(zone) {
5043                u64 tmp;
5044
5045                spin_lock_irqsave(&zone->lock, flags);
5046                tmp = (u64)pages_min * zone->present_pages;
5047                do_div(tmp, lowmem_pages);
5048                if (is_highmem(zone)) {
5049                        /*
5050                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
5051                         * need highmem pages, so cap pages_min to a small
5052                         * value here.
5053                         *
5054                         * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
5055                         * deltas control async page reclaim, and so should
5056                         * not be capped for highmem.
5057                         */
5058                        int min_pages;
5059
5060                        min_pages = zone->present_pages / 1024;
5061                        if (min_pages < SWAP_CLUSTER_MAX)
5062                                min_pages = SWAP_CLUSTER_MAX;
5063                        if (min_pages > 128)
5064                                min_pages = 128;
5065                        zone->watermark[WMARK_MIN] = min_pages;
5066                } else {
5067                        /*
5068                         * If it's a lowmem zone, reserve a number of pages
5069                         * proportionate to the zone's size.
5070                         */
5071                        zone->watermark[WMARK_MIN] = tmp;
5072                }
5073
5074                zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
5075                zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5076
5077                zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
5078                zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
5079                zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
5080
5081                setup_zone_migrate_reserve(zone);
5082                spin_unlock_irqrestore(&zone->lock, flags);
5083        }
5084
5085        /* update totalreserve_pages */
5086        calculate_totalreserve_pages();
5087}
5088
5089/**
5090 * setup_per_zone_wmarks - called when min_free_kbytes changes
5091 * or when memory is hot-{added|removed}
5092 *
5093 * Ensures that the watermark[min,low,high] values for each zone are set
5094 * correctly with respect to min_free_kbytes.
5095 */
5096void setup_per_zone_wmarks(void)
5097{
5098        mutex_lock(&zonelists_mutex);
5099        __setup_per_zone_wmarks();
5100        mutex_unlock(&zonelists_mutex);
5101}
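
/*
 * Illustrative sketch (not part of the kernel build): the watermark
 * arithmetic in __setup_per_zone_wmarks() for a single hypothetical
 * lowmem zone that owns all of lowmem, with min_free_kbytes = 4096 and
 * 4KiB pages.
 */
static void example_wmarks(unsigned long wmark[3])
{
	unsigned long pages_min = 4096 >> (12 - 10);	/* 1024 pages */
	unsigned long zone_present = 262144, lowmem_pages = 262144;
	unsigned long tmp = pages_min * zone_present / lowmem_pages;

	wmark[0] = tmp;			/* WMARK_MIN  = 1024 pages */
	wmark[1] = tmp + (tmp >> 2);	/* WMARK_LOW  = 1280 pages */
	wmark[2] = tmp + (tmp >> 1);	/* WMARK_HIGH = 1536 pages */
}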
5102
5103/*
5104 * The inactive anon list should be small enough that the VM never has to
5105 * do too much work, but large enough that each inactive page has a chance
5106 * to be referenced again before it is swapped out.
5107 *
5108 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
5109 * INACTIVE_ANON pages on this zone's LRU, maintained by the
5110 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
5111 * the anonymous pages are kept on the inactive list.
5112 *
5113 * total     target    max
5114 * memory    ratio     inactive anon
5115 * -------------------------------------
5116 *   10MB       1         5MB
5117 *  100MB       1        50MB
5118 *    1GB       3       250MB
5119 *   10GB      10       0.9GB
5120 *  100GB      31         3GB
5121 *    1TB     101        10GB
5122 *   10TB     320        32GB
5123 */
5124static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
5125{
5126        unsigned int gb, ratio;
5127
5128        /* Zone size in gigabytes */
5129        gb = zone->present_pages >> (30 - PAGE_SHIFT);
5130        if (gb)
5131                ratio = int_sqrt(10 * gb);
5132        else
5133                ratio = 1;
5134
5135        zone->inactive_ratio = ratio;
5136}
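
/*
 * Illustrative sketch (not part of the kernel build): the ratio above for
 * a hypothetical 4GiB zone with 4KiB pages, with a plain integer sqrt
 * standing in for the kernel's int_sqrt().
 */
static unsigned int example_inactive_ratio(void)
{
	unsigned long present_pages = 1048576;		/* 4GiB of 4KiB pages */
	unsigned int gb = present_pages >> (30 - 12);	/* 4 */
	unsigned int x = 10 * gb, r = 0;

	while ((r + 1) * (r + 1) <= x)	/* integer sqrt(40) = 6 */
		r++;
	return gb ? r : 1;		/* 6, i.e. roughly 14% inactive anon */
}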
5137
5138static void __meminit setup_per_zone_inactive_ratio(void)
5139{
5140        struct zone *zone;
5141
5142        for_each_zone(zone)
5143                calculate_zone_inactive_ratio(zone);
5144}
5145
5146/*
5147 * Initialise min_free_kbytes.
5148 *
5149 * For small machines we want it small (128k min).  For large machines
5150 * we want it large (64MB max).  But it is not linear, because network
5151 * bandwidth does not increase linearly with machine size.  We use
5152 *
5153 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
5154 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
5155 *
5156 * which yields
5157 *
5158 * 16MB:        512k
5159 * 32MB:        724k
5160 * 64MB:        1024k
5161 * 128MB:       1448k
5162 * 256MB:       2048k
5163 * 512MB:       2896k
5164 * 1024MB:      4096k
5165 * 2048MB:      5792k
5166 * 4096MB:      8192k
5167 * 8192MB:      11584k
5168 * 16384MB:     16384k
5169 */
5170int __meminit init_per_zone_wmark_min(void)
5171{
5172        unsigned long lowmem_kbytes;
5173
5174        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
5175
5176        min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
5177        if (min_free_kbytes < 128)
5178                min_free_kbytes = 128;
5179        if (min_free_kbytes > 65536)
5180                min_free_kbytes = 65536;
5181        setup_per_zone_wmarks();
5182        refresh_zone_stat_thresholds();
5183        setup_per_zone_lowmem_reserve();
5184        setup_per_zone_inactive_ratio();
5185        return 0;
5186}
5187module_init(init_per_zone_wmark_min)
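
/*
 * Illustrative sketch (not part of the kernel build): the formula above
 * for a machine with 1GiB of lowmem, with a plain integer sqrt standing
 * in for int_sqrt(). Matches the "1024MB: 4096k" row of the table.
 */
static unsigned long example_min_free_kbytes(void)
{
	unsigned long lowmem_kbytes = 1024 * 1024;	/* 1GiB in KiB */
	unsigned long x = lowmem_kbytes * 16, r = 0;

	while ((r + 1) * (r + 1) <= x)	/* sqrt(16777216) = 4096 */
		r++;
	if (r < 128)
		r = 128;
	if (r > 65536)
		r = 65536;
	return r;			/* min_free_kbytes = 4096k */
}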
5188
5189/*
5190 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
5191 *      that we can recalculate the zone watermarks whenever min_free_kbytes
5192 *      changes.
5193 */
5194int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
5195        void __user *buffer, size_t *length, loff_t *ppos)
5196{
5197        proc_dointvec(table, write, buffer, length, ppos);
5198        if (write)
5199                setup_per_zone_wmarks();
5200        return 0;
5201}
5202
5203#ifdef CONFIG_NUMA
5204int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
5205        void __user *buffer, size_t *length, loff_t *ppos)
5206{
5207        struct zone *zone;
5208        int rc;
5209
5210        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5211        if (rc)
5212                return rc;
5213
5214        for_each_zone(zone)
5215                zone->min_unmapped_pages = (zone->present_pages *
5216                                sysctl_min_unmapped_ratio) / 100;
5217        return 0;
5218}
5219
5220int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
5221        void __user *buffer, size_t *length, loff_t *ppos)
5222{
5223        struct zone *zone;
5224        int rc;
5225
5226        rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
5227        if (rc)
5228                return rc;
5229
5230        for_each_zone(zone)
5231                zone->min_slab_pages = (zone->present_pages *
5232                                sysctl_min_slab_ratio) / 100;
5233        return 0;
5234}
5235#endif
5236
5237/*
5238 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5239 *      proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
5240 *      whenever sysctl_lowmem_reserve_ratio changes.
5241 *
5242 * The reserve ratio has no relation to the minimum watermarks. The
5243 * lowmem reserve ratio only makes sense as a function of the boot-time
5244 * zone sizes.
5245 */
5246int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
5247        void __user *buffer, size_t *length, loff_t *ppos)
5248{
5249        proc_dointvec_minmax(table, write, buffer, length, ppos);
5250        setup_per_zone_lowmem_reserve();
5251        return 0;
5252}
5253
5254/*
5255 * percpu_pagelist_fraction - changes pcp->high for each zone on each
5256 * cpu.  It is the fraction of a zone's total pages that a hot per-cpu
5257 * pagelist can hold before it gets flushed back to the buddy allocator.
5258 */
5259
5260int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
5261        void __user *buffer, size_t *length, loff_t *ppos)
5262{
5263        struct zone *zone;
5264        unsigned int cpu;
5265        int ret;
5266
5267        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5268        if (!write || (ret < 0))
5269                return ret;
5270        for_each_populated_zone(zone) {
5271                for_each_possible_cpu(cpu) {
5272                        unsigned long  high;
5273                        high = zone->present_pages / percpu_pagelist_fraction;
5274                        setup_pagelist_highmark(
5275                                per_cpu_ptr(zone->pageset, cpu), high);
5276                }
5277        }
5278        return 0;
5279}
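
/*
 * Illustrative sketch (not part of the kernel build): the pcp->high value
 * set above for a hypothetical 1GiB zone (262144 pages of 4KiB) with
 * percpu_pagelist_fraction = 8.
 */
static unsigned long example_pcp_high(void)
{
	unsigned long present_pages = 262144;
	unsigned long fraction = 8;		/* assumed sysctl value */

	/* each CPU's hot list may cache up to 1/8 of the zone */
	return present_pages / fraction;	/* 32768 pages = 128MiB */
}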
5280
5281int hashdist = HASHDIST_DEFAULT;
5282
5283#ifdef CONFIG_NUMA
5284static int __init set_hashdist(char *str)
5285{
5286        if (!str)
5287                return 0;
5288        hashdist = simple_strtoul(str, &str, 0);
5289        return 1;
5290}
5291__setup("hashdist=", set_hashdist);
5292#endif
5293
5294/*
5295 * allocate a large system hash table from bootmem
5296 * - it is assumed that the hash table must contain an exact power-of-2
5297 *   quantity of entries
5298 * - limit is the number of hash buckets, not the total allocation size
5299 */
5300void *__init alloc_large_system_hash(const char *tablename,
5301                                     unsigned long bucketsize,
5302                                     unsigned long numentries,
5303                                     int scale,
5304                                     int flags,
5305                                     unsigned int *_hash_shift,
5306                                     unsigned int *_hash_mask,
5307                                     unsigned long low_limit,
5308                                     unsigned long high_limit)
5309{
5310        unsigned long long max = high_limit;
5311        unsigned long log2qty, size;
5312        void *table = NULL;
5313
5314        /* allow the kernel cmdline to have a say */
5315        if (!numentries) {
5316                /* round applicable memory size up to nearest megabyte */
5317                numentries = nr_kernel_pages;
5318                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
5319                numentries >>= 20 - PAGE_SHIFT;
5320                numentries <<= 20 - PAGE_SHIFT;
5321
5322                /* limit to 1 bucket per 2^scale bytes of low memory */
5323                if (scale > PAGE_SHIFT)
5324                        numentries >>= (scale - PAGE_SHIFT);
5325                else
5326                        numentries <<= (PAGE_SHIFT - scale);
5327
5328                /* Make sure we've got at least a 0-order allocation.. */
5329                if (unlikely(flags & HASH_SMALL)) {
5330                        /* Makes no sense without HASH_EARLY */
5331                        WARN_ON(!(flags & HASH_EARLY));
5332                        if (!(numentries >> *_hash_shift)) {
5333                                numentries = 1UL << *_hash_shift;
5334                                BUG_ON(!numentries);
5335                        }
5336                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
5337                        numentries = PAGE_SIZE / bucketsize;
5338        }
5339        numentries = roundup_pow_of_two(numentries);
5340
5341        /* limit allocation size to 1/16 total memory by default */
5342        if (max == 0) {
5343                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
5344                do_div(max, bucketsize);
5345        }
5346        max = min(max, 0x80000000ULL);
5347
5348        if (numentries < low_limit)
5349                numentries = low_limit;
5350        if (numentries > max)
5351                numentries = max;
5352
5353        log2qty = ilog2(numentries);
5354
5355        do {
5356                size = bucketsize << log2qty;
5357                if (flags & HASH_EARLY)
5358                        table = alloc_bootmem_nopanic(size);
5359                else if (hashdist)
5360                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
5361                else {
5362                        /*
5363                         * If bucketsize is not a power of two, we may free
5364                         * some pages at the end of the hash table, which
5365                         * alloc_pages_exact() does automatically.
5366                         */
5367                        if (get_order(size) < MAX_ORDER) {
5368                                table = alloc_pages_exact(size, GFP_ATOMIC);
5369                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
5370                        }
5371                }
5372        } while (!table && size > PAGE_SIZE && --log2qty);
5373
5374        if (!table)
5375                panic("Failed to allocate %s hash table\n", tablename);
5376
5377        printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
5378               tablename,
5379               (1UL << log2qty),
5380               ilog2(size) - PAGE_SHIFT,
5381               size);
5382
5383        if (_hash_shift)
5384                *_hash_shift = log2qty;
5385        if (_hash_mask)
5386                *_hash_mask = (1 << log2qty) - 1;
5387
5388        return table;
5389}
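
/*
 * Illustrative sketch (not part of the kernel build): the entry-count
 * sizing above for a hypothetical machine with 1GiB of kernel pages
 * (262144 pages of 4KiB) and scale = 14, i.e. one bucket per 16KiB of
 * low memory.
 */
static unsigned long example_hash_entries(unsigned int *shift,
					  unsigned int *mask)
{
	unsigned long numentries = 262144;	/* nr_kernel_pages, MiB-aligned */
	const int scale = 14, page_shift = 12;

	numentries >>= (scale - page_shift);	/* 65536 buckets */
	/* already a power of two, so roundup_pow_of_two() changes nothing */
	*shift = 16;				/* log2qty */
	*mask = (1U << 16) - 1;			/* 65535 */
	return numentries;
}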
5390
5391/* Return a pointer to the bitmap storing bits affecting a block of pages */
5392static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
5393                                                        unsigned long pfn)
5394{
5395#ifdef CONFIG_SPARSEMEM
5396        return __pfn_to_section(pfn)->pageblock_flags;
5397#else
5398        return zone->pageblock_flags;
5399#endif /* CONFIG_SPARSEMEM */
5400}
5401
5402static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5403{
5404#ifdef CONFIG_SPARSEMEM
5405        pfn &= (PAGES_PER_SECTION-1);
5406        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5407#else
5408        pfn = pfn - zone->zone_start_pfn;
5409        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
5410#endif /* CONFIG_SPARSEMEM */
5411}
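
/*
 * Illustrative sketch (not part of the kernel build): the bit-index
 * arithmetic above for the flatmem case, assuming pageblock_order = 10,
 * NR_PAGEBLOCK_BITS = 4 and a zone starting at pfn 0.
 */
static int example_pfn_to_bitidx(unsigned long pfn)
{
	const int order = 10, bits = 4;

	/* pfn 5000 lies in pageblock 4, so its flags begin at bit 16 */
	return (pfn >> order) * bits;
}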
5412
5413/**
5414 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
5415 * @page: The page within the block of interest
5416 * @start_bitidx: The first bit of interest to retrieve
5417 * @end_bitidx: The last bit of interest
5418 * returns pageblock_bits flags
5419 */
5420unsigned long get_pageblock_flags_group(struct page *page,
5421                                        int start_bitidx, int end_bitidx)
5422{
5423        struct zone *zone;
5424        unsigned long *bitmap;
5425        unsigned long pfn, bitidx;
5426        unsigned long flags = 0;
5427        unsigned long value = 1;
5428
5429        zone = page_zone(page);
5430        pfn = page_to_pfn(page);
5431        bitmap = get_pageblock_bitmap(zone, pfn);
5432        bitidx = pfn_to_bitidx(zone, pfn);
5433
5434        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5435                if (test_bit(bitidx + start_bitidx, bitmap))
5436                        flags |= value;
5437
5438        return flags;
5439}
5440
5441/**
5442 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5443 * @page: The page within the block of interest
5444 * @start_bitidx: The first bit of interest
5445 * @end_bitidx: The last bit of interest
5446 * @flags: The flags to set
5447 */
5448void set_pageblock_flags_group(struct page *page, unsigned long flags,
5449                                        int start_bitidx, int end_bitidx)
5450{
5451        struct zone *zone;
5452        unsigned long *bitmap;
5453        unsigned long pfn, bitidx;
5454        unsigned long value = 1;
5455
5456        zone = page_zone(page);
5457        pfn = page_to_pfn(page);
5458        bitmap = get_pageblock_bitmap(zone, pfn);
5459        bitidx = pfn_to_bitidx(zone, pfn);
5460        VM_BUG_ON(pfn < zone->zone_start_pfn);
5461        VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
5462
5463        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5464                if (flags & value)
5465                        __set_bit(bitidx + start_bitidx, bitmap);
5466                else
5467                        __clear_bit(bitidx + start_bitidx, bitmap);
5468}
5469
5470/*
5471 * This is designed as a helper function; please see page_isolation.c too.
5472 * It sets/clears a pageblock's type to ISOLATE; the page allocator never
5473 * allocates memory from an ISOLATE block.
5474 */
5475
5476static int
5477__count_immobile_pages(struct zone *zone, struct page *page, int count)
5478{
5479        unsigned long pfn, iter, found;
5480        int mt;
5481
5482        /*
5483         * To avoid noisy data, lru_add_drain_all() should be called first.
5484         * If the zone is ZONE_MOVABLE, it never contains immobile pages.
5485         */
5486        if (zone_idx(zone) == ZONE_MOVABLE)
5487                return true;
5488        mt = get_pageblock_migratetype(page);
5489        if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
5490                return true;
5491
5492        pfn = page_to_pfn(page);
5493        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5494                unsigned long check = pfn + iter;
5495
5496                if (!pfn_valid_within(check))
5497                        continue;
5498
5499                page = pfn_to_page(check);
5500                if (!page_count(page)) {
5501                        if (PageBuddy(page))
5502                                iter += (1 << page_order(page)) - 1;
5503                        continue;
5504                }
5505                if (!PageLRU(page))
5506                        found++;
5507                /*
5508                 * If there are RECLAIMABLE pages, we need to check them.
5509                 * For now, memory offlining itself doesn't call shrink_slab()
5510                 * and this still needs to be fixed.
5511                 */
5512                /*
5513                 * If the page is not RAM, page_count() should be 0 and
5514                 * no more checks are needed. This is a _used_, non-movable page.
5515                 *
5516                 * The problematic thing here is PG_reserved pages. PG_reserved
5517                 * is set to both of a memory hole page and a _used_ kernel
5518                 * page at boot.
5519                 */
5520                if (found > count)
5521                        return false;
5522        }
5523        return true;
5524}
5525
5526bool is_pageblock_removable_nolock(struct page *page)
5527{
5528        struct zone *zone;
5529        unsigned long pfn;
5530
5531        /*
5532         * We have to be careful here because we are iterating over memory
5533         * sections which are not zone aware so we might end up outside of
5534         * the zone but still within the section.
5535         * We must also be careful about the node. If the node is offline
5536         * its NODE_DATA will be NULL - see page_zone.
5537         */
5538        if (!node_online(page_to_nid(page)))
5539                return false;
5540
5541        zone = page_zone(page);
5542        pfn = page_to_pfn(page);
5543        if (zone->zone_start_pfn > pfn ||
5544                        zone->zone_start_pfn + zone->spanned_pages <= pfn)
5545                return false;
5546
5547        return __count_immobile_pages(zone, page, 0);
5548}
5549
5550int set_migratetype_isolate(struct page *page)
5551{
5552        struct zone *zone;
5553        unsigned long flags, pfn;
5554        struct memory_isolate_notify arg;
5555        int notifier_ret;
5556        int ret = -EBUSY;
5557
5558        zone = page_zone(page);
5559
5560        spin_lock_irqsave(&zone->lock, flags);
5561
5562        pfn = page_to_pfn(page);
5563        arg.start_pfn = pfn;
5564        arg.nr_pages = pageblock_nr_pages;
5565        arg.pages_found = 0;
5566
5567        /*
5568         * It may be possible to isolate a pageblock even if the
5569         * migratetype is not MIGRATE_MOVABLE. The memory isolation
5570         * notifier chain is used by balloon drivers to return the
5571         * number of pages in a range that are held by the balloon
5572         * driver to shrink memory. If all the pages are accounted for
5573         * by balloons, are free, or on the LRU, isolation can continue.
5574         * Later, for example, when memory hotplug notifier runs, these
5575         * pages reported as "can be isolated" should be isolated (freed)
5576         * by the balloon driver through the memory notifier chain.
5577         */
5578        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
5579        notifier_ret = notifier_to_errno(notifier_ret);
5580        if (notifier_ret)
5581                goto out;
5582        /*
5583         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
5584         * We just check MOVABLE pages.
5585         */
5586        if (__count_immobile_pages(zone, page, arg.pages_found))
5587                ret = 0;
5588
5589        /*
5590         * immobile means "not-on-LRU" pages. If the immobile count is larger
5591         * than the removable-by-driver pages reported by the notifier, we fail.
5592         */
5593
5594out:
5595        if (!ret) {
5596                set_pageblock_migratetype(page, MIGRATE_ISOLATE);
5597                move_freepages_block(zone, page, MIGRATE_ISOLATE);
5598        }
5599
5600        spin_unlock_irqrestore(&zone->lock, flags);
5601        if (!ret)
5602                drain_all_pages();
5603        return ret;
5604}
5605
5606void unset_migratetype_isolate(struct page *page, unsigned migratetype)
5607{
5608        struct zone *zone;
5609        unsigned long flags;
5610        zone = page_zone(page);
5611        spin_lock_irqsave(&zone->lock, flags);
5612        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5613                goto out;
5614        set_pageblock_migratetype(page, migratetype);
5615        move_freepages_block(zone, page, migratetype);
5616out:
5617        spin_unlock_irqrestore(&zone->lock, flags);
5618}
5619
5620#ifdef CONFIG_CMA
5621
5622static unsigned long pfn_max_align_down(unsigned long pfn)
5623{
5624        return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
5625                             pageblock_nr_pages) - 1);
5626}
5627
5628static unsigned long pfn_max_align_up(unsigned long pfn)
5629{
5630        return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
5631                                pageblock_nr_pages));
5632}
5633
5634static struct page *
5635__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
5636                             int **resultp)
5637{
5638        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
5639
5640        if (PageHighMem(page))
5641                gfp_mask |= __GFP_HIGHMEM;
5642
5643        return alloc_page(gfp_mask);
5644}
5645
5646/* [start, end) must belong to a single zone. */
5647static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
5648{
5649        /* This function is based on compact_zone() from compaction.c. */
5650
5651        unsigned long pfn = start;
5652        unsigned int tries = 0;
5653        int ret = 0;
5654
5655        struct compact_control cc = {
5656                .nr_migratepages = 0,
5657                .order = -1,
5658                .zone = page_zone(pfn_to_page(start)),
5659                .sync = true,
5660        };
5661        INIT_LIST_HEAD(&cc.migratepages);
5662
5663        migrate_prep_local();
5664
5665        while (pfn < end || !list_empty(&cc.migratepages)) {
5666                if (fatal_signal_pending(current)) {
5667                        ret = -EINTR;
5668                        break;
5669                }
5670
5671                if (list_empty(&cc.migratepages)) {
5672                        cc.nr_migratepages = 0;
5673                        pfn = isolate_migratepages_range(cc.zone, &cc,
5674                                                         pfn, end);
5675                        if (!pfn) {
5676                                ret = -EINTR;
5677                                break;
5678                        }
5679                        tries = 0;
5680                } else if (++tries == 5) {
5681                        ret = ret < 0 ? ret : -EBUSY;
5682                        break;
5683                }
5684
5685                ret = migrate_pages(&cc.migratepages,
5686                                    __alloc_contig_migrate_alloc,
5687                                    0, false, MIGRATE_SYNC);
5688        }
5689
5690        putback_lru_pages(&cc.migratepages);
5691        return ret > 0 ? 0 : ret;
5692}
5693
5694/*
5695 * Update the zone's CMA page counter used for watermark calculations.
5696 */
5697static inline void __update_cma_watermarks(struct zone *zone, int count)
5698{
5699        unsigned long flags;
5700        spin_lock_irqsave(&zone->lock, flags);
5701        zone->min_cma_pages += count;
5702        spin_unlock_irqrestore(&zone->lock, flags);
5703        setup_per_zone_wmarks();
5704}
5705
5706/*
5707 * Trigger a memory pressure bump to reclaim some pages so that 'count'
5708 * pages can be allocated in single-page units. Does similar work to the
5709 * __alloc_pages_slowpath() function.
5710 */
5711static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
5712{
5713        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5714        struct zonelist *zonelist = node_zonelist(0, gfp_mask);
5715        int did_some_progress = 0;
5716        int order = 1;
5717
5718        /*
5719         * Raise the watermarks to force kswapd to do its job and
5720         * stabilise at the new watermark level.
5721         */
5722        __update_cma_watermarks(zone, count);
5723
5724        /* Obey watermarks as if the page was being allocated */
5725        while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
5726                wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
5727
5728                did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
5729                                                      NULL);
5730                if (!did_some_progress) {
5731                        /* Exhausted what can be done so it's blamo time */
5732                        out_of_memory(zonelist, gfp_mask, order, NULL, false);
5733                }
5734        }
5735
5736        /* Restore original watermark levels. */
5737        __update_cma_watermarks(zone, -count);
5738
5739        return count;
5740}
5741
5742/**
5743 * alloc_contig_range() -- tries to allocate given range of pages
5744 * @start:      start PFN to allocate
5745 * @end:        one-past-the-last PFN to allocate
5746 * @migratetype:        migratetype of the underlying pageblocks (either
5747 *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
5748 *                      in range must have the same migratetype and it must
5749 *                      be either of the two.
5750 *
5751 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
5752 * aligned, however it's the caller's responsibility to guarantee that
5753 * we are the only thread that changes migrate type of pageblocks the
5754 * pages fall in.
5755 *
5756 * The PFN range must belong to a single zone.
5757 *
5758 * Returns zero on success or negative error code.  On success all
5759 * pages whose PFN is in [start, end) are allocated for the caller and