linux/mm/page_alloc.c
   1/*
   2 *  linux/mm/page_alloc.c
   3 *
   4 *  Manages the free list, the system allocates free pages here.
   5 *  Note that kmalloc() lives in slab.c
   6 *
   7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   8 *  Swap reorganised 29.12.95, Stephen Tweedie
   9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15 */
  16
  17#include <linux/stddef.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/interrupt.h>
  21#include <linux/pagemap.h>
  22#include <linux/jiffies.h>
  23#include <linux/bootmem.h>
  24#include <linux/memblock.h>
  25#include <linux/compiler.h>
  26#include <linux/kernel.h>
  27#include <linux/kmemcheck.h>
  28#include <linux/module.h>
  29#include <linux/suspend.h>
  30#include <linux/pagevec.h>
  31#include <linux/blkdev.h>
  32#include <linux/slab.h>
  33#include <linux/ratelimit.h>
  34#include <linux/oom.h>
  35#include <linux/notifier.h>
  36#include <linux/topology.h>
  37#include <linux/sysctl.h>
  38#include <linux/cpu.h>
  39#include <linux/cpuset.h>
  40#include <linux/memory_hotplug.h>
  41#include <linux/nodemask.h>
  42#include <linux/vmalloc.h>
  43#include <linux/vmstat.h>
  44#include <linux/mempolicy.h>
  45#include <linux/stop_machine.h>
  46#include <linux/sort.h>
  47#include <linux/pfn.h>
  48#include <linux/backing-dev.h>
  49#include <linux/fault-inject.h>
  50#include <linux/page-isolation.h>
  51#include <linux/page_cgroup.h>
  52#include <linux/debugobjects.h>
  53#include <linux/kmemleak.h>
  54#include <linux/memory.h>
  55#include <linux/compaction.h>
  56#include <trace/events/kmem.h>
  57#include <linux/ftrace_event.h>
  58#include <linux/memcontrol.h>
  59#include <linux/prefetch.h>
  60#include <linux/page-debug-flags.h>
  61
  62#include <asm/tlbflush.h>
  63#include <asm/div64.h>
  64#include "internal.h"
  65
  66#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
  67DEFINE_PER_CPU(int, numa_node);
  68EXPORT_PER_CPU_SYMBOL(numa_node);
  69#endif
  70
  71#ifdef CONFIG_HAVE_MEMORYLESS_NODES
  72/*
  73 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
  74 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
  75 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
  76 * defined in <linux/topology.h>.
  77 */
  78DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
  79EXPORT_PER_CPU_SYMBOL(_numa_mem_);
  80#endif
  81
  82/*
  83 * Array of node states.
  84 */
  85nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
  86        [N_POSSIBLE] = NODE_MASK_ALL,
  87        [N_ONLINE] = { { [0] = 1UL } },
  88#ifndef CONFIG_NUMA
  89        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
  90#ifdef CONFIG_HIGHMEM
  91        [N_HIGH_MEMORY] = { { [0] = 1UL } },
  92#endif
  93        [N_CPU] = { { [0] = 1UL } },
  94#endif  /* NUMA */
  95};
  96EXPORT_SYMBOL(node_states);
  97
  98unsigned long totalram_pages __read_mostly;
  99unsigned long totalreserve_pages __read_mostly;
 100/*
 101 * When calculating the number of globally allowed dirty pages, there
 102 * is a certain number of per-zone reserves that should not be
 103 * considered dirtyable memory.  This is the sum of those reserves
 104 * over all existing zones that contribute dirtyable memory.
 105 */
 106unsigned long dirty_balance_reserve __read_mostly;
 107
 108int percpu_pagelist_fraction;
 109gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 110
 111#ifdef CONFIG_PM_SLEEP
 112/*
 113 * The following functions are used by the suspend/hibernate code to temporarily
 114 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 115 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 116 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 117 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 118 * guaranteed not to run in parallel with that modification).
 119 */
 120
 121static gfp_t saved_gfp_mask;
 122
 123void pm_restore_gfp_mask(void)
 124{
 125        WARN_ON(!mutex_is_locked(&pm_mutex));
 126        if (saved_gfp_mask) {
 127                gfp_allowed_mask = saved_gfp_mask;
 128                saved_gfp_mask = 0;
 129        }
 130}
 131
 132void pm_restrict_gfp_mask(void)
 133{
 134        WARN_ON(!mutex_is_locked(&pm_mutex));
 135        WARN_ON(saved_gfp_mask);
 136        saved_gfp_mask = gfp_allowed_mask;
 137        gfp_allowed_mask &= ~GFP_IOFS;
 138}
 139
 140bool pm_suspended_storage(void)
 141{
 142        if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
 143                return false;
 144        return true;
 145}
 146#endif /* CONFIG_PM_SLEEP */
 147
 148#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 149int pageblock_order __read_mostly;
 150#endif
 151
 152static void __free_pages_ok(struct page *page, unsigned int order);
 153
 154/*
 155 * results with 256, 32 in the lowmem_reserve sysctl:
 156 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 157 *      1G machine -> (16M dma, 784M normal, 224M high)
 158 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 159 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  160 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 161 *
 162 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 163 * don't need any ZONE_NORMAL reservation
 164 */
 165int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
 166#ifdef CONFIG_ZONE_DMA
 167         256,
 168#endif
 169#ifdef CONFIG_ZONE_DMA32
 170         256,
 171#endif
 172#ifdef CONFIG_HIGHMEM
 173         32,
 174#endif
 175         32,
 176};
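
/*
 * Roughly speaking (see setup_per_zone_lowmem_reserve()), a lower zone
 * reserves "pages in all higher zones / ratio" pages against allocations
 * that could have been satisfied from those higher zones; that is how the
 * 784M/256 and (224M+784M)/256 figures in the comment above are obtained.
 */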
 177
 178EXPORT_SYMBOL(totalram_pages);
 179
 180static char * const zone_names[MAX_NR_ZONES] = {
 181#ifdef CONFIG_ZONE_DMA
 182         "DMA",
 183#endif
 184#ifdef CONFIG_ZONE_DMA32
 185         "DMA32",
 186#endif
 187         "Normal",
 188#ifdef CONFIG_HIGHMEM
 189         "HighMem",
 190#endif
 191         "Movable",
 192};
 193
 194int min_free_kbytes = 1024;
 195
 196static unsigned long __meminitdata nr_kernel_pages;
 197static unsigned long __meminitdata nr_all_pages;
 198static unsigned long __meminitdata dma_reserve;
 199
 200#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 201static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 202static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 203static unsigned long __initdata required_kernelcore;
 204static unsigned long __initdata required_movablecore;
 205static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 206
 207/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 208int movable_zone;
 209EXPORT_SYMBOL(movable_zone);
 210#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
 211
 212#if MAX_NUMNODES > 1
 213int nr_node_ids __read_mostly = MAX_NUMNODES;
 214int nr_online_nodes __read_mostly = 1;
 215EXPORT_SYMBOL(nr_node_ids);
 216EXPORT_SYMBOL(nr_online_nodes);
 217#endif
 218
 219int page_group_by_mobility_disabled __read_mostly;
 220
 221static void set_pageblock_migratetype(struct page *page, int migratetype)
 222{
 223
 224        if (unlikely(page_group_by_mobility_disabled))
 225                migratetype = MIGRATE_UNMOVABLE;
 226
 227        set_pageblock_flags_group(page, (unsigned long)migratetype,
 228                                        PB_migrate, PB_migrate_end);
 229}
 230
 231bool oom_killer_disabled __read_mostly;
 232
 233#ifdef CONFIG_DEBUG_VM
 234static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 235{
 236        int ret = 0;
 237        unsigned seq;
 238        unsigned long pfn = page_to_pfn(page);
 239
 240        do {
 241                seq = zone_span_seqbegin(zone);
 242                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
 243                        ret = 1;
 244                else if (pfn < zone->zone_start_pfn)
 245                        ret = 1;
 246        } while (zone_span_seqretry(zone, seq));
 247
 248        return ret;
 249}
 250
 251static int page_is_consistent(struct zone *zone, struct page *page)
 252{
 253        if (!pfn_valid_within(page_to_pfn(page)))
 254                return 0;
 255        if (zone != page_zone(page))
 256                return 0;
 257
 258        return 1;
 259}
 260/*
 261 * Temporary debugging check for pages not lying within a given zone.
 262 */
 263static int bad_range(struct zone *zone, struct page *page)
 264{
 265        if (page_outside_zone_boundaries(zone, page))
 266                return 1;
 267        if (!page_is_consistent(zone, page))
 268                return 1;
 269
 270        return 0;
 271}
 272#else
 273static inline int bad_range(struct zone *zone, struct page *page)
 274{
 275        return 0;
 276}
 277#endif
 278
 279static void bad_page(struct page *page)
 280{
 281        static unsigned long resume;
 282        static unsigned long nr_shown;
 283        static unsigned long nr_unshown;
 284
 285        /* Don't complain about poisoned pages */
 286        if (PageHWPoison(page)) {
 287                reset_page_mapcount(page); /* remove PageBuddy */
 288                return;
 289        }
 290
 291        /*
 292         * Allow a burst of 60 reports, then keep quiet for that minute;
 293         * or allow a steady drip of one report per second.
 294         */
 295        if (nr_shown == 60) {
 296                if (time_before(jiffies, resume)) {
 297                        nr_unshown++;
 298                        goto out;
 299                }
 300                if (nr_unshown) {
 301                        printk(KERN_ALERT
 302                              "BUG: Bad page state: %lu messages suppressed\n",
 303                                nr_unshown);
 304                        nr_unshown = 0;
 305                }
 306                nr_shown = 0;
 307        }
 308        if (nr_shown++ == 0)
 309                resume = jiffies + 60 * HZ;
 310
 311        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
 312                current->comm, page_to_pfn(page));
 313        dump_page(page);
 314
 315        print_modules();
 316        dump_stack();
 317out:
 318        /* Leave bad fields for debug, except PageBuddy could make trouble */
 319        reset_page_mapcount(page); /* remove PageBuddy */
 320        add_taint(TAINT_BAD_PAGE);
 321}
 322
 323/*
 324 * Higher-order pages are called "compound pages".  They are structured thusly:
 325 *
 326 * The first PAGE_SIZE page is called the "head page".
 327 *
 328 * The remaining PAGE_SIZE pages are called "tail pages".
 329 *
 330 * All pages have PG_compound set.  All tail pages have their ->first_page
 331 * pointing at the head page.
 332 *
 333 * The first tail page's ->lru.next holds the address of the compound page's
 334 * put_page() function.  Its ->lru.prev holds the order of allocation.
 335 * This usage means that zero-order pages may not be compound.
 336 */
 337
 338static void free_compound_page(struct page *page)
 339{
 340        __free_pages_ok(page, compound_order(page));
 341}
 342
 343void prep_compound_page(struct page *page, unsigned long order)
 344{
 345        int i;
 346        int nr_pages = 1 << order;
 347
 348        set_compound_page_dtor(page, free_compound_page);
 349        set_compound_order(page, order);
 350        __SetPageHead(page);
 351        for (i = 1; i < nr_pages; i++) {
 352                struct page *p = page + i;
 353                __SetPageTail(p);
 354                set_page_count(p, 0);
 355                p->first_page = page;
 356        }
 357}
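
/*
 * For example, after prep_compound_page(page, 2) the four pages of the
 * order-2 block look like this: page[0] is the head (PG_head set,
 * compound_order() == 2, destructor set to free_compound_page), while
 * page[1..3] are tails (PG_tail set, reference count 0, ->first_page
 * pointing back at page[0]).
 */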
 358
 359/* update __split_huge_page_refcount if you change this function */
 360static int destroy_compound_page(struct page *page, unsigned long order)
 361{
 362        int i;
 363        int nr_pages = 1 << order;
 364        int bad = 0;
 365
 366        if (unlikely(compound_order(page) != order) ||
 367            unlikely(!PageHead(page))) {
 368                bad_page(page);
 369                bad++;
 370        }
 371
 372        __ClearPageHead(page);
 373
 374        for (i = 1; i < nr_pages; i++) {
 375                struct page *p = page + i;
 376
 377                if (unlikely(!PageTail(p) || (p->first_page != page))) {
 378                        bad_page(page);
 379                        bad++;
 380                }
 381                __ClearPageTail(p);
 382        }
 383
 384        return bad;
 385}
 386
 387static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 388{
 389        int i;
 390
 391        /*
 392         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 393         * and __GFP_HIGHMEM from hard or soft interrupt context.
 394         */
 395        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
 396        for (i = 0; i < (1 << order); i++)
 397                clear_highpage(page + i);
 398}
 399
 400#ifdef CONFIG_DEBUG_PAGEALLOC
 401unsigned int _debug_guardpage_minorder;
 402
 403static int __init debug_guardpage_minorder_setup(char *buf)
 404{
 405        unsigned long res;
 406
 407        if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
 408                printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
 409                return 0;
 410        }
 411        _debug_guardpage_minorder = res;
 412        printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
 413        return 0;
 414}
 415__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
 416
 417static inline void set_page_guard_flag(struct page *page)
 418{
 419        __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
 420}
 421
 422static inline void clear_page_guard_flag(struct page *page)
 423{
 424        __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
 425}
 426#else
 427static inline void set_page_guard_flag(struct page *page) { }
 428static inline void clear_page_guard_flag(struct page *page) { }
 429#endif
 430
 431static inline void set_page_order(struct page *page, int order)
 432{
 433        set_page_private(page, order);
 434        __SetPageBuddy(page);
 435}
 436
 437static inline void rmv_page_order(struct page *page)
 438{
 439        __ClearPageBuddy(page);
 440        set_page_private(page, 0);
 441}
 442
 443/*
 444 * Locate the struct page for both the matching buddy in our
 445 * pair (buddy1) and the combined O(n+1) page they form (page).
 446 *
 447 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 448 * the following equation:
 449 *     B2 = B1 ^ (1 << O)
  450 * For example, if the starting buddy (buddy2) is #8, its order-1
  451 * buddy is #10:
 452 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 453 *
 454 * 2) Any buddy B will have an order O+1 parent P which
 455 * satisfies the following equation:
 456 *     P = B & ~(1 << O)
 457 *
 458 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 459 */
 460static inline unsigned long
 461__find_buddy_index(unsigned long page_idx, unsigned int order)
 462{
 463        return page_idx ^ (1 << order);
 464}
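
/*
 * Worked example of the index arithmetic above: with page_idx == 12 and
 * order == 2, the buddy index is 12 ^ (1 << 2) == 8, and the combined
 * index computed in __free_one_page() as buddy_idx & page_idx is also 8,
 * the start of the order-3 block covering both order-2 buddies.
 */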
 465
 466/*
  467 * This function checks whether a page is free && is the buddy.
  468 * We can coalesce a page and its buddy if
 469 * (a) the buddy is not in a hole &&
 470 * (b) the buddy is in the buddy system &&
 471 * (c) a page and its buddy have the same order &&
 472 * (d) a page and its buddy are in the same zone.
 473 *
  474 * For recording whether a page is in the buddy system, we set ->_mapcount to -2.
  475 * Setting, clearing, and testing _mapcount -2 is serialized by zone->lock.
 476 *
 477 * For recording page's order, we use page_private(page).
 478 */
 479static inline int page_is_buddy(struct page *page, struct page *buddy,
 480                                                                int order)
 481{
 482        if (!pfn_valid_within(page_to_pfn(buddy)))
 483                return 0;
 484
 485        if (page_zone_id(page) != page_zone_id(buddy))
 486                return 0;
 487
 488        if (page_is_guard(buddy) && page_order(buddy) == order) {
 489                VM_BUG_ON(page_count(buddy) != 0);
 490                return 1;
 491        }
 492
 493        if (PageBuddy(buddy) && page_order(buddy) == order) {
 494                VM_BUG_ON(page_count(buddy) != 0);
 495                return 1;
 496        }
 497        return 0;
 498}
 499
 500/*
 501 * Freeing function for a buddy system allocator.
 502 *
  503 * The concept of a buddy system is to maintain a direct-mapped table
 504 * (containing bit values) for memory blocks of various "orders".
 505 * The bottom level table contains the map for the smallest allocatable
 506 * units of memory (here, pages), and each level above it describes
 507 * pairs of units from the levels below, hence, "buddies".
 508 * At a high level, all that happens here is marking the table entry
 509 * at the bottom level available, and propagating the changes upward
 510 * as necessary, plus some accounting needed to play nicely with other
 511 * parts of the VM system.
  512 * At each level, we keep a list of pages, which are heads of contiguous
  513 * runs of free pages of length (1 << order), marked with _mapcount -2. A
  514 * page's order is recorded in the page_private(page) field.
  515 * So when we are allocating or freeing one, we can derive the state of the
  516 * other.  That is, if we allocate a small block and both buddies were
  517 * free, the remainder of the region must be split into blocks.
  518 * If a block is freed, and its buddy is also free, then this
  519 * triggers coalescing into a block of larger size.
 520 *
 521 * -- wli
 522 */
 523
 524static inline void __free_one_page(struct page *page,
 525                struct zone *zone, unsigned int order,
 526                int migratetype)
 527{
 528        unsigned long page_idx;
 529        unsigned long combined_idx;
 530        unsigned long uninitialized_var(buddy_idx);
 531        struct page *buddy;
 532
 533        if (unlikely(PageCompound(page)))
 534                if (unlikely(destroy_compound_page(page, order)))
 535                        return;
 536
 537        VM_BUG_ON(migratetype == -1);
 538
 539        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 540
 541        VM_BUG_ON(page_idx & ((1 << order) - 1));
 542        VM_BUG_ON(bad_range(zone, page));
 543
 544        while (order < MAX_ORDER-1) {
 545                buddy_idx = __find_buddy_index(page_idx, order);
 546                buddy = page + (buddy_idx - page_idx);
 547                if (!page_is_buddy(page, buddy, order))
 548                        break;
 549                /*
  550                 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page;
  551                 * merge with it and move up one order.
 552                 */
 553                if (page_is_guard(buddy)) {
 554                        clear_page_guard_flag(buddy);
 555                        set_page_private(page, 0);
 556                        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 557                } else {
 558                        list_del(&buddy->lru);
 559                        zone->free_area[order].nr_free--;
 560                        rmv_page_order(buddy);
 561                }
 562                combined_idx = buddy_idx & page_idx;
 563                page = page + (combined_idx - page_idx);
 564                page_idx = combined_idx;
 565                order++;
 566        }
 567        set_page_order(page, order);
 568
 569        /*
 570         * If this is not the largest possible page, check if the buddy
 571         * of the next-highest order is free. If it is, it's possible
  572         * that pages are being freed that will coalesce soon. If that is
  573         * happening, add the free page to the tail of the list so it's
  574         * less likely to be used soon and more likely to be merged as a
  575         * higher order page.
 576         */
 577        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
 578                struct page *higher_page, *higher_buddy;
 579                combined_idx = buddy_idx & page_idx;
 580                higher_page = page + (combined_idx - page_idx);
 581                buddy_idx = __find_buddy_index(combined_idx, order + 1);
 582                higher_buddy = page + (buddy_idx - combined_idx);
 583                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
 584                        list_add_tail(&page->lru,
 585                                &zone->free_area[order].free_list[migratetype]);
 586                        goto out;
 587                }
 588        }
 589
 590        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
 591out:
 592        zone->free_area[order].nr_free++;
 593}
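
/*
 * A short worked example of the merge loop above: freeing the order-0
 * page at page_idx 5 while page 4 is a free order-0 block and pages 6-7
 * form a free order-1 block merges 5 with 4 (order 0 -> 1), then 4-5
 * with 6-7 (order 1 -> 2), so a single order-2 block starting at index 4
 * ends up on the migratetype's free list.
 */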
 594
 595/*
  596 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 597 * Page should not be on lru, so no need to fix that up.
 598 * free_pages_check() will verify...
 599 */
 600static inline void free_page_mlock(struct page *page)
 601{
 602        __dec_zone_page_state(page, NR_MLOCK);
 603        __count_vm_event(UNEVICTABLE_MLOCKFREED);
 604}
 605
 606static inline int free_pages_check(struct page *page)
 607{
 608        if (unlikely(page_mapcount(page) |
 609                (page->mapping != NULL)  |
 610                (atomic_read(&page->_count) != 0) |
 611                (page->flags & PAGE_FLAGS_CHECK_AT_FREE) |
 612                (mem_cgroup_bad_page_check(page)))) {
 613                bad_page(page);
 614                return 1;
 615        }
 616        if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 617                page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 618        return 0;
 619}
 620
 621/*
 622 * Frees a number of pages from the PCP lists
 623 * Assumes all pages on list are in same zone, and of same order.
 624 * count is the number of pages to free.
 625 *
 626 * If the zone was previously in an "all pages pinned" state then look to
 627 * see if this freeing clears that state.
 628 *
 629 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 630 * pinned" detection logic.
 631 */
 632static void free_pcppages_bulk(struct zone *zone, int count,
 633                                        struct per_cpu_pages *pcp)
 634{
 635        int migratetype = 0;
 636        int batch_free = 0;
 637        int to_free = count;
 638
 639        spin_lock(&zone->lock);
 640        zone->all_unreclaimable = 0;
 641        zone->pages_scanned = 0;
 642
 643        while (to_free) {
 644                struct page *page;
 645                struct list_head *list;
 646
 647                /*
 648                 * Remove pages from lists in a round-robin fashion. A
 649                 * batch_free count is maintained that is incremented when an
 650                 * empty list is encountered.  This is so more pages are freed
 651                 * off fuller lists instead of spinning excessively around empty
 652                 * lists
 653                 */
 654                do {
 655                        batch_free++;
 656                        if (++migratetype == MIGRATE_PCPTYPES)
 657                                migratetype = 0;
 658                        list = &pcp->lists[migratetype];
 659                } while (list_empty(list));
 660
 661                /* This is the only non-empty list. Free them all. */
 662                if (batch_free == MIGRATE_PCPTYPES)
 663                        batch_free = to_free;
 664
 665                do {
 666                        page = list_entry(list->prev, struct page, lru);
 667                        /* must delete as __free_one_page list manipulates */
 668                        list_del(&page->lru);
 669                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
 670                        __free_one_page(page, zone, 0, page_private(page));
 671                        trace_mm_page_pcpu_drain(page, 0, page_private(page));
 672                } while (--to_free && --batch_free && !list_empty(list));
 673        }
 674        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
 675        spin_unlock(&zone->lock);
 676}
 677
 678static void free_one_page(struct zone *zone, struct page *page, int order,
 679                                int migratetype)
 680{
 681        spin_lock(&zone->lock);
 682        zone->all_unreclaimable = 0;
 683        zone->pages_scanned = 0;
 684
 685        __free_one_page(page, zone, order, migratetype);
 686        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 687        spin_unlock(&zone->lock);
 688}
 689
 690static bool free_pages_prepare(struct page *page, unsigned int order)
 691{
 692        int i;
 693        int bad = 0;
 694
 695        trace_mm_page_free(page, order);
 696        kmemcheck_free_shadow(page, order);
 697
 698        if (PageAnon(page))
 699                page->mapping = NULL;
 700        for (i = 0; i < (1 << order); i++)
 701                bad += free_pages_check(page + i);
 702        if (bad)
 703                return false;
 704
 705        if (!PageHighMem(page)) {
 706                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
 707                debug_check_no_obj_freed(page_address(page),
 708                                           PAGE_SIZE << order);
 709        }
 710        arch_free_page(page, order);
 711        kernel_map_pages(page, 1 << order, 0);
 712
 713        return true;
 714}
 715
 716static void __free_pages_ok(struct page *page, unsigned int order)
 717{
 718        unsigned long flags;
 719        int wasMlocked = __TestClearPageMlocked(page);
 720
 721        if (!free_pages_prepare(page, order))
 722                return;
 723
 724        local_irq_save(flags);
 725        if (unlikely(wasMlocked))
 726                free_page_mlock(page);
 727        __count_vm_events(PGFREE, 1 << order);
 728        free_one_page(page_zone(page), page, order,
 729                                        get_pageblock_migratetype(page));
 730        local_irq_restore(flags);
 731}
 732
 733void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 734{
 735        unsigned int nr_pages = 1 << order;
 736        unsigned int loop;
 737
 738        prefetchw(page);
 739        for (loop = 0; loop < nr_pages; loop++) {
 740                struct page *p = &page[loop];
 741
 742                if (loop + 1 < nr_pages)
 743                        prefetchw(p + 1);
 744                __ClearPageReserved(p);
 745                set_page_count(p, 0);
 746        }
 747
 748        set_page_refcounted(page);
 749        __free_pages(page, order);
 750}
 751
 752
 753/*
 754 * The order of subdivision here is critical for the IO subsystem.
 755 * Please do not alter this order without good reasons and regression
 756 * testing. Specifically, as large blocks of memory are subdivided,
 757 * the order in which smaller blocks are delivered depends on the order
 758 * they're subdivided in this function. This is the primary factor
 759 * influencing the order in which pages are delivered to the IO
 760 * subsystem according to empirical testing, and this is also justified
 761 * by considering the behavior of a buddy system containing a single
 762 * large block of memory acted on by a series of small allocations.
 763 * This behavior is a critical factor in sglist merging's success.
 764 *
 765 * -- wli
 766 */
 767static inline void expand(struct zone *zone, struct page *page,
 768        int low, int high, struct free_area *area,
 769        int migratetype)
 770{
 771        unsigned long size = 1 << high;
 772
 773        while (high > low) {
 774                area--;
 775                high--;
 776                size >>= 1;
 777                VM_BUG_ON(bad_range(zone, &page[size]));
 778
 779#ifdef CONFIG_DEBUG_PAGEALLOC
 780                if (high < debug_guardpage_minorder()) {
 781                        /*
  782                         * Mark as guard pages (or page) so that they can be
  783                         * merged back into the allocator when the buddy is freed.
  784                         * The corresponding page table entries are not touched;
  785                         * the pages stay not-present in the virtual address space.
 786                         */
 787                        INIT_LIST_HEAD(&page[size].lru);
 788                        set_page_guard_flag(&page[size]);
 789                        set_page_private(&page[size], high);
 790                        /* Guard pages are not available for any usage */
 791                        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << high));
 792                        continue;
 793                }
 794#endif
 795                list_add(&page[size].lru, &area->free_list[migratetype]);
 796                area->nr_free++;
 797                set_page_order(&page[size], high);
 798        }
 799}
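
/*
 * For example, splitting an order-3 block to satisfy an order-0 request
 * (low == 0, high == 3) leaves the upper halves on the free lists:
 * page[4..7] at order 2, page[2..3] at order 1 and page[1] at order 0,
 * while page[0] itself is handed back by __rmqueue_smallest().
 */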
 800
 801/*
 802 * This page is about to be returned from the page allocator
 803 */
 804static inline int check_new_page(struct page *page)
 805{
 806        if (unlikely(page_mapcount(page) |
 807                (page->mapping != NULL)  |
 808                (atomic_read(&page->_count) != 0)  |
 809                (page->flags & PAGE_FLAGS_CHECK_AT_PREP) |
 810                (mem_cgroup_bad_page_check(page)))) {
 811                bad_page(page);
 812                return 1;
 813        }
 814        return 0;
 815}
 816
 817static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 818{
 819        int i;
 820
 821        for (i = 0; i < (1 << order); i++) {
 822                struct page *p = page + i;
 823                if (unlikely(check_new_page(p)))
 824                        return 1;
 825        }
 826
 827        set_page_private(page, 0);
 828        set_page_refcounted(page);
 829
 830        arch_alloc_page(page, order);
 831        kernel_map_pages(page, 1 << order, 1);
 832
 833        if (gfp_flags & __GFP_ZERO)
 834                prep_zero_page(page, order, gfp_flags);
 835
 836        if (order && (gfp_flags & __GFP_COMP))
 837                prep_compound_page(page, order);
 838
 839        return 0;
 840}
 841
 842/*
 843 * Go through the free lists for the given migratetype and remove
 844 * the smallest available page from the freelists
 845 */
 846static inline
 847struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 848                                                int migratetype)
 849{
 850        unsigned int current_order;
 851        struct free_area * area;
 852        struct page *page;
 853
 854        /* Find a page of the appropriate size in the preferred list */
 855        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 856                area = &(zone->free_area[current_order]);
 857                if (list_empty(&area->free_list[migratetype]))
 858                        continue;
 859
 860                page = list_entry(area->free_list[migratetype].next,
 861                                                        struct page, lru);
 862                list_del(&page->lru);
 863                rmv_page_order(page);
 864                area->nr_free--;
 865                expand(zone, page, order, current_order, area, migratetype);
 866                return page;
 867        }
 868
 869        return NULL;
 870}
 871
 872
 873/*
  874 * This array describes the order in which free lists are fallen back on
  875 * when the free lists for the desired migratetype are depleted.
 876 */
 877static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
 878        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 879        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 880        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
 881        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
 882};
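
/*
 * For example, a MIGRATE_UNMOVABLE request whose own free lists are empty
 * falls back to MIGRATE_RECLAIMABLE and then MIGRATE_MOVABLE below;
 * MIGRATE_RESERVE is skipped by __rmqueue_fallback() and only used as a
 * last resort by __rmqueue() itself.
 */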
 883
 884/*
 885 * Move the free pages in a range to the free lists of the requested type.
  886 * Note that start_page and end_page are not aligned on a pageblock
 887 * boundary. If alignment is required, use move_freepages_block()
 888 */
 889static int move_freepages(struct zone *zone,
 890                          struct page *start_page, struct page *end_page,
 891                          int migratetype)
 892{
 893        struct page *page;
 894        unsigned long order;
 895        int pages_moved = 0;
 896
 897#ifndef CONFIG_HOLES_IN_ZONE
 898        /*
 899         * page_zone is not safe to call in this context when
 900         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 901         * anyway as we check zone boundaries in move_freepages_block().
 902         * Remove at a later date when no bug reports exist related to
 903         * grouping pages by mobility
 904         */
 905        BUG_ON(page_zone(start_page) != page_zone(end_page));
 906#endif
 907
 908        for (page = start_page; page <= end_page;) {
 909                /* Make sure we are not inadvertently changing nodes */
 910                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
 911
 912                if (!pfn_valid_within(page_to_pfn(page))) {
 913                        page++;
 914                        continue;
 915                }
 916
 917                if (!PageBuddy(page)) {
 918                        page++;
 919                        continue;
 920                }
 921
 922                order = page_order(page);
 923                list_move(&page->lru,
 924                          &zone->free_area[order].free_list[migratetype]);
 925                page += 1 << order;
 926                pages_moved += 1 << order;
 927        }
 928
 929        return pages_moved;
 930}
 931
 932static int move_freepages_block(struct zone *zone, struct page *page,
 933                                int migratetype)
 934{
 935        unsigned long start_pfn, end_pfn;
 936        struct page *start_page, *end_page;
 937
 938        start_pfn = page_to_pfn(page);
 939        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
 940        start_page = pfn_to_page(start_pfn);
 941        end_page = start_page + pageblock_nr_pages - 1;
 942        end_pfn = start_pfn + pageblock_nr_pages - 1;
 943
 944        /* Do not cross zone boundaries */
 945        if (start_pfn < zone->zone_start_pfn)
 946                start_page = page;
 947        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
 948                return 0;
 949
 950        return move_freepages(zone, start_page, end_page, migratetype);
 951}
 952
 953static void change_pageblock_range(struct page *pageblock_page,
 954                                        int start_order, int migratetype)
 955{
 956        int nr_pageblocks = 1 << (start_order - pageblock_order);
 957
 958        while (nr_pageblocks--) {
 959                set_pageblock_migratetype(pageblock_page, migratetype);
 960                pageblock_page += pageblock_nr_pages;
 961        }
 962}
 963
 964/* Remove an element from the buddy allocator from the fallback list */
 965static inline struct page *
 966__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 967{
 968        struct free_area * area;
 969        int current_order;
 970        struct page *page;
 971        int migratetype, i;
 972
 973        /* Find the largest possible block of pages in the other list */
 974        for (current_order = MAX_ORDER-1; current_order >= order;
 975                                                --current_order) {
 976                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
 977                        migratetype = fallbacks[start_migratetype][i];
 978
 979                        /* MIGRATE_RESERVE handled later if necessary */
 980                        if (migratetype == MIGRATE_RESERVE)
 981                                continue;
 982
 983                        area = &(zone->free_area[current_order]);
 984                        if (list_empty(&area->free_list[migratetype]))
 985                                continue;
 986
 987                        page = list_entry(area->free_list[migratetype].next,
 988                                        struct page, lru);
 989                        area->nr_free--;
 990
 991                        /*
 992                         * If breaking a large block of pages, move all free
 993                         * pages to the preferred allocation list. If falling
 994                         * back for a reclaimable kernel allocation, be more
 995                         * aggressive about taking ownership of free pages
 996                         */
 997                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
 998                                        start_migratetype == MIGRATE_RECLAIMABLE ||
 999                                        page_group_by_mobility_disabled) {
1000                                unsigned long pages;
1001                                pages = move_freepages_block(zone, page,
1002                                                                start_migratetype);
1003
1004                                /* Claim the whole block if over half of it is free */
1005                                if (pages >= (1 << (pageblock_order-1)) ||
1006                                                page_group_by_mobility_disabled)
1007                                        set_pageblock_migratetype(page,
1008                                                                start_migratetype);
1009
1010                                migratetype = start_migratetype;
1011                        }
1012
1013                        /* Remove the page from the freelists */
1014                        list_del(&page->lru);
1015                        rmv_page_order(page);
1016
1017                        /* Take ownership for orders >= pageblock_order */
1018                        if (current_order >= pageblock_order)
1019                                change_pageblock_range(page, current_order,
1020                                                        start_migratetype);
1021
1022                        expand(zone, page, order, current_order, area, migratetype);
1023
1024                        trace_mm_page_alloc_extfrag(page, order, current_order,
1025                                start_migratetype, migratetype);
1026
1027                        return page;
1028                }
1029        }
1030
1031        return NULL;
1032}
1033
1034/*
1035 * Do the hard work of removing an element from the buddy allocator.
1036 * Call me with the zone->lock already held.
1037 */
1038static struct page *__rmqueue(struct zone *zone, unsigned int order,
1039                                                int migratetype)
1040{
1041        struct page *page;
1042
1043retry_reserve:
1044        page = __rmqueue_smallest(zone, order, migratetype);
1045
1046        if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
1047                page = __rmqueue_fallback(zone, order, migratetype);
1048
1049                /*
1050                 * Use MIGRATE_RESERVE rather than fail an allocation. goto
1051                 * is used because __rmqueue_smallest is an inline function
1052                 * and we want just one call site
1053                 */
1054                if (!page) {
1055                        migratetype = MIGRATE_RESERVE;
1056                        goto retry_reserve;
1057                }
1058        }
1059
1060        trace_mm_page_alloc_zone_locked(page, order, migratetype);
1061        return page;
1062}
1063
1064/* 
1065 * Obtain a specified number of elements from the buddy allocator, all under
1066 * a single hold of the lock, for efficiency.  Add them to the supplied list.
1067 * Returns the number of new pages which were placed at *list.
1068 */
1069static int rmqueue_bulk(struct zone *zone, unsigned int order, 
1070                        unsigned long count, struct list_head *list,
1071                        int migratetype, int cold)
1072{
1073        int i;
1074        
1075        spin_lock(&zone->lock);
1076        for (i = 0; i < count; ++i) {
1077                struct page *page = __rmqueue(zone, order, migratetype);
1078                if (unlikely(page == NULL))
1079                        break;
1080
1081                /*
1082                 * Split buddy pages returned by expand() are received here
 1083                 * in physical page order. The page is added to the caller's
 1084                 * list and the list head then moves forward. From the caller's
 1085                 * perspective, the linked list is ordered by page number in
1086                 * some conditions. This is useful for IO devices that can
1087                 * merge IO requests if the physical pages are ordered
1088                 * properly.
1089                 */
1090                if (likely(cold == 0))
1091                        list_add(&page->lru, list);
1092                else
1093                        list_add_tail(&page->lru, list);
1094                set_page_private(page, migratetype);
1095                list = &page->lru;
1096        }
1097        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
1098        spin_unlock(&zone->lock);
1099        return i;
1100}
1101
1102#ifdef CONFIG_NUMA
1103/*
1104 * Called from the vmstat counter updater to drain pagesets of this
1105 * currently executing processor on remote nodes after they have
1106 * expired.
1107 *
1108 * Note that this function must be called with the thread pinned to
1109 * a single processor.
1110 */
1111void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
1112{
1113        unsigned long flags;
1114        int to_drain;
1115
1116        local_irq_save(flags);
1117        if (pcp->count >= pcp->batch)
1118                to_drain = pcp->batch;
1119        else
1120                to_drain = pcp->count;
1121        free_pcppages_bulk(zone, to_drain, pcp);
1122        pcp->count -= to_drain;
1123        local_irq_restore(flags);
1124}
1125#endif
1126
1127/*
1128 * Drain pages of the indicated processor.
1129 *
1130 * The processor must either be the current processor and the
1131 * thread pinned to the current processor or a processor that
1132 * is not online.
1133 */
1134static void drain_pages(unsigned int cpu)
1135{
1136        unsigned long flags;
1137        struct zone *zone;
1138
1139        for_each_populated_zone(zone) {
1140                struct per_cpu_pageset *pset;
1141                struct per_cpu_pages *pcp;
1142
1143                local_irq_save(flags);
1144                pset = per_cpu_ptr(zone->pageset, cpu);
1145
1146                pcp = &pset->pcp;
1147                if (pcp->count) {
1148                        free_pcppages_bulk(zone, pcp->count, pcp);
1149                        pcp->count = 0;
1150                }
1151                local_irq_restore(flags);
1152        }
1153}
1154
1155/*
1156 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
1157 */
1158void drain_local_pages(void *arg)
1159{
1160        drain_pages(smp_processor_id());
1161}
1162
1163/*
1164 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
1165 */
1166void drain_all_pages(void)
1167{
1168        on_each_cpu(drain_local_pages, NULL, 1);
1169}
1170
1171#ifdef CONFIG_HIBERNATION
1172
1173void mark_free_pages(struct zone *zone)
1174{
1175        unsigned long pfn, max_zone_pfn;
1176        unsigned long flags;
1177        int order, t;
1178        struct list_head *curr;
1179
1180        if (!zone->spanned_pages)
1181                return;
1182
1183        spin_lock_irqsave(&zone->lock, flags);
1184
1185        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1186        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1187                if (pfn_valid(pfn)) {
1188                        struct page *page = pfn_to_page(pfn);
1189
1190                        if (!swsusp_page_is_forbidden(page))
1191                                swsusp_unset_page_free(page);
1192                }
1193
1194        for_each_migratetype_order(order, t) {
1195                list_for_each(curr, &zone->free_area[order].free_list[t]) {
1196                        unsigned long i;
1197
1198                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
1199                        for (i = 0; i < (1UL << order); i++)
1200                                swsusp_set_page_free(pfn_to_page(pfn + i));
1201                }
1202        }
1203        spin_unlock_irqrestore(&zone->lock, flags);
1204}
 1205#endif /* CONFIG_HIBERNATION */
1206
1207/*
1208 * Free a 0-order page
1209 * cold == 1 ? free a cold page : free a hot page
1210 */
1211void free_hot_cold_page(struct page *page, int cold)
1212{
1213        struct zone *zone = page_zone(page);
1214        struct per_cpu_pages *pcp;
1215        unsigned long flags;
1216        int migratetype;
1217        int wasMlocked = __TestClearPageMlocked(page);
1218
1219        if (!free_pages_prepare(page, 0))
1220                return;
1221
1222        migratetype = get_pageblock_migratetype(page);
1223        set_page_private(page, migratetype);
1224        local_irq_save(flags);
1225        if (unlikely(wasMlocked))
1226                free_page_mlock(page);
1227        __count_vm_event(PGFREE);
1228
1229        /*
1230         * We only track unmovable, reclaimable and movable on pcp lists.
1231         * Free ISOLATE pages back to the allocator because they are being
1232         * offlined but treat RESERVE as movable pages so we can get those
1233         * areas back if necessary. Otherwise, we may have to free
1234         * excessively into the page allocator
1235         */
1236        if (migratetype >= MIGRATE_PCPTYPES) {
1237                if (unlikely(migratetype == MIGRATE_ISOLATE)) {
1238                        free_one_page(zone, page, 0, migratetype);
1239                        goto out;
1240                }
1241                migratetype = MIGRATE_MOVABLE;
1242        }
1243
1244        pcp = &this_cpu_ptr(zone->pageset)->pcp;
1245        if (cold)
1246                list_add_tail(&page->lru, &pcp->lists[migratetype]);
1247        else
1248                list_add(&page->lru, &pcp->lists[migratetype]);
1249        pcp->count++;
1250        if (pcp->count >= pcp->high) {
1251                free_pcppages_bulk(zone, pcp->batch, pcp);
1252                pcp->count -= pcp->batch;
1253        }
1254
1255out:
1256        local_irq_restore(flags);
1257}
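
/*
 * Purely for illustration (the real values are derived from the zone size
 * via zone_batchsize()): with pcp->high == 186 and pcp->batch == 31,
 * pages freed here accumulate on the per-cpu lists until the count
 * reaches 186, at which point free_pcppages_bulk() hands 31 of them back
 * to the buddy lists under a single zone->lock acquisition.
 */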
1258
1259/*
1260 * Free a list of 0-order pages
1261 */
1262void free_hot_cold_page_list(struct list_head *list, int cold)
1263{
1264        struct page *page, *next;
1265
1266        list_for_each_entry_safe(page, next, list, lru) {
1267                trace_mm_page_free_batched(page, cold);
1268                free_hot_cold_page(page, cold);
1269        }
1270}
1271
1272/*
1273 * split_page takes a non-compound higher-order page, and splits it into
 1274 * n (1<<order) sub-pages: page[0..n-1]
1275 * Each sub-page must be freed individually.
1276 *
1277 * Note: this is probably too low level an operation for use in drivers.
1278 * Please consult with lkml before using this in your driver.
1279 */
1280void split_page(struct page *page, unsigned int order)
1281{
1282        int i;
1283
1284        VM_BUG_ON(PageCompound(page));
1285        VM_BUG_ON(!page_count(page));
1286
1287#ifdef CONFIG_KMEMCHECK
1288        /*
1289         * Split shadow pages too, because free(page[0]) would
1290         * otherwise free the whole shadow.
1291         */
1292        if (kmemcheck_page_is_tracked(page))
1293                split_page(virt_to_page(page[0].shadow), order);
1294#endif
1295
1296        for (i = 1; i < (1 << order); i++)
1297                set_page_refcounted(page + i);
1298}
1299
1300/*
1301 * Similar to split_page except the page is already free. As this is only
1302 * being used for migration, the migratetype of the block also changes.
1303 * As this is called with interrupts disabled, the caller is responsible
 1304 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
1305 * are enabled.
1306 *
1307 * Note: this is probably too low level an operation for use in drivers.
1308 * Please consult with lkml before using this in your driver.
1309 */
1310int split_free_page(struct page *page)
1311{
1312        unsigned int order;
1313        unsigned long watermark;
1314        struct zone *zone;
1315
1316        BUG_ON(!PageBuddy(page));
1317
1318        zone = page_zone(page);
1319        order = page_order(page);
1320
1321        /* Obey watermarks as if the page was being allocated */
1322        watermark = low_wmark_pages(zone) + (1 << order);
1323        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
1324                return 0;
1325
1326        /* Remove page from free list */
1327        list_del(&page->lru);
1328        zone->free_area[order].nr_free--;
1329        rmv_page_order(page);
1330        __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
1331
1332        /* Split into individual pages */
1333        set_page_refcounted(page);
1334        split_page(page, order);
1335
1336        if (order >= pageblock_order - 1) {
1337                struct page *endpage = page + (1 << order) - 1;
1338                for (; page < endpage; page += pageblock_nr_pages)
1339                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
1340        }
1341
1342        return 1 << order;
1343}
1344
1345/*
1346 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1347 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1348 * or two.
1349 */
1350static inline
1351struct page *buffered_rmqueue(struct zone *preferred_zone,
1352                        struct zone *zone, int order, gfp_t gfp_flags,
1353                        int migratetype)
1354{
1355        unsigned long flags;
1356        struct page *page;
1357        int cold = !!(gfp_flags & __GFP_COLD);
1358
1359again:
1360        if (likely(order == 0)) {
1361                struct per_cpu_pages *pcp;
1362                struct list_head *list;
1363
1364                local_irq_save(flags);
1365                pcp = &this_cpu_ptr(zone->pageset)->pcp;
1366                list = &pcp->lists[migratetype];
1367                if (list_empty(list)) {
1368                        pcp->count += rmqueue_bulk(zone, 0,
1369                                        pcp->batch, list,
1370                                        migratetype, cold);
1371                        if (unlikely(list_empty(list)))
1372                                goto failed;
1373                }
1374
1375                if (cold)
1376                        page = list_entry(list->prev, struct page, lru);
1377                else
1378                        page = list_entry(list->next, struct page, lru);
1379
1380                list_del(&page->lru);
1381                pcp->count--;
1382        } else {
1383                if (unlikely(gfp_flags & __GFP_NOFAIL)) {
1384                        /*
1385                         * __GFP_NOFAIL is not to be used in new code.
1386                         *
1387                         * All __GFP_NOFAIL callers should be fixed so that they
1388                         * properly detect and handle allocation failures.
1389                         *
1390                         * We most definitely don't want callers attempting to
1391                         * allocate greater than order-1 page units with
1392                         * __GFP_NOFAIL.
1393                         */
1394                        WARN_ON_ONCE(order > 1);
1395                }
1396                spin_lock_irqsave(&zone->lock, flags);
1397                page = __rmqueue(zone, order, migratetype);
1398                spin_unlock(&zone->lock);
1399                if (!page)
1400                        goto failed;
1401                __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
1402        }
1403
1404        __count_zone_vm_events(PGALLOC, zone, 1 << order);
1405        zone_statistics(preferred_zone, zone, gfp_flags);
1406        local_irq_restore(flags);
1407
1408        VM_BUG_ON(bad_range(zone, page));
1409        if (prep_new_page(page, order, gfp_flags))
1410                goto again;
1411        return page;
1412
1413failed:
1414        local_irq_restore(flags);
1415        return NULL;
1416}
1417
1418/* The ALLOC_WMARK bits are used as an index to zone->watermark */
1419#define ALLOC_WMARK_MIN         WMARK_MIN
1420#define ALLOC_WMARK_LOW         WMARK_LOW
1421#define ALLOC_WMARK_HIGH        WMARK_HIGH
1422#define ALLOC_NO_WATERMARKS     0x04 /* don't check watermarks at all */
1423
1424/* Mask to get the watermark bits */
1425#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
1426
1427#define ALLOC_HARDER            0x10 /* try to alloc harder */
1428#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
1429#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
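
/*
 * For example, an allocation passing alloc_flags == (ALLOC_WMARK_LOW |
 * ALLOC_CPUSET) yields (alloc_flags & ALLOC_WMARK_MASK) == WMARK_LOW,
 * which is the index into zone->watermark[] used to pick the mark that
 * gets handed to zone_watermark_ok().
 */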
1430
1431#ifdef CONFIG_FAIL_PAGE_ALLOC
1432
1433static struct {
1434        struct fault_attr attr;
1435
1436        u32 ignore_gfp_highmem;
1437        u32 ignore_gfp_wait;
1438        u32 min_order;
1439} fail_page_alloc = {
1440        .attr = FAULT_ATTR_INITIALIZER,
1441        .ignore_gfp_wait = 1,
1442        .ignore_gfp_highmem = 1,
1443        .min_order = 1,
1444};
1445
1446static int __init setup_fail_page_alloc(char *str)
1447{
1448        return setup_fault_attr(&fail_page_alloc.attr, str);
1449}
1450__setup("fail_page_alloc=", setup_fail_page_alloc);
1451
1452static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1453{
1454        if (order < fail_page_alloc.min_order)
1455                return 0;
1456        if (gfp_mask & __GFP_NOFAIL)
1457                return 0;
1458        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1459                return 0;
1460        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1461                return 0;
1462
1463        return should_fail(&fail_page_alloc.attr, 1 << order);
1464}
1465
1466#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1467
1468static int __init fail_page_alloc_debugfs(void)
1469{
1470        umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1471        struct dentry *dir;
1472
1473        dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
1474                                        &fail_page_alloc.attr);
1475        if (IS_ERR(dir))
1476                return PTR_ERR(dir);
1477
1478        if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
1479                                &fail_page_alloc.ignore_gfp_wait))
1480                goto fail;
1481        if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1482                                &fail_page_alloc.ignore_gfp_highmem))
1483                goto fail;
1484        if (!debugfs_create_u32("min-order", mode, dir,
1485                                &fail_page_alloc.min_order))
1486                goto fail;
1487
1488        return 0;
1489fail:
1490        debugfs_remove_recursive(dir);
1491
1492        return -ENOMEM;
1493}
1494
1495late_initcall(fail_page_alloc_debugfs);
1496
1497#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1498
1499#else /* CONFIG_FAIL_PAGE_ALLOC */
1500
1501static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1502{
1503        return 0;
1504}
1505
1506#endif /* CONFIG_FAIL_PAGE_ALLOC */
1507
1508/*
1509 * Return true if free pages are above 'mark'. This takes into account the order
1510 * of the allocation.
1511 */
1512static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1513                      int classzone_idx, int alloc_flags, long free_pages)
1514{
 1515        /* free_pages may go negative - that's OK */
1516        long min = mark;
1517        int o;
1518
1519        free_pages -= (1 << order) - 1;
1520        if (alloc_flags & ALLOC_HIGH)
1521                min -= min / 2;
1522        if (alloc_flags & ALLOC_HARDER)
1523                min -= min / 4;
1524
1525        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1526                return false;
1527        for (o = 0; o < order; o++) {
1528                /* At the next order, this order's pages become unavailable */
1529                free_pages -= z->free_area[o].nr_free << o;
1530
1531                /* Require fewer higher order pages to be free */
1532                min >>= 1;
1533
1534                if (free_pages <= min)
1535                        return false;
1536        }
1537        return true;
1538}
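/*
 * Worked example (numbers chosen purely for illustration): an order-2
 * request against mark = 128, with no ALLOC_HIGH/ALLOC_HARDER and a zero
 * lowmem_reserve, in a zone holding 200 free pages split as 120 order-0
 * pages, 30 order-1 blocks (60 pages) and the rest in larger blocks:
 *
 *	free_pages = 200 - ((1 << 2) - 1) = 197;  197 > 128 + 0, continue
 *	o = 0: free_pages = 197 - 120 = 77;  min = 64;  77 > 64, continue
 *	o = 1: free_pages =  77 -  60 = 17;  min = 32;  17 <= 32, fail
 *
 * so the check fails even though the zone is above the mark overall,
 * because too little of the free memory sits in blocks of order >= 2.
 */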
1539
1540bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1541                      int classzone_idx, int alloc_flags)
1542{
1543        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1544                                        zone_page_state(z, NR_FREE_PAGES));
1545}
1546
1547bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
1548                      int classzone_idx, int alloc_flags)
1549{
1550        long free_pages = zone_page_state(z, NR_FREE_PAGES);
1551
1552        if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
1553                free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
1554
1555        return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
1556                                                                free_pages);
1557}
1558
1559#ifdef CONFIG_NUMA
1560/*
1561 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1562 * skip over zones that are not allowed by the cpuset, or that have
1563 * been recently (in last second) found to be nearly full.  See further
1564 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1565 * that have to skip over a lot of full or unallowed zones.
1566 *
1567 * If the zonelist cache is present in the passed in zonelist, then
1568 * returns a pointer to the allowed node mask (either the current
1569 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1570 *
1571 * If the zonelist cache is not available for this zonelist, does
1572 * nothing and returns NULL.
1573 *
1574 * If the fullzones BITMAP in the zonelist cache is stale (more than
1575 * a second since last zap'd) then we zap it out (clear its bits.)
1576 *
1577 * We hold off even calling zlc_setup, until after we've checked the
1578 * first zone in the zonelist, on the theory that most allocations will
1579 * be satisfied from that first zone, so best to examine that zone as
1580 * quickly as we can.
1581 */
1582static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1583{
1584        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1585        nodemask_t *allowednodes;       /* zonelist_cache approximation */
1586
1587        zlc = zonelist->zlcache_ptr;
1588        if (!zlc)
1589                return NULL;
1590
1591        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1592                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1593                zlc->last_full_zap = jiffies;
1594        }
1595
1596        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1597                                        &cpuset_current_mems_allowed :
1598                                        &node_states[N_HIGH_MEMORY];
1599        return allowednodes;
1600}
1601
1602/*
1603 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1604 * if it is worth looking at further for free memory:
1605 *  1) Check that the zone isn't thought to be full (doesn't have its
1606 *     bit set in the zonelist_cache fullzones BITMAP).
1607 *  2) Check that the zone's node (obtained from the zonelist_cache
1608 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1609 * Return true (non-zero) if zone is worth looking at further, or
1610 * else return false (zero) if it is not.
1611 *
1612 * This check -ignores- the distinction between various watermarks,
1613 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1614 * found to be full for any variation of these watermarks, it will
1615 * be considered full for up to one second by all requests, unless
1616 * we are so low on memory on all allowed nodes that we are forced
1617 * into the second scan of the zonelist.
1618 *
1619 * In the second scan we ignore this zonelist cache and exactly
1620 * apply the watermarks to all zones, even though it is slower to do so.
1621 * We are low on memory in the second scan, and should leave no stone
1622 * unturned looking for a free page.
1623 */
1624static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1625                                                nodemask_t *allowednodes)
1626{
1627        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1628        int i;                          /* index of *z in zonelist zones */
1629        int n;                          /* node that zone *z is on */
1630
1631        zlc = zonelist->zlcache_ptr;
1632        if (!zlc)
1633                return 1;
1634
1635        i = z - zonelist->_zonerefs;
1636        n = zlc->z_to_n[i];
1637
1638        /* This zone is worth trying if it is allowed but not full */
1639        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1640}
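/*
 * For example (hypothetical indices): if *z is the fourth zoneref in the
 * zonelist (i == 3) and zlc->z_to_n[3] == 1, the zone is only worth
 * trying when node 1 is in *allowednodes and bit 3 of zlc->fullzones is
 * still clear, i.e. the zone was not marked full within the last second.
 */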
1641
1642/*
1643 * Given 'z' scanning a zonelist, set the corresponding bit in
1644 * zlc->fullzones, so that subsequent attempts to allocate a page
1645 * from that zone don't waste time re-examining it.
1646 */
1647static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1648{
1649        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1650        int i;                          /* index of *z in zonelist zones */
1651
1652        zlc = zonelist->zlcache_ptr;
1653        if (!zlc)
1654                return;
1655
1656        i = z - zonelist->_zonerefs;
1657
1658        set_bit(i, zlc->fullzones);
1659}
1660
1661/*
1662 * clear all zones full, called after direct reclaim makes progress so that
1663 * a zone that was recently full is not skipped over for up to a second
1664 */
1665static void zlc_clear_zones_full(struct zonelist *zonelist)
1666{
1667        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1668
1669        zlc = zonelist->zlcache_ptr;
1670        if (!zlc)
1671                return;
1672
1673        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1674}
1675
1676#else   /* CONFIG_NUMA */
1677
1678static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1679{
1680        return NULL;
1681}
1682
1683static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1684                                nodemask_t *allowednodes)
1685{
1686        return 1;
1687}
1688
1689static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1690{
1691}
1692
1693static void zlc_clear_zones_full(struct zonelist *zonelist)
1694{
1695}
1696#endif  /* CONFIG_NUMA */
1697
1698/*
1699 * get_page_from_freelist goes through the zonelist trying to allocate
1700 * a page.
1701 */
1702static struct page *
1703get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1704                struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
1705                struct zone *preferred_zone, int migratetype)
1706{
1707        struct zoneref *z;
1708        struct page *page = NULL;
1709        int classzone_idx;
1710        struct zone *zone;
1711        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1712        int zlc_active = 0;             /* set if using zonelist_cache */
1713        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
1714
1715        classzone_idx = zone_idx(preferred_zone);
1716zonelist_scan:
1717        /*
1718         * Scan zonelist, looking for a zone with enough free.
1719         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1720         */
1721        for_each_zone_zonelist_nodemask(zone, z, zonelist,
1722                                                high_zoneidx, nodemask) {
1723                if (NUMA_BUILD && zlc_active &&
1724                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
1725                                continue;
1726                if ((alloc_flags & ALLOC_CPUSET) &&
1727                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
1728                                continue;
1729                /*
1730                 * When allocating a page cache page for writing, we
1731                 * want to get it from a zone that is within its dirty
1732                 * limit, such that no single zone holds more than its
1733                 * proportional share of globally allowed dirty pages.
1734                 * The dirty limits take into account the zone's
1735                 * lowmem reserves and high watermark so that kswapd
1736                 * should be able to balance it without having to
1737                 * write pages from its LRU list.
1738                 *
1739                 * This may look like it could increase pressure on
1740                 * lower zones by failing allocations in higher zones
1741                 * before they are full.  But the pages that do spill
1742                 * over are limited as the lower zones are protected
1743                 * by this very same mechanism.  It should not become
1744                 * a practical burden to them.
1745                 *
1746                 * XXX: For now, allow allocations to potentially
1747                 * exceed the per-zone dirty limit in the slowpath
1748                 * (ALLOC_WMARK_LOW unset) before going into reclaim,
1749                 * which is important when on a NUMA setup the allowed
1750                 * zones are together not big enough to reach the
1751                 * global limit.  The proper fix for these situations
1752                 * will require awareness of zones in the
1753                 * dirty-throttling and the flusher threads.
1754                 */
1755                if ((alloc_flags & ALLOC_WMARK_LOW) &&
1756                    (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
1757                        goto this_zone_full;
1758
1759                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
1760                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1761                        unsigned long mark;
1762                        int ret;
1763
1764                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
1765                        if (zone_watermark_ok(zone, order, mark,
1766                                    classzone_idx, alloc_flags))
1767                                goto try_this_zone;
1768
1769                        if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
1770                                /*
1771                                 * we do zlc_setup if there are multiple nodes
1772                                 * and before considering the first zone allowed
1773                                 * by the cpuset.
1774                                 */
1775                                allowednodes = zlc_setup(zonelist, alloc_flags);
1776                                zlc_active = 1;
1777                                did_zlc_setup = 1;
1778                        }
1779
1780                        if (zone_reclaim_mode == 0)
1781                                goto this_zone_full;
1782
1783                        /*
1784                         * As we may have just activated ZLC, check if the first
1785                         * eligible zone has failed zone_reclaim recently.
1786                         */
1787                        if (NUMA_BUILD && zlc_active &&
1788                                !zlc_zone_worth_trying(zonelist, z, allowednodes))
1789                                continue;
1790
1791                        ret = zone_reclaim(zone, gfp_mask, order);
1792                        switch (ret) {
1793                        case ZONE_RECLAIM_NOSCAN:
1794                                /* did not scan */
1795                                continue;
1796                        case ZONE_RECLAIM_FULL:
1797                                /* scanned but unreclaimable */
1798                                continue;
1799                        default:
1800                                /* did we reclaim enough? */
1801                                if (!zone_watermark_ok(zone, order, mark,
1802                                                classzone_idx, alloc_flags))
1803                                        goto this_zone_full;
1804                        }
1805                }
1806
1807try_this_zone:
1808                page = buffered_rmqueue(preferred_zone, zone, order,
1809                                                gfp_mask, migratetype);
1810                if (page)
1811                        break;
1812this_zone_full:
1813                if (NUMA_BUILD)
1814                        zlc_mark_zone_full(zonelist, z);
1815        }
1816
1817        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1818                /* Disable zlc cache for second zonelist scan */
1819                zlc_active = 0;
1820                goto zonelist_scan;
1821        }
1822        return page;
1823}
1824
1825/*
1826 * Large machines with many possible nodes should not always dump per-node
1827 * meminfo in irq context.
1828 */
1829static inline bool should_suppress_show_mem(void)
1830{
1831        bool ret = false;
1832
1833#if NODES_SHIFT > 8
1834        ret = in_interrupt();
1835#endif
1836        return ret;
1837}
1838
1839static DEFINE_RATELIMIT_STATE(nopage_rs,
1840                DEFAULT_RATELIMIT_INTERVAL,
1841                DEFAULT_RATELIMIT_BURST);
1842
1843void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
1844{
1845        unsigned int filter = SHOW_MEM_FILTER_NODES;
1846
1847        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
1848            debug_guardpage_minorder() > 0)
1849                return;
1850
1851        /*
1852         * This documents exceptions given to allocations in certain
1853         * contexts that are allowed to allocate outside current's set
1854         * of allowed nodes.
1855         */
1856        if (!(gfp_mask & __GFP_NOMEMALLOC))
1857                if (test_thread_flag(TIF_MEMDIE) ||
1858                    (current->flags & (PF_MEMALLOC | PF_EXITING)))
1859                        filter &= ~SHOW_MEM_FILTER_NODES;
1860        if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
1861                filter &= ~SHOW_MEM_FILTER_NODES;
1862
1863        if (fmt) {
1864                struct va_format vaf;
1865                va_list args;
1866
1867                va_start(args, fmt);
1868
1869                vaf.fmt = fmt;
1870                vaf.va = &args;
1871
1872                pr_warn("%pV", &vaf);
1873
1874                va_end(args);
1875        }
1876
1877        pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
1878                current->comm, order, gfp_mask);
1879
1880        dump_stack();
1881        if (!should_suppress_show_mem())
1882                show_mem(filter);
1883}
1884
1885static inline int
1886should_alloc_retry(gfp_t gfp_mask, unsigned int order,
1887                                unsigned long did_some_progress,
1888                                unsigned long pages_reclaimed)
1889{
1890        /* Do not loop if specifically requested */
1891        if (gfp_mask & __GFP_NORETRY)
1892                return 0;
1893
1894        /* Always retry if specifically requested */
1895        if (gfp_mask & __GFP_NOFAIL)
1896                return 1;
1897
1898        /*
1899         * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
1900         * making forward progress without invoking OOM. Suspend also disables
1901         * storage devices so kswapd will not help. Bail if we are suspending.
1902         */
1903        if (!did_some_progress && pm_suspended_storage())
1904                return 0;
1905
1906        /*
1907         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1908         * means __GFP_NOFAIL, but that may not be true in other
1909         * implementations.
1910         */
1911        if (order <= PAGE_ALLOC_COSTLY_ORDER)
1912                return 1;
1913
1914        /*
1915         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1916         * specified, then we retry until we no longer reclaim any pages
1917         * (above), or we've reclaimed an order of pages at least as
1918         * large as the allocation's order. In both cases, if the
1919         * allocation still fails, we stop retrying.
1920         */
1921        if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
1922                return 1;
1923
1924        return 0;
1925}
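/*
 * For instance (illustrative only, with PAGE_ALLOC_COSTLY_ORDER being 3):
 * an order-4 request with __GFP_REPEAT keeps being retried while reclaim
 * makes progress and pages_reclaimed < (1 << 4) = 16; once at least 16
 * pages have been reclaimed without the allocation succeeding, the loop
 * gives up.  Requests at or below order 3, by contrast, retry
 * indefinitely unless __GFP_NORETRY is set, and __GFP_NOFAIL always
 * retries.
 */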
1926
1927static inline struct page *
1928__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
1929        struct zonelist *zonelist, enum zone_type high_zoneidx,
1930        nodemask_t *nodemask, struct zone *preferred_zone,
1931        int migratetype)
1932{
1933        struct page *page;
1934
1935        /* Acquire the OOM killer lock for the zones in zonelist */
1936        if (!try_set_zonelist_oom(zonelist, gfp_mask)) {
1937                schedule_timeout_uninterruptible(1);
1938                return NULL;
1939        }
1940
1941        /*
1942         * Go through the zonelist yet one more time, keeping a very high
1943         * watermark here; this is only to catch a parallel oom killing, and we
1944         * must fail if we're still under heavy pressure.
1945         */
1946        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1947                order, zonelist, high_zoneidx,
1948                ALLOC_WMARK_HIGH|ALLOC_CPUSET,
1949                preferred_zone, migratetype);
1950        if (page)
1951                goto out;
1952
1953        if (!(gfp_mask & __GFP_NOFAIL)) {
1954                /* The OOM killer will not help higher order allocs */
1955                if (order > PAGE_ALLOC_COSTLY_ORDER)
1956                        goto out;
1957                /* The OOM killer does not needlessly kill tasks for lowmem */
1958                if (high_zoneidx < ZONE_NORMAL)
1959                        goto out;
1960                /*
1961                 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
1962                 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
1963                 * The caller should handle page allocation failure by itself if
1964                 * it specifies __GFP_THISNODE.
1965                 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
1966                 */
1967                if (gfp_mask & __GFP_THISNODE)
1968                        goto out;
1969        }
1970        /* Exhausted what can be done so it's blamo time */
1971        out_of_memory(zonelist, gfp_mask, order, nodemask);
1972
1973out:
1974        clear_zonelist_oom(zonelist, gfp_mask);
1975        return page;
1976}
1977
1978#ifdef CONFIG_COMPACTION
1979/* Try memory compaction for high-order allocations before reclaim */
1980static struct page *
1981__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
1982        struct zonelist *zonelist, enum zone_type high_zoneidx,
1983        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
1984        int migratetype, bool sync_migration,
1985        bool *deferred_compaction,
1986        unsigned long *did_some_progress)
1987{
1988        struct page *page;
1989
1990        if (!order)
1991                return NULL;
1992
1993        if (compaction_deferred(preferred_zone)) {
1994                *deferred_compaction = true;
1995                return NULL;
1996        }
1997
1998        current->flags |= PF_MEMALLOC;
1999        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2000                                                nodemask, sync_migration);
2001        current->flags &= ~PF_MEMALLOC;
2002        if (*did_some_progress != COMPACT_SKIPPED) {
2003
2004                /* Page migration frees to the PCP lists but we want merging */
2005                drain_pages(get_cpu());
2006                put_cpu();
2007
2008                page = get_page_from_freelist(gfp_mask, nodemask,
2009                                order, zonelist, high_zoneidx,
2010                                alloc_flags, preferred_zone,
2011                                migratetype);
2012                if (page) {
2013                        preferred_zone->compact_considered = 0;
2014                        preferred_zone->compact_defer_shift = 0;
2015                        count_vm_event(COMPACTSUCCESS);
2016                        return page;
2017                }
2018
2019                /*
2020                 * It's bad if a compaction run occurs and fails.
2021                 * The most likely reason is that pages exist,
2022                 * but not enough to satisfy watermarks.
2023                 */
2024                count_vm_event(COMPACTFAIL);
2025
2026                /*
2027                 * As async compaction considers a subset of pageblocks, only
2028                 * defer if the failure was a sync compaction failure.
2029                 */
2030                if (sync_migration)
2031                        defer_compaction(preferred_zone);
2032
2033                cond_resched();
2034        }
2035
2036        return NULL;
2037}
2038#else
2039static inline struct page *
2040__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2041        struct zonelist *zonelist, enum zone_type high_zoneidx,
2042        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2043        int migratetype, bool sync_migration,
2044        bool *deferred_compaction,
2045        unsigned long *did_some_progress)
2046{
2047        return NULL;
2048}
2049#endif /* CONFIG_COMPACTION */
2050
2051/* The really slow allocator path where we enter direct reclaim */
2052static inline struct page *
2053__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2054        struct zonelist *zonelist, enum zone_type high_zoneidx,
2055        nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2056        int migratetype, unsigned long *did_some_progress)
2057{
2058        struct page *page = NULL;
2059        struct reclaim_state reclaim_state;
2060        bool drained = false;
2061
2062        cond_resched();
2063
2064        /* We now go into synchronous reclaim */
2065        cpuset_memory_pressure_bump();
2066        current->flags |= PF_MEMALLOC;
2067        lockdep_set_current_reclaim_state(gfp_mask);
2068        reclaim_state.reclaimed_slab = 0;
2069        current->reclaim_state = &reclaim_state;
2070
2071        *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2072
2073        current->reclaim_state = NULL;
2074        lockdep_clear_current_reclaim_state();
2075        current->flags &= ~PF_MEMALLOC;
2076
2077        cond_resched();
2078
2079        if (unlikely(!(*did_some_progress)))
2080                return NULL;
2081
2082        /* After successful reclaim, reconsider all zones for allocation */
2083        if (NUMA_BUILD)
2084                zlc_clear_zones_full(zonelist);
2085
2086retry:
2087        page = get_page_from_freelist(gfp_mask, nodemask, order,
2088                                        zonelist, high_zoneidx,
2089                                        alloc_flags, preferred_zone,
2090                                        migratetype);
2091
2092        /*
2093         * If an allocation failed after direct reclaim, it could be because
2094         * pages are pinned on the per-cpu lists. Drain them and try again
2095         */
2096        if (!page && !drained) {
2097                drain_all_pages();
2098                drained = true;
2099                goto retry;
2100        }
2101
2102        return page;
2103}
2104
2105/*
2106 * This is called in the allocator slow-path if the allocation request is of
2107 * sufficient urgency to ignore watermarks and take other desperate measures
2108 */
2109static inline struct page *
2110__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
2111        struct zonelist *zonelist, enum zone_type high_zoneidx,
2112        nodemask_t *nodemask, struct zone *preferred_zone,
2113        int migratetype)
2114{
2115        struct page *page;
2116
2117        do {
2118                page = get_page_from_freelist(gfp_mask, nodemask, order,
2119                        zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
2120                        preferred_zone, migratetype);
2121
2122                if (!page && gfp_mask & __GFP_NOFAIL)
2123                        wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2124        } while (!page && (gfp_mask & __GFP_NOFAIL));
2125
2126        return page;
2127}
2128
2129static inline
2130void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
2131                                                enum zone_type high_zoneidx,
2132                                                enum zone_type classzone_idx)
2133{
2134        struct zoneref *z;
2135        struct zone *zone;
2136
2137        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
2138                wakeup_kswapd(zone, order, classzone_idx);
2139}
2140
2141static inline int
2142gfp_to_alloc_flags(gfp_t gfp_mask)
2143{
2144        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
2145        const gfp_t wait = gfp_mask & __GFP_WAIT;
2146
2147        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
2148        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
2149
2150        /*
2151         * The caller may dip into page reserves a bit more if the caller
2152         * cannot run direct reclaim, or if the caller has realtime scheduling
2153         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
2154         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
2155         */
2156        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
2157
2158        if (!wait) {
2159                /*
2160                 * Not worth trying to allocate harder for
2161                 * __GFP_NOMEMALLOC even if it can't schedule.
2162                 */
2163                if  (!(gfp_mask & __GFP_NOMEMALLOC))
2164                        alloc_flags |= ALLOC_HARDER;
2165                /*
2166                 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
2167                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
2168                 */
2169                alloc_flags &= ~ALLOC_CPUSET;
2170        } else if (unlikely(rt_task(current)) && !in_interrupt())
2171                alloc_flags |= ALLOC_HARDER;
2172
2173        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
2174                if (!in_interrupt() &&
2175                    ((current->flags & PF_MEMALLOC) ||
2176                     unlikely(test_thread_flag(TIF_MEMDIE))))
2177                        alloc_flags |= ALLOC_NO_WATERMARKS;
2178        }
2179
2180        return alloc_flags;
2181}
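/*
 * Illustrative example: a GFP_ATOMIC caller (__GFP_HIGH set, __GFP_WAIT
 * clear) ends up with
 *
 *	ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
 *
 * and ALLOC_CPUSET cleared, so it checks the min watermark, dips further
 * into the reserves and ignores cpuset restrictions.  A plain GFP_KERNEL
 * caller keeps just ALLOC_WMARK_MIN | ALLOC_CPUSET, unless it is a
 * realtime task outside interrupt context, which also gets ALLOC_HARDER.
 */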
2182
2183static inline struct page *
2184__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2185        struct zonelist *zonelist, enum zone_type high_zoneidx,
2186        nodemask_t *nodemask, struct zone *preferred_zone,
2187        int migratetype)
2188{
2189        const gfp_t wait = gfp_mask & __GFP_WAIT;
2190        struct page *page = NULL;
2191        int alloc_flags;
2192        unsigned long pages_reclaimed = 0;
2193        unsigned long did_some_progress;
2194        bool sync_migration = false;
2195        bool deferred_compaction = false;
2196
2197        /*
2198         * In the slowpath, we sanity check order to avoid ever trying to
2199         * reclaim >= MAX_ORDER areas which will never succeed. Callers may
2200         * be using allocators in order of preference for an area that is
2201         * too large.
2202         */
2203        if (order >= MAX_ORDER) {
2204                WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
2205                return NULL;
2206        }
2207
2208        /*
2209         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
2210         * __GFP_NOWARN set) should not cause reclaim since the subsystem
2211         * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
2212         * using a larger set of nodes after it has established that the
2213         * allowed per node queues are empty and that nodes are
2214         * over allocated.
2215         */
2216        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
2217                goto nopage;
2218
2219restart:
2220        if (!(gfp_mask & __GFP_NO_KSWAPD))
2221                wake_all_kswapd(order, zonelist, high_zoneidx,
2222                                                zone_idx(preferred_zone));
2223
2224        /*
2225         * OK, we're below the kswapd watermark and have kicked background
2226         * reclaim. Now things get more complex, so set up alloc_flags according
2227         * to how we want to proceed.
2228         */
2229        alloc_flags = gfp_to_alloc_flags(gfp_mask);
2230
2231        /*
2232         * Find the true preferred zone if the allocation is unconstrained by
2233         * cpusets.
2234         */
2235        if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
2236                first_zones_zonelist(zonelist, high_zoneidx, NULL,
2237                                        &preferred_zone);
2238
2239rebalance:
2240        /* This is the last chance, in general, before the goto nopage. */
2241        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2242                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2243                        preferred_zone, migratetype);
2244        if (page)
2245                goto got_pg;
2246
2247        /* Allocate without watermarks if the context allows */
2248        if (alloc_flags & ALLOC_NO_WATERMARKS) {
2249                page = __alloc_pages_high_priority(gfp_mask, order,
2250                                zonelist, high_zoneidx, nodemask,
2251                                preferred_zone, migratetype);
2252                if (page)
2253                        goto got_pg;
2254        }
2255
2256        /* Atomic allocations - we can't balance anything */
2257        if (!wait)
2258                goto nopage;
2259
2260        /* Avoid recursion of direct reclaim */
2261        if (current->flags & PF_MEMALLOC)
2262                goto nopage;
2263
2264        /* Avoid allocations with no watermarks from looping endlessly */
2265        if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2266                goto nopage;
2267
2268        /*
2269         * Try direct compaction. The first pass is asynchronous. Subsequent
2270         * attempts after direct reclaim are synchronous
2271         */
2272        page = __alloc_pages_direct_compact(gfp_mask, order,
2273                                        zonelist, high_zoneidx,
2274                                        nodemask,
2275                                        alloc_flags, preferred_zone,
2276                                        migratetype, sync_migration,
2277                                        &deferred_compaction,
2278                                        &did_some_progress);
2279        if (page)
2280                goto got_pg;
2281        sync_migration = true;
2282
2283        /*
2284         * If compaction is deferred for high-order allocations, it is because
2285         * sync compaction recently failed. If this is the case and the caller
2286         * has requested the system not be heavily disrupted, fail the
2287         * allocation now instead of entering direct reclaim
2288         */
2289        if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
2290                goto nopage;
2291
2292        /* Try direct reclaim and then allocating */
2293        page = __alloc_pages_direct_reclaim(gfp_mask, order,
2294                                        zonelist, high_zoneidx,
2295                                        nodemask,
2296                                        alloc_flags, preferred_zone,
2297                                        migratetype, &did_some_progress);
2298        if (page)
2299                goto got_pg;
2300
2301        /*
2302         * If we failed to make any progress reclaiming, then we are
2303         * running out of options and have to consider going OOM
2304         */
2305        if (!did_some_progress) {
2306                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
2307                        if (oom_killer_disabled)
2308                                goto nopage;
2309                        page = __alloc_pages_may_oom(gfp_mask, order,
2310                                        zonelist, high_zoneidx,
2311                                        nodemask, preferred_zone,
2312                                        migratetype);
2313                        if (page)
2314                                goto got_pg;
2315
2316                        if (!(gfp_mask & __GFP_NOFAIL)) {
2317                                /*
2318                                 * The oom killer is not called for high-order
2319                                 * allocations that may fail, so if no progress
2320                                 * is being made, there are no other options and
2321                                 * retrying is unlikely to help.
2322                                 */
2323                                if (order > PAGE_ALLOC_COSTLY_ORDER)
2324                                        goto nopage;
2325                                /*
2326                                 * The oom killer is not called for lowmem
2327                                 * allocations to prevent needlessly killing
2328                                 * innocent tasks.
2329                                 */
2330                                if (high_zoneidx < ZONE_NORMAL)
2331                                        goto nopage;
2332                        }
2333
2334                        goto restart;
2335                }
2336        }
2337
2338        /* Check if we should retry the allocation */
2339        pages_reclaimed += did_some_progress;
2340        if (should_alloc_retry(gfp_mask, order, did_some_progress,
2341                                                pages_reclaimed)) {
2342                /* Wait for some write requests to complete then retry */
2343                wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
2344                goto rebalance;
2345        } else {
2346                /*
2347                 * High-order allocations do not necessarily loop after
2348                 * direct reclaim and reclaim/compaction depends on compaction
2349                 * being called after reclaim so call directly if necessary
2350                 */
2351                page = __alloc_pages_direct_compact(gfp_mask, order,
2352                                        zonelist, high_zoneidx,
2353                                        nodemask,
2354                                        alloc_flags, preferred_zone,
2355                                        migratetype, sync_migration,
2356                                        &deferred_compaction,
2357                                        &did_some_progress);
2358                if (page)
2359                        goto got_pg;
2360        }
2361
2362nopage:
2363        warn_alloc_failed(gfp_mask, order, NULL);
2364        return page;
2365got_pg:
2366        if (kmemcheck_enabled)
2367                kmemcheck_pagealloc_alloc(page, order, gfp_mask);
2368        return page;
2369
2370}
2371
2372/*
2373 * This is the 'heart' of the zoned buddy allocator.
2374 */
2375struct page *
2376__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
2377                        struct zonelist *zonelist, nodemask_t *nodemask)
2378{
2379        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
2380        struct zone *preferred_zone;
2381        struct page *page;
2382        int migratetype = allocflags_to_migratetype(gfp_mask);
2383
2384        gfp_mask &= gfp_allowed_mask;
2385
2386        lockdep_trace_alloc(gfp_mask);
2387
2388        might_sleep_if(gfp_mask & __GFP_WAIT);
2389
2390        if (should_fail_alloc_page(gfp_mask, order))
2391                return NULL;
2392
2393        /*
2394         * Check that the zones suitable for the gfp_mask contain at least one
2395         * valid zone. It's possible to have an empty zonelist as a result
2396         * of GFP_THISNODE and a memoryless node
2397         */
2398        if (unlikely(!zonelist->_zonerefs->zone))
2399                return NULL;
2400
2401        get_mems_allowed();
2402        /* The preferred zone is used for statistics later */
2403        first_zones_zonelist(zonelist, high_zoneidx,
2404                                nodemask ? : &cpuset_current_mems_allowed,
2405                                &preferred_zone);
2406        if (!preferred_zone) {
2407                put_mems_allowed();
2408                return NULL;
2409        }
2410
2411        /* First allocation attempt */
2412        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
2413                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
2414                        preferred_zone, migratetype);
2415        if (unlikely(!page))
2416                page = __alloc_pages_slowpath(gfp_mask, order,
2417                                zonelist, high_zoneidx, nodemask,
2418                                preferred_zone, migratetype);
2419        put_mems_allowed();
2420
2421        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
2422        return page;
2423}
2424EXPORT_SYMBOL(__alloc_pages_nodemask);
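/*
 * Most callers do not invoke __alloc_pages_nodemask() directly but go
 * through the wrappers in gfp.h, which supply a zonelist for the chosen
 * node and usually a NULL nodemask.  A minimal usage sketch (hypothetical
 * caller, not part of this file):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);
 *
 * requests four contiguous pages; the fast path above (ALLOC_WMARK_LOW)
 * runs first and the slowpath is only entered when it fails.
 */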
2425
2426/*
2427 * Common helper functions.
2428 */
2429unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
2430{
2431        struct page *page;
2432
2433        /*
2434         * __get_free_pages() returns a 32-bit address, which cannot represent
2435         * a highmem page
2436         */
2437        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
2438
2439        page = alloc_pages(gfp_mask, order);
2440        if (!page)
2441                return 0;
2442        return (unsigned long) page_address(page);
2443}
2444EXPORT_SYMBOL(__get_free_pages);
2445
2446unsigned long get_zeroed_page(gfp_t gfp_mask)
2447{
2448        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
2449}
2450EXPORT_SYMBOL(get_zeroed_page);
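/*
 * Typical usage (a sketch, error handling is the caller's): these helpers
 * suit callers that want a kernel virtual address rather than a
 * struct page:
 *
 *	unsigned long buf = get_zeroed_page(GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_page(buf);
 *
 * Because the returned value is a direct-mapped address, __GFP_HIGHMEM
 * must not be passed here, hence the VM_BUG_ON() in __get_free_pages().
 */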
2451
2452void __free_pages(struct page *page, unsigned int order)
2453{
2454        if (put_page_testzero(page)) {
2455                if (order == 0)
2456                        free_hot_cold_page(page, 0);
2457                else
2458                        __free_pages_ok(page, order);
2459        }
2460}
2461
2462EXPORT_SYMBOL(__free_pages);
2463
2464void free_pages(unsigned long addr, unsigned int order)
2465{
2466        if (addr != 0) {
2467                VM_BUG_ON(!virt_addr_valid((void *)addr));
2468                __free_pages(virt_to_page((void *)addr), order);
2469        }
2470}
2471
2472EXPORT_SYMBOL(free_pages);
2473
2474static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
2475{
2476        if (addr) {
2477                unsigned long alloc_end = addr + (PAGE_SIZE << order);
2478                unsigned long used = addr + PAGE_ALIGN(size);
2479
2480                split_page(virt_to_page((void *)addr), order);
2481                while (used < alloc_end) {
2482                        free_page(used);
2483                        used += PAGE_SIZE;
2484                }
2485        }
2486        return (void *)addr;
2487}
2488
2489/**
2490 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
2491 * @size: the number of bytes to allocate
2492 * @gfp_mask: GFP flags for the allocation
2493 *
2494 * This function is similar to alloc_pages(), except that it allocates the
2495 * minimum number of pages to satisfy the request.  alloc_pages() can only
2496 * allocate memory in power-of-two pages.
2497 *
2498 * This function is also limited by MAX_ORDER.
2499 *
2500 * Memory allocated by this function must be released by free_pages_exact().
2501 */
2502void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
2503{
2504        unsigned int order = get_order(size);
2505        unsigned long addr;
2506
2507        addr = __get_free_pages(gfp_mask, order);
2508        return make_alloc_exact(addr, order, size);
2509}
2510EXPORT_SYMBOL(alloc_pages_exact);
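/*
 * Example (illustrative numbers): a request for 3 * PAGE_SIZE bytes gets
 * order = get_order(3 * PAGE_SIZE) = 2, i.e. a four-page block;
 * make_alloc_exact() then split_page()s the block and frees the unused
 * fourth page, so only three pages stay allocated.  A hypothetical caller:
 *
 *	void *buf = alloc_pages_exact(3 * PAGE_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 3 * PAGE_SIZE);
 */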
2511
2512/**
2513 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
2514 *                         pages on a node.
2515 * @nid: the preferred node ID where memory should be allocated
2516 * @size: the number of bytes to allocate
2517 * @gfp_mask: GFP flags for the allocation
2518 *
2519 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
2520 * back.
2521 * Note this is not alloc_pages_exact_node() which allocates on a specific node,
2522 * but is not exact.
2523 */
2524void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
2525{
2526        unsigned order = get_order(size);
2527        struct page *p = alloc_pages_node(nid, gfp_mask, order);
2528        if (!p)
2529                return NULL;
2530        return make_alloc_exact((unsigned long)page_address(p), order, size);
2531}
2532EXPORT_SYMBOL(alloc_pages_exact_nid);
2533
2534/**
2535 * free_pages_exact - release memory allocated via alloc_pages_exact()
2536 * @virt: the value returned by alloc_pages_exact.
2537 * @size: size of allocation, same value as passed to alloc_pages_exact().
2538 *
2539 * Release the memory allocated by a previous call to alloc_pages_exact.
2540 */
2541void free_pages_exact(void *virt, size_t size)
2542{
2543        unsigned long addr = (unsigned long)virt;
2544        unsigned long end = addr + PAGE_ALIGN(size);
2545
2546        while (addr < end) {
2547                free_page(addr);
2548                addr += PAGE_SIZE;
2549        }
2550}
2551EXPORT_SYMBOL(free_pages_exact);
2552
2553static unsigned int nr_free_zone_pages(int offset)
2554{
2555        struct zoneref *z;
2556        struct zone *zone;
2557
2558        /* Just pick one node, since fallback list is circular */
2559        unsigned int sum = 0;
2560
2561        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
2562
2563        for_each_zone_zonelist(zone, z, zonelist, offset) {
2564                unsigned long size = zone->present_pages;
2565                unsigned long high = high_wmark_pages(zone);
2566                if (size > high)
2567                        sum += size - high;
2568        }
2569
2570        return sum;
2571}
2572
2573/*
2574 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
2575 */
2576unsigned int nr_free_buffer_pages(void)
2577{
2578        return nr_free_zone_pages(gfp_zone(GFP_USER));
2579}
2580EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
2581
2582/*
2583 * Amount of free RAM allocatable within all zones
2584 */
2585unsigned int nr_free_pagecache_pages(void)
2586{
2587        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
2588}
2589
2590static inline void show_node(struct zone *zone)
2591{
2592        if (NUMA_BUILD)
2593                printk("Node %d ", zone_to_nid(zone));
2594}
2595
2596void si_meminfo(struct sysinfo *val)
2597{
2598        val->totalram = totalram_pages;
2599        val->sharedram = 0;
2600        val->freeram = global_page_state(NR_FREE_PAGES);
2601        val->bufferram = nr_blockdev_pages();
2602        val->totalhigh = totalhigh_pages;
2603        val->freehigh = nr_free_highpages();
2604        val->mem_unit = PAGE_SIZE;
2605}
2606
2607EXPORT_SYMBOL(si_meminfo);
2608
2609#ifdef CONFIG_NUMA
2610void si_meminfo_node(struct sysinfo *val, int nid)
2611{
2612        pg_data_t *pgdat = NODE_DATA(nid);
2613
2614        val->totalram = pgdat->node_present_pages;
2615        val->freeram = node_page_state(nid, NR_FREE_PAGES);
2616#ifdef CONFIG_HIGHMEM
2617        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
2618        val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
2619                        NR_FREE_PAGES);
2620#else
2621        val->totalhigh = 0;
2622        val->freehigh = 0;
2623#endif
2624        val->mem_unit = PAGE_SIZE;
2625}
2626#endif
2627
2628/*
2629 * Determine whether the node should be displayed or not, depending on whether
2630 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
2631 */
2632bool skip_free_areas_node(unsigned int flags, int nid)
2633{
2634        bool ret = false;
2635
2636        if (!(flags & SHOW_MEM_FILTER_NODES))
2637                goto out;
2638
2639        get_mems_allowed();
2640        ret = !node_isset(nid, cpuset_current_mems_allowed);
2641        put_mems_allowed();
2642out:
2643        return ret;
2644}
2645
2646#define K(x) ((x) << (PAGE_SHIFT-10))
2647
2648/*
2649 * Show free area list (used inside shift_scroll-lock stuff)
2650 * We also calculate the percentage fragmentation. We do this by counting the
2651 * memory on each free list with the exception of the first item on the list.
2652 * Suppresses nodes that are not allowed by current's cpuset if
2653 * SHOW_MEM_FILTER_NODES is passed.
2654 */
2655void show_free_areas(unsigned int filter)
2656{
2657        int cpu;
2658        struct zone *zone;
2659
2660        for_each_populated_zone(zone) {
2661                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2662                        continue;
2663                show_node(zone);
2664                printk("%s per-cpu:\n", zone->name);
2665
2666                for_each_online_cpu(cpu) {
2667                        struct per_cpu_pageset *pageset;
2668
2669                        pageset = per_cpu_ptr(zone->pageset, cpu);
2670
2671                        printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
2672                               cpu, pageset->pcp.high,
2673                               pageset->pcp.batch, pageset->pcp.count);
2674                }
2675        }
2676
2677        printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
2678                " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
2679                " unevictable:%lu"
2680                " dirty:%lu writeback:%lu unstable:%lu\n"
2681                " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
2682                " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
2683                global_page_state(NR_ACTIVE_ANON),
2684                global_page_state(NR_INACTIVE_ANON),
2685                global_page_state(NR_ISOLATED_ANON),
2686                global_page_state(NR_ACTIVE_FILE),
2687                global_page_state(NR_INACTIVE_FILE),
2688                global_page_state(NR_ISOLATED_FILE),
2689                global_page_state(NR_UNEVICTABLE),
2690                global_page_state(NR_FILE_DIRTY),
2691                global_page_state(NR_WRITEBACK),
2692                global_page_state(NR_UNSTABLE_NFS),
2693                global_page_state(NR_FREE_PAGES),
2694                global_page_state(NR_SLAB_RECLAIMABLE),
2695                global_page_state(NR_SLAB_UNRECLAIMABLE),
2696                global_page_state(NR_FILE_MAPPED),
2697                global_page_state(NR_SHMEM),
2698                global_page_state(NR_PAGETABLE),
2699                global_page_state(NR_BOUNCE));
2700
2701        for_each_populated_zone(zone) {
2702                int i;
2703
2704                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2705                        continue;
2706                show_node(zone);
2707                printk("%s"
2708                        " free:%lukB"
2709                        " min:%lukB"
2710                        " low:%lukB"
2711                        " high:%lukB"
2712                        " active_anon:%lukB"
2713                        " inactive_anon:%lukB"
2714                        " active_file:%lukB"
2715                        " inactive_file:%lukB"
2716                        " unevictable:%lukB"
2717                        " isolated(anon):%lukB"
2718                        " isolated(file):%lukB"
2719                        " present:%lukB"
2720                        " mlocked:%lukB"
2721                        " dirty:%lukB"
2722                        " writeback:%lukB"
2723                        " mapped:%lukB"
2724                        " shmem:%lukB"
2725                        " slab_reclaimable:%lukB"
2726                        " slab_unreclaimable:%lukB"
2727                        " kernel_stack:%lukB"
2728                        " pagetables:%lukB"
2729                        " unstable:%lukB"
2730                        " bounce:%lukB"
2731                        " writeback_tmp:%lukB"
2732                        " pages_scanned:%lu"
2733                        " all_unreclaimable? %s"
2734                        "\n",
2735                        zone->name,
2736                        K(zone_page_state(zone, NR_FREE_PAGES)),
2737                        K(min_wmark_pages(zone)),
2738                        K(low_wmark_pages(zone)),
2739                        K(high_wmark_pages(zone)),
2740                        K(zone_page_state(zone, NR_ACTIVE_ANON)),
2741                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
2742                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
2743                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
2744                        K(zone_page_state(zone, NR_UNEVICTABLE)),
2745                        K(zone_page_state(zone, NR_ISOLATED_ANON)),
2746                        K(zone_page_state(zone, NR_ISOLATED_FILE)),
2747                        K(zone->present_pages),
2748                        K(zone_page_state(zone, NR_MLOCK)),
2749                        K(zone_page_state(zone, NR_FILE_DIRTY)),
2750                        K(zone_page_state(zone, NR_WRITEBACK)),
2751                        K(zone_page_state(zone, NR_FILE_MAPPED)),
2752                        K(zone_page_state(zone, NR_SHMEM)),
2753                        K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
2754                        K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
2755                        zone_page_state(zone, NR_KERNEL_STACK) *
2756                                THREAD_SIZE / 1024,
2757                        K(zone_page_state(zone, NR_PAGETABLE)),
2758                        K(zone_page_state(zone, NR_UNSTABLE_NFS)),
2759                        K(zone_page_state(zone, NR_BOUNCE)),
2760                        K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
2761                        zone->pages_scanned,
2762                        (zone->all_unreclaimable ? "yes" : "no")
2763                        );
2764                printk("lowmem_reserve[]:");
2765                for (i = 0; i < MAX_NR_ZONES; i++)
2766                        printk(" %lu", zone->lowmem_reserve[i]);
2767                printk("\n");
2768        }
2769
2770        for_each_populated_zone(zone) {
2771                unsigned long nr[MAX_ORDER], flags, order, total = 0;
2772
2773                if (skip_free_areas_node(filter, zone_to_nid(zone)))
2774                        continue;
2775                show_node(zone);
2776                printk("%s: ", zone->name);
2777
2778                spin_lock_irqsave(&zone->lock, flags);
2779                for (order = 0; order < MAX_ORDER; order++) {
2780                        nr[order] = zone->free_area[order].nr_free;
2781                        total += nr[order] << order;
2782                }
2783                spin_unlock_irqrestore(&zone->lock, flags);
2784                for (order = 0; order < MAX_ORDER; order++)
2785                        printk("%lu*%lukB ", nr[order], K(1UL) << order);
2786                printk("= %lukB\n", K(total));
2787        }
2788
2789        printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
2790
2791        show_swap_cache_info();
2792}
2793
2794static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
2795{
2796        zoneref->zone = zone;
2797        zoneref->zone_idx = zone_idx(zone);
2798}
2799
2800/*
2801 * Builds allocation fallback zone lists.
2802 *
2803 * Add all populated zones of a node to the zonelist.
2804 */
2805static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
2806                                int nr_zones, enum zone_type zone_type)
2807{
2808        struct zone *zone;
2809
2810        BUG_ON(zone_type >= MAX_NR_ZONES);
2811        zone_type++;
2812
2813        do {
2814                zone_type--;
2815                zone = pgdat->node_zones + zone_type;
2816                if (populated_zone(zone)) {
2817                        zoneref_set_zone(zone,
2818                                &zonelist->_zonerefs[nr_zones++]);
2819                        check_highest_zone(zone_type);
2820                }
2821
2822        } while (zone_type);
2823        return nr_zones;
2824}
2825
2826
2827/*
2828 *  zonelist_order:
2829 *  0 = automatic detection of better ordering.
2830 *  1 = order by ([node] distance, -zonetype)
2831 *  2 = order by (-zonetype, [node] distance)
2832 *
2833 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
2834 *  the same zonelist. So only NUMA can configure this param.
2835 */
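/*
 * Worked example (hypothetical two-node machine): if node 0 has DMA and
 * Normal zones and node 1 has only a Normal zone, node 0's fallback list
 * under Node ordering is
 *
 *	node0/Normal -> node0/DMA -> node1/Normal
 *
 * while Zone ordering gives
 *
 *	node0/Normal -> node1/Normal -> node0/DMA
 *
 * i.e. Zone ordering protects the small DMA zone at the cost of locality.
 */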
2836#define ZONELIST_ORDER_DEFAULT  0
2837#define ZONELIST_ORDER_NODE     1
2838#define ZONELIST_ORDER_ZONE     2
2839
2840/* zonelist order in the kernel.
2841 * set_zonelist_order() will set this to NODE or ZONE.
2842 */
2843static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2844static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2845
2846
2847#ifdef CONFIG_NUMA
2848/* The value the user specified; may be changed by config */
2849static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2850/* string for sysctl */
2851#define NUMA_ZONELIST_ORDER_LEN 16
2852char numa_zonelist_order[16] = "default";
2853
2854/*
2855 * interface to configure zonelist ordering.
2856 * command line option "numa_zonelist_order"
2857 *      = "[dD]efault"  - default, automatic configuration.
2858 *      = "[nN]ode"     - order by node locality, then by zone within node
2859 *      = "[zZ]one"     - order by zone, then by locality within zone
2860 */
2861
2862static int __parse_numa_zonelist_order(char *s)
2863{
2864        if (*s == 'd' || *s == 'D') {
2865                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2866        } else if (*s == 'n' || *s == 'N') {
2867                user_zonelist_order = ZONELIST_ORDER_NODE;
2868        } else if (*s == 'z' || *s == 'Z') {
2869                user_zonelist_order = ZONELIST_ORDER_ZONE;
2870        } else {
2871                printk(KERN_WARNING
2872                        "Ignoring invalid numa_zonelist_order value:  "
2873                        "%s\n", s);
2874                return -EINVAL;
2875        }
2876        return 0;
2877}
2878
2879static __init int setup_numa_zonelist_order(char *s)
2880{
2881        int ret;
2882
2883        if (!s)
2884                return 0;
2885
2886        ret = __parse_numa_zonelist_order(s);
2887        if (ret == 0)
2888                strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
2889
2890        return ret;
2891}
2892early_param("numa_zonelist_order", setup_numa_zonelist_order);
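/*
 * Usage sketch: the ordering can be chosen at boot with
 * "numa_zonelist_order=zone" (or "node"/"default") on the kernel command
 * line, or changed at runtime through the sysctl handled below, e.g.
 *
 *	echo zone > /proc/sys/vm/numa_zonelist_order
 *
 * which ends up in numa_zonelist_order_handler() and, on a successful
 * parse, rebuilds all zonelists.
 */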
2893
2894/*
2895 * sysctl handler for numa_zonelist_order
2896 */
2897int numa_zonelist_order_handler(ctl_table *table, int write,
2898                void __user *buffer, size_t *length,
2899                loff_t *ppos)
2900{
2901        char saved_string[NUMA_ZONELIST_ORDER_LEN];
2902        int ret;
2903        static DEFINE_MUTEX(zl_order_mutex);
2904
2905        mutex_lock(&zl_order_mutex);
2906        if (write)
2907                strcpy(saved_string, (char*)table->data);
2908        ret = proc_dostring(table, write, buffer, length, ppos);
2909        if (ret)
2910                goto out;
2911        if (write) {
2912                int oldval = user_zonelist_order;
2913                if (__parse_numa_zonelist_order((char*)table->data)) {
2914                        /*
2915                         * bogus value.  restore saved string
2916                         */
2917                        strncpy((char*)table->data, saved_string,
2918                                NUMA_ZONELIST_ORDER_LEN);
2919                        user_zonelist_order = oldval;
2920                } else if (oldval != user_zonelist_order) {
2921                        mutex_lock(&zonelists_mutex);
2922                        build_all_zonelists(NULL);
2923                        mutex_unlock(&zonelists_mutex);
2924                }
2925        }
2926out:
2927        mutex_unlock(&zl_order_mutex);
2928        return ret;
2929}
2930
2931
2932#define MAX_NODE_LOAD (nr_online_nodes)
2933static int node_load[MAX_NUMNODES];
2934
2935/**
2936 * find_next_best_node - find the next node that should appear in a given node's fallback list
2937 * @node: node whose fallback list we're appending
2938 * @used_node_mask: nodemask_t of already used nodes
2939 *
2940 * We use a number of factors to determine which is the next node that should
2941 * appear on a given node's fallback list.  The node should not have appeared
2942 * already in @node's fallback list, and it should be the next closest node
2943 * according to the distance array (which contains arbitrary distance values
2944 * from each node to each node in the system); we also prefer nodes
2945 * with no CPUs, since presumably they'll have very little allocation pressure
2946 * on them otherwise.
2947 * It returns -1 if no node is found.
2948 */
2949static int find_next_best_node(int node, nodemask_t *used_node_mask)
2950{
2951        int n, val;
2952        int min_val = INT_MAX;
2953        int best_node = -1;
2954        const struct cpumask *tmp = cpumask_of_node(0);
2955
2956        /* Use the local node if we haven't already */
2957        if (!node_isset(node, *used_node_mask)) {
2958                node_set(node, *used_node_mask);
2959                return node;
2960        }
2961
2962        for_each_node_state(n, N_HIGH_MEMORY) {
2963
2964                /* Don't want a node to appear more than once */
2965                if (node_isset(n, *used_node_mask))
2966                        continue;
2967
2968                /* Use the distance array to find the distance */
2969                val = node_distance(node, n);
2970
2971                /* Penalize nodes under us ("prefer the next node") */
2972                val += (n < node);
2973
2974                /* Give preference to headless and unused nodes */
2975                tmp = cpumask_of_node(n);
2976                if (!cpumask_empty(tmp))
2977                        val += PENALTY_FOR_NODE_WITH_CPUS;
2978
2979                /* Slight preference for less loaded node */
2980                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2981                val += node_load[n];
2982
2983                if (val < min_val) {
2984                        min_val = val;
2985                        best_node = n;
2986                }
2987        }
2988
2989        if (best_node >= 0)
2990                node_set(best_node, *used_node_mask);
2991
2992        return best_node;
2993}
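
/*
 * Scoring sketch (informal restatement of the loop above): each candidate
 * node n is ranked by
 *
 *   val = (distance + next_node_penalty + cpu_penalty)
 *         * MAX_NODE_LOAD * MAX_NUMNODES + node_load[n]
 *
 * and the smallest val wins.  Because node_load[n] never exceeds the scale
 * factor, this is effectively a lexicographic sort: first by penalized
 * distance, then by the load already assigned to the node.
 */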
2994
2995
2996/*
2997 * Build zonelists ordered by node and zones within node.
2998 * This results in maximum locality--normal zone overflows into local
2999 * DMA zone, if any--but risks exhausting DMA zone.
3000 */
3001static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
3002{
3003        int j;
3004        struct zonelist *zonelist;
3005
3006        zonelist = &pgdat->node_zonelists[0];
3007        for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
3008                ;
3009        j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3010                                                        MAX_NR_ZONES - 1);
3011        zonelist->_zonerefs[j].zone = NULL;
3012        zonelist->_zonerefs[j].zone_idx = 0;
3013}
3014
3015/*
3016 * Build gfp_thisnode zonelists
3017 */
3018static void build_thisnode_zonelists(pg_data_t *pgdat)
3019{
3020        int j;
3021        struct zonelist *zonelist;
3022
3023        zonelist = &pgdat->node_zonelists[1];
3024        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3025        zonelist->_zonerefs[j].zone = NULL;
3026        zonelist->_zonerefs[j].zone_idx = 0;
3027}
3028
3029/*
3030 * Build zonelists ordered by zone and nodes within zones.
3031 * This conserves the DMA zone[s] until all Normal memory is
3032 * exhausted, but may overflow to a remote node while memory
3033 * still exists in the local DMA zone.
3034 */
3035static int node_order[MAX_NUMNODES];
3036
3037static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3038{
3039        int pos, j, node;
3040        int zone_type;          /* needs to be signed */
3041        struct zone *z;
3042        struct zonelist *zonelist;
3043
3044        zonelist = &pgdat->node_zonelists[0];
3045        pos = 0;
3046        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3047                for (j = 0; j < nr_nodes; j++) {
3048                        node = node_order[j];
3049                        z = &NODE_DATA(node)->node_zones[zone_type];
3050                        if (populated_zone(z)) {
3051                                zoneref_set_zone(z,
3052                                        &zonelist->_zonerefs[pos++]);
3053                                check_highest_zone(zone_type);
3054                        }
3055                }
3056        }
3057        zonelist->_zonerefs[pos].zone = NULL;
3058        zonelist->_zonerefs[pos].zone_idx = 0;
3059}
3060
3061static int default_zonelist_order(void)
3062{
3063        int nid, zone_type;
3064        unsigned long low_kmem_size, total_size;
3065        struct zone *z;
3066        int average_size;
3067        /*
3068         * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
3069         * If they are really small and used heavily, the system can fall
3070         * into OOM very easily.
3071         * This function detects the ZONE_DMA/DMA32 size and configures zone order.
3072         */
3073        /* Is there a ZONE_NORMAL? (e.g. ppc may have only a DMA zone.) */
3074        low_kmem_size = 0;
3075        total_size = 0;
3076        for_each_online_node(nid) {
3077                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3078                        z = &NODE_DATA(nid)->node_zones[zone_type];
3079                        if (populated_zone(z)) {
3080                                if (zone_type < ZONE_NORMAL)
3081                                        low_kmem_size += z->present_pages;
3082                                total_size += z->present_pages;
3083                        } else if (zone_type == ZONE_NORMAL) {
3084                                /*
3085                                 * If any node has only lowmem, then node order
3086                                 * is preferred to allow kernel allocations
3087                                 * locally; otherwise, they can easily infringe
3088                                 * on other nodes when there is an abundance of
3089                                 * lowmem available to allocate from.
3090                                 */
3091                                return ZONELIST_ORDER_NODE;
3092                        }
3093                }
3094        }
3095        if (!low_kmem_size ||  /* there is no DMA area. */
3096            low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
3097                return ZONELIST_ORDER_NODE;
3098        /*
3099         * Look into each node's config.
3100         * If there is a node whose DMA/DMA32 memory makes up a very large
3101         * share of its local memory, NODE_ORDER may be suitable.
3102         */
3103        average_size = total_size /
3104                                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
3105        for_each_online_node(nid) {
3106                low_kmem_size = 0;
3107                total_size = 0;
3108                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
3109                        z = &NODE_DATA(nid)->node_zones[zone_type];
3110                        if (populated_zone(z)) {
3111                                if (zone_type < ZONE_NORMAL)
3112                                        low_kmem_size += z->present_pages;
3113                                total_size += z->present_pages;
3114                        }
3115                }
3116                if (low_kmem_size &&
3117                    total_size > average_size && /* ignore small node */
3118                    low_kmem_size > total_size * 70/100)
3119                        return ZONELIST_ORDER_NODE;
3120        }
3121        return ZONELIST_ORDER_ZONE;
3122}
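
/*
 * Worked example (hypothetical numbers): on a 4-node, 16GB machine,
 * average_size is roughly 16GB / (4 + 1) worth of pages, i.e. ~3.2GB.
 * A node holding 4GB of which 3GB is DMA/DMA32 is both larger than average
 * and more than 70% lowmem, so default_zonelist_order() returns
 * ZONELIST_ORDER_NODE; if lowmem were only a small slice of every node, it
 * would fall through to ZONELIST_ORDER_ZONE instead.
 */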
3123
3124static void set_zonelist_order(void)
3125{
3126        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3127                current_zonelist_order = default_zonelist_order();
3128        else
3129                current_zonelist_order = user_zonelist_order;
3130}
3131
3132static void build_zonelists(pg_data_t *pgdat)
3133{
3134        int j, node, load;
3135        enum zone_type i;
3136        nodemask_t used_mask;
3137        int local_node, prev_node;
3138        struct zonelist *zonelist;
3139        int order = current_zonelist_order;
3140
3141        /* initialize zonelists */
3142        for (i = 0; i < MAX_ZONELISTS; i++) {
3143                zonelist = pgdat->node_zonelists + i;
3144                zonelist->_zonerefs[0].zone = NULL;
3145                zonelist->_zonerefs[0].zone_idx = 0;
3146        }
3147
3148        /* NUMA-aware ordering of nodes */
3149        local_node = pgdat->node_id;
3150        load = nr_online_nodes;
3151        prev_node = local_node;
3152        nodes_clear(used_mask);
3153
3154        memset(node_order, 0, sizeof(node_order));
3155        j = 0;
3156
3157        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3158                int distance = node_distance(local_node, node);
3159
3160                /*
3161                 * If another node is sufficiently far away then it is better
3162                 * to reclaim pages in a zone before going off node.
3163                 */
3164                if (distance > RECLAIM_DISTANCE)
3165                        zone_reclaim_mode = 1;
3166
3167                /*
3168                 * We don't want to pressure a particular node,
3169                 * so we add a penalty to the first node in the same
3170                 * distance group to make the ordering round-robin.
3171                 */
3172                if (distance != node_distance(local_node, prev_node))
3173                        node_load[node] = load;
3174
3175                prev_node = node;
3176                load--;
3177                if (order == ZONELIST_ORDER_NODE)
3178                        build_zonelists_in_node_order(pgdat, node);
3179                else
3180                        node_order[j++] = node; /* remember order */
3181        }
3182
3183        if (order == ZONELIST_ORDER_ZONE) {
3184                /* calculate node order -- i.e., DMA last! */
3185                build_zonelists_in_zone_order(pgdat, j);
3186        }
3187
3188        build_thisnode_zonelists(pgdat);
3189}
3190
3191/* Construct the zonelist performance cache - see mmzone.h for details */
3192static void build_zonelist_cache(pg_data_t *pgdat)
3193{
3194        struct zonelist *zonelist;
3195        struct zonelist_cache *zlc;
3196        struct zoneref *z;
3197
3198        zonelist = &pgdat->node_zonelists[0];
3199        zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
3200        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
3201        for (z = zonelist->_zonerefs; z->zone; z++)
3202                zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
3203}
3204
3205#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3206/*
3207 * Return node id of node used for "local" allocations.
3208 * I.e., first node id of first zone in arg node's generic zonelist.
3209 * Used for initializing percpu 'numa_mem', which is used primarily
3210 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
3211 */
3212int local_memory_node(int node)
3213{
3214        struct zone *zone;
3215
3216        (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
3217                                   gfp_zone(GFP_KERNEL),
3218                                   NULL,
3219                                   &zone);
3220        return zone->node;
3221}
3222#endif
3223
3224#else   /* CONFIG_NUMA */
3225
3226static void set_zonelist_order(void)
3227{
3228        current_zonelist_order = ZONELIST_ORDER_ZONE;
3229}
3230
3231static void build_zonelists(pg_data_t *pgdat)
3232{
3233        int node, local_node;
3234        enum zone_type j;
3235        struct zonelist *zonelist;
3236
3237        local_node = pgdat->node_id;
3238
3239        zonelist = &pgdat->node_zonelists[0];
3240        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
3241
3242        /*
3243         * Now we build the zonelist so that it contains the zones
3244         * of all the other nodes.
3245         * We don't want to pressure a particular node, so when
3246         * building the zones for node N, we make sure that the
3247         * zones coming right after the local ones are those from
3248         * node N+1 (wrapping around to node 0 after the last node).
3249         */
3250        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
3251                if (!node_online(node))
3252                        continue;
3253                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3254                                                        MAX_NR_ZONES - 1);
3255        }
3256        for (node = 0; node < local_node; node++) {
3257                if (!node_online(node))
3258                        continue;
3259                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
3260                                                        MAX_NR_ZONES - 1);
3261        }
3262
3263        zonelist->_zonerefs[j].zone = NULL;
3264        zonelist->_zonerefs[j].zone_idx = 0;
3265}
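
/*
 * Example (hypothetical): with four online nodes and local_node == 2, the
 * two loops above visit the remaining nodes as 3, 0, 1, so the zonelist
 * starts with node 2's own zones and then wraps around the other nodes.
 */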
3266
3267/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
3268static void build_zonelist_cache(pg_data_t *pgdat)
3269{
3270        pgdat->node_zonelists[0].zlcache_ptr = NULL;
3271}
3272
3273#endif  /* CONFIG_NUMA */
3274
3275/*
3276 * Boot pageset table. One per cpu which is going to be used for all
3277 * zones and all nodes. The parameters will be set in such a way
3278 * that an item put on a list will immediately be handed over to
3279 * the buddy list. This is safe since pageset manipulation is done
3280 * with interrupts disabled.
3281 *
3282 * The boot_pagesets must be kept even after bootup is complete for
3283 * unused processors and/or zones. They do play a role for bootstrapping
3284 * hotplugged processors.
3285 *
3286 * zoneinfo_show() and maybe other functions do
3287 * not check if the processor is online before following the pageset pointer.
3288 * Other parts of the kernel may not check if the zone is available.
3289 */
3290static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
3291static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
3292static void setup_zone_pageset(struct zone *zone);
3293
3294/*
3295 * Global mutex to protect against size modification of zonelists
3296 * as well as to serialize pageset setup for the new populated zone.
3297 */
3298DEFINE_MUTEX(zonelists_mutex);
3299
3300/* The return type is int only to match stop_machine()'s callback signature */
3301static __init_refok int __build_all_zonelists(void *data)
3302{
3303        int nid;
3304        int cpu;
3305
3306#ifdef CONFIG_NUMA
3307        memset(node_load, 0, sizeof(node_load));
3308#endif
3309        for_each_online_node(nid) {
3310                pg_data_t *pgdat = NODE_DATA(nid);
3311
3312                build_zonelists(pgdat);
3313                build_zonelist_cache(pgdat);
3314        }
3315
3316        /*
3317         * Initialize the boot_pagesets that are going to be used
3318         * for bootstrapping processors. The real pagesets for
3319         * each zone will be allocated later when the per cpu
3320         * allocator is available.
3321         *
3322         * boot_pagesets are used also for bootstrapping offline
3323         * cpus if the system is already booted because the pagesets
3324         * are needed to initialize allocators on a specific cpu too.
3325         * F.e. the percpu allocator needs the page allocator which
3326         * needs the percpu allocator in order to allocate its pagesets
3327         * (a chicken-egg dilemma).
3328         */
3329        for_each_possible_cpu(cpu) {
3330                setup_pageset(&per_cpu(boot_pageset, cpu), 0);
3331
3332#ifdef CONFIG_HAVE_MEMORYLESS_NODES
3333                /*
3334                 * We now know the "local memory node" for each node--
3335                 * i.e., the node of the first zone in the generic zonelist.
3336                 * Set up numa_mem percpu variable for on-line cpus.  During
3337                 * boot, only the boot cpu should be on-line;  we'll init the
3338                 * secondary cpus' numa_mem as they come on-line.  During
3339                 * node/memory hotplug, we'll fixup all on-line cpus.
3340                 */
3341                if (cpu_online(cpu))
3342                        set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
3343#endif
3344        }
3345
3346        return 0;
3347}
3348
3349/*
3350 * Called with zonelists_mutex held always
3351 * unless system_state == SYSTEM_BOOTING.
3352 */
3353void __ref build_all_zonelists(void *data)
3354{
3355        set_zonelist_order();
3356
3357        if (system_state == SYSTEM_BOOTING) {
3358                __build_all_zonelists(NULL);
3359                mminit_verify_zonelist();
3360                cpuset_init_current_mems_allowed();
3361        } else {
3362                /* We have to stop all cpus to guarantee there is no user
3363                   of the zonelist */
3364#ifdef CONFIG_MEMORY_HOTPLUG
3365                if (data)
3366                        setup_zone_pageset((struct zone *)data);
3367#endif
3368                stop_machine(__build_all_zonelists, NULL, NULL);
3369                /* cpuset refresh routine should be here */
3370        }
3371        vm_total_pages = nr_free_pagecache_pages();
3372        /*
3373         * Disable grouping by mobility if the number of pages in the
3374         * system is too low to allow the mechanism to work. It would be
3375         * more accurate, but expensive to check per-zone. This check is
3376         * made on memory-hotadd so a system can start with mobility
3377         * disabled and enable it later
3378         */
3379        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
3380                page_group_by_mobility_disabled = 1;
3381        else
3382                page_group_by_mobility_disabled = 0;
3383
3384        printk("Built %i zonelists in %s order, mobility grouping %s.  "
3385                "Total pages: %ld\n",
3386                        nr_online_nodes,
3387                        zonelist_order_name[current_zonelist_order],
3388                        page_group_by_mobility_disabled ? "off" : "on",
3389                        vm_total_pages);
3390#ifdef CONFIG_NUMA
3391        printk("Policy zone: %s\n", zone_names[policy_zone]);
3392#endif
3393}
3394
3395/*
3396 * Helper functions to size the waitqueue hash table.
3397 * Essentially these want to choose hash table sizes sufficiently
3398 * large so that collisions trying to wait on pages are rare.
3399 * But in fact, the number of active page waitqueues on typical
3400 * systems is ridiculously low, less than 200. So this is quite
3401 * conservative, even though it seems large.
3402 *
3403 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
3404 * waitqueues, i.e. the size of the waitq table given the number of pages.
3405 */
3406#define PAGES_PER_WAITQUEUE     256
3407
3408#ifndef CONFIG_MEMORY_HOTPLUG
3409static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3410{
3411        unsigned long size = 1;
3412
3413        pages /= PAGES_PER_WAITQUEUE;
3414
3415        while (size < pages)
3416                size <<= 1;
3417
3418        /*
3419         * Once we have dozens or even hundreds of threads sleeping
3420         * on IO we've got bigger problems than wait queue collision.
3421         * Limit the size of the wait table to a reasonable size.
3422         */
3423        size = min(size, 4096UL);
3424
3425        return max(size, 4UL);
3426}
3427#else
3428/*
3429 * A zone's size might be changed by hot-add, so it is not possible to determine
3430 * a suitable size for its wait_table.  So we use the maximum size now.
3431 *
3432 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
3433 *
3434 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
3435 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
3436 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
3437 *
3438 * With the traditional sizing above, the maximum number of entries is
3439 * reached once a zone has (512K + 256) pages or more.  That corresponds to:
3440 *
3441 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
3442 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
3443 *    powerpc (64K page size)             : =  (32G +16M)byte.
3444 */
3445static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
3446{
3447        return 4096UL;
3448}
3449#endif
3450
3451/*
3452 * This is an integer logarithm so that shifts can be used later
3453 * to extract the more random high bits from the multiplicative
3454 * hash function before the remainder is taken.
3455 */
3456static inline unsigned long wait_table_bits(unsigned long size)
3457{
3458        return ffz(~size);
3459}
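
/*
 * Worked example (assuming 4K pages and no memory hotplug): a 1GB zone has
 * 262144 pages, so wait_table_hash_nr_entries() gives 262144 / 256 = 1024,
 * already a power of two and below the 4096 cap, and wait_table_bits(1024)
 * evaluates to 10.
 */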
3460
3461#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
3462
3463/*
3464 * Check if a pageblock contains reserved pages
3465 */
3466static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
3467{
3468        unsigned long pfn;
3469
3470        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3471                if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
3472                        return 1;
3473        }
3474        return 0;
3475}
3476
3477/*
3478 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
3479 * of blocks reserved is based on min_wmark_pages(zone). The memory within
3480 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
3481 * higher will lead to a bigger reserve which will get freed as contiguous
3482 * blocks as reclaim kicks in
3483 */
3484static void setup_zone_migrate_reserve(struct zone *zone)
3485{
3486        unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
3487        struct page *page;
3488        unsigned long block_migratetype;
3489        int reserve;
3490
3491        /*
3492         * Get the start pfn, end pfn and the number of blocks to reserve.
3493         * We have to be careful to be aligned to pageblock_nr_pages to
3494         * make sure that we always check pfn_valid for the first page in
3495         * the block.
3496         */
3497        start_pfn = zone->zone_start_pfn;
3498        end_pfn = start_pfn + zone->spanned_pages;
3499        start_pfn = roundup(start_pfn, pageblock_nr_pages);
3500        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
3501                                                        pageblock_order;
3502
3503        /*
3504         * Reserve blocks are generally in place to help high-order atomic
3505         * allocations that are short-lived. A min_free_kbytes value that
3506         * would result in more than 2 reserve blocks for atomic allocations
3507         * is assumed to be in place to help anti-fragmentation for the
3508         * future allocation of hugepages at runtime.
3509         */
3510        reserve = min(2, reserve);
3511
3512        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
3513                if (!pfn_valid(pfn))
3514                        continue;
3515                page = pfn_to_page(pfn);
3516
3517                /* Watch out for overlapping nodes */
3518                if (page_to_nid(page) != zone_to_nid(zone))
3519                        continue;
3520
3521                block_migratetype = get_pageblock_migratetype(page);
3522
3523                /* Only test what is necessary when the reserves are not met */
3524                if (reserve > 0) {
3525                        /*
3526                         * Blocks with reserved pages will never be freed,
3527                         * so skip them.
3528                         */
3529                        block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
3530                        if (pageblock_is_reserved(pfn, block_end_pfn))
3531                                continue;
3532
3533                        /* If this block is reserved, account for it */
3534                        if (block_migratetype == MIGRATE_RESERVE) {
3535                                reserve--;
3536                                continue;
3537                        }
3538
3539                        /* Suitable for reserving if this block is movable */
3540                        if (block_migratetype == MIGRATE_MOVABLE) {
3541                                set_pageblock_migratetype(page,
3542                                                        MIGRATE_RESERVE);
3543                                move_freepages_block(zone, page,
3544                                                        MIGRATE_RESERVE);
3545                                reserve--;
3546                                continue;
3547                        }
3548                }
3549
3550                /*
3551                 * If the reserve is met and this is a previously reserved block,
3552                 * take it back.
3553                 */
3554                if (block_migratetype == MIGRATE_RESERVE) {
3555                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3556                        move_freepages_block(zone, page, MIGRATE_MOVABLE);
3557                }
3558        }
3559}
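
/*
 * Worked example (hypothetical zone): with 512-page pageblocks
 * (pageblock_order == 9) and min_wmark_pages(zone) == 1000, the code above
 * computes roundup(1000, 512) >> 9 == 2 reserve blocks, which is also the
 * cap, so at most two movable pageblocks become MIGRATE_RESERVE.
 */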
3560
3561/*
3562 * Initially all pages are reserved - free ones are freed
3563 * up by free_all_bootmem() once the early boot process is
3564 * done. Non-atomic initialization, single-pass.
3565 */
3566void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
3567                unsigned long start_pfn, enum memmap_context context)
3568{
3569        struct page *page;
3570        unsigned long end_pfn = start_pfn + size;
3571        unsigned long pfn;
3572        struct zone *z;
3573
3574        if (highest_memmap_pfn < end_pfn - 1)
3575                highest_memmap_pfn = end_pfn - 1;
3576
3577        z = &NODE_DATA(nid)->node_zones[zone];
3578        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
3579                /*
3580                 * There can be holes in boot-time mem_map[]s
3581                 * handed to this function.  They do not
3582                 * exist on hotplugged memory.
3583                 */
3584                if (context == MEMMAP_EARLY) {
3585                        if (!early_pfn_valid(pfn))
3586                                continue;
3587                        if (!early_pfn_in_nid(pfn, nid))
3588                                continue;
3589                }
3590                page = pfn_to_page(pfn);
3591                set_page_links(page, zone, nid, pfn);
3592                mminit_verify_page_links(page, zone, nid, pfn);
3593                init_page_count(page);
3594                reset_page_mapcount(page);
3595                SetPageReserved(page);
3596                /*
3597                 * Mark the block movable so that blocks are reserved for
3598                 * movable at startup. This will force kernel allocations
3599                 * to reserve their blocks rather than leaking throughout
3600                 * the address space during boot when many long-lived
3601                 * kernel allocations are made. Later some blocks near
3602                 * the start are marked MIGRATE_RESERVE by
3603                 * setup_zone_migrate_reserve()
3604                 *
3605                 * The pageblock bitmap is created for the zone's valid pfn
3606                 * range, but the memmap may also cover invalid pages (for
3607                 * alignment), so check here that we never call
3608                 * set_pageblock_migratetype() on a pfn outside the zone.
3609                 */
3610                if ((z->zone_start_pfn <= pfn)
3611                    && (pfn < z->zone_start_pfn + z->spanned_pages)
3612                    && !(pfn & (pageblock_nr_pages - 1)))
3613                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
3614
3615                INIT_LIST_HEAD(&page->lru);
3616#ifdef WANT_PAGE_VIRTUAL
3617                /* The shift won't overflow because ZONE_NORMAL is below 4G. */
3618                if (!is_highmem_idx(zone))
3619                        set_page_address(page, __va(pfn << PAGE_SHIFT));
3620#endif
3621        }
3622}
3623
3624static void __meminit zone_init_free_lists(struct zone *zone)
3625{
3626        int order, t;
3627        for_each_migratetype_order(order, t) {
3628                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
3629                zone->free_area[order].nr_free = 0;
3630        }
3631}
3632
3633#ifndef __HAVE_ARCH_MEMMAP_INIT
3634#define memmap_init(size, nid, zone, start_pfn) \
3635        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
3636#endif
3637
3638static int zone_batchsize(struct zone *zone)
3639{
3640#ifdef CONFIG_MMU
3641        int batch;
3642
3643        /*
3644         * The per-cpu-pages pools are set to around 1/1000th of the
3645         * size of the zone.  But no more than 1/2 of a meg.
3646         *
3647         * OK, so we don't know how big the cache is.  So guess.
3648         */
3649        batch = zone->present_pages / 1024;
3650        if (batch * PAGE_SIZE > 512 * 1024)
3651                batch = (512 * 1024) / PAGE_SIZE;
3652        batch /= 4;             /* We effectively *= 4 below */
3653        if (batch < 1)
3654                batch = 1;
3655
3656        /*
3657         * Clamp the batch to a 2^n - 1 value. Having a power
3658         * of 2 value was found to be more likely to have
3659         * suboptimal cache aliasing properties in some cases.
3660         *
3661         * For example if 2 tasks are alternately allocating
3662         * batches of pages, one task can end up with a lot
3663         * of pages of one half of the possible page colors
3664         * and the other with pages of the other colors.
3665         */
3666        batch = rounddown_pow_of_two(batch + batch/2) - 1;
3667
3668        return batch;
3669
3670#else
3671        /* The deferral and batching of frees should be suppressed under NOMMU
3672         * conditions.
3673         *
3674         * The problem is that NOMMU needs to be able to allocate large chunks
3675         * of contiguous memory as there's no hardware page translation to
3676         * assemble apparent contiguous memory from discontiguous pages.
3677         *
3678         * Queueing large contiguous runs of pages for batching, however,
3679         * causes the pages to actually be freed in smaller chunks.  As there
3680         * can be a significant delay between the individual batches being
3681         * recycled, this leads to the once large chunks of space being
3682         * fragmented and becoming unavailable for high-order allocations.
3683         */
3684        return 0;
3685#endif
3686}
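
/*
 * Worked example (assuming 4K pages): for a zone of 1,000,000 pages, batch
 * starts at 1000000 / 1024 = 976, is capped to 512KB worth of pages (128),
 * quartered to 32 and rounded to 2^n - 1, giving 31.  setup_pageset() below
 * then uses pcp->batch = 31 and pcp->high = 6 * 31 = 186.
 */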
3687
3688static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
3689{
3690        struct per_cpu_pages *pcp;
3691        int migratetype;
3692
3693        memset(p, 0, sizeof(*p));
3694
3695        pcp = &p->pcp;
3696        pcp->count = 0;
3697        pcp->high = 6 * batch;
3698        pcp->batch = max(1UL, 1 * batch);
3699        for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
3700                INIT_LIST_HEAD(&pcp->lists[migratetype]);
3701}
3702
3703/*
3704 * setup_pagelist_highmark() sets the high water mark of the hot per-cpu
3705 * pagelist to the value "high" for the pageset p.
3706 */
3707
3708static void setup_pagelist_highmark(struct per_cpu_pageset *p,
3709                                unsigned long high)
3710{
3711        struct per_cpu_pages *pcp;
3712
3713        pcp = &p->pcp;
3714        pcp->high = high;
3715        pcp->batch = max(1UL, high/4);
3716        if ((high/4) > (PAGE_SHIFT * 8))
3717                pcp->batch = PAGE_SHIFT * 8;
3718}
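
/*
 * Example (hypothetical values, 4K pages): writing 8 to the
 * percpu_pagelist_fraction sysctl on a 1,000,000-page zone gives
 * high = 125000, and batch is clamped from high / 4 down to
 * PAGE_SHIFT * 8 = 96 pages.
 */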
3719
3720static void setup_zone_pageset(struct zone *zone)
3721{
3722        int cpu;
3723
3724        zone->pageset = alloc_percpu(struct per_cpu_pageset);
3725
3726        for_each_possible_cpu(cpu) {
3727                struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
3728
3729                setup_pageset(pcp, zone_batchsize(zone));
3730
3731                if (percpu_pagelist_fraction)
3732                        setup_pagelist_highmark(pcp,
3733                                (zone->present_pages /
3734                                        percpu_pagelist_fraction));
3735        }
3736}
3737
3738/*
3739 * Allocate per cpu pagesets and initialize them.
3740 * Before this call only boot pagesets were available.
3741 */
3742void __init setup_per_cpu_pageset(void)
3743{
3744        struct zone *zone;
3745
3746        for_each_populated_zone(zone)
3747                setup_zone_pageset(zone);
3748}
3749
3750static noinline __init_refok
3751int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
3752{
3753        int i;
3754        struct pglist_data *pgdat = zone->zone_pgdat;
3755        size_t alloc_size;
3756
3757        /*
3758         * The per-page waitqueue mechanism uses hashed waitqueues
3759         * per zone.
3760         */
3761        zone->wait_table_hash_nr_entries =
3762                 wait_table_hash_nr_entries(zone_size_pages);
3763        zone->wait_table_bits =
3764                wait_table_bits(zone->wait_table_hash_nr_entries);
3765        alloc_size = zone->wait_table_hash_nr_entries
3766                                        * sizeof(wait_queue_head_t);
3767
3768        if (!slab_is_available()) {
3769                zone->wait_table = (wait_queue_head_t *)
3770                        alloc_bootmem_node_nopanic(pgdat, alloc_size);
3771        } else {
3772                /*
3773                 * This case means that a zone whose size was 0 gets new memory
3774                 * via memory hot-add.
3775                 * But it may be that a whole new node was hot-added.  In
3776                 * that case vmalloc() cannot use the new node's memory yet,
3777                 * although ideally this wait_table should live on the new
3778                 * node itself.
3779                 * Using the new node's own memory for it will need further
3780                 * consideration.
3781                 */
3782                zone->wait_table = vmalloc(alloc_size);
3783        }
3784        if (!zone->wait_table)
3785                return -ENOMEM;
3786
3787        for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
3788                init_waitqueue_head(zone->wait_table + i);
3789
3790        return 0;
3791}
3792
3793static int __zone_pcp_update(void *data)
3794{
3795        struct zone *zone = data;
3796        int cpu;
3797        unsigned long batch = zone_batchsize(zone), flags;
3798
3799        for_each_possible_cpu(cpu) {
3800                struct per_cpu_pageset *pset;
3801                struct per_cpu_pages *pcp;
3802
3803                pset = per_cpu_ptr(zone->pageset, cpu);
3804                pcp = &pset->pcp;
3805
3806                local_irq_save(flags);
3807                free_pcppages_bulk(zone, pcp->count, pcp);
3808                setup_pageset(pset, batch);
3809                local_irq_restore(flags);
3810        }
3811        return 0;
3812}
3813
3814void zone_pcp_update(struct zone *zone)
3815{
3816        stop_machine(__zone_pcp_update, zone, NULL);
3817}
3818
3819static __meminit void zone_pcp_init(struct zone *zone)
3820{
3821        /*
3822         * per cpu subsystem is not up at this point. The following code
3823         * relies on the ability of the linker to provide the
3824         * offset of a (static) per cpu variable into the per cpu area.
3825         */
3826        zone->pageset = &boot_pageset;
3827
3828        if (zone->present_pages)
3829                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
3830                        zone->name, zone->present_pages,
3831                                         zone_batchsize(zone));
3832}
3833
3834__meminit int init_currently_empty_zone(struct zone *zone,
3835                                        unsigned long zone_start_pfn,
3836                                        unsigned long size,
3837                                        enum memmap_context context)
3838{
3839        struct pglist_data *pgdat = zone->zone_pgdat;
3840        int ret;
3841        ret = zone_wait_table_init(zone, size);
3842        if (ret)
3843                return ret;
3844        pgdat->nr_zones = zone_idx(zone) + 1;
3845
3846        zone->zone_start_pfn = zone_start_pfn;
3847
3848        mminit_dprintk(MMINIT_TRACE, "memmap_init",
3849                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
3850                        pgdat->node_id,
3851                        (unsigned long)zone_idx(zone),
3852                        zone_start_pfn, (zone_start_pfn + size));
3853
3854        zone_init_free_lists(zone);
3855
3856        return 0;
3857}
3858
3859#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
3860#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
3861/*
3862 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
3863 * Architectures may implement their own version but if add_active_range()
3864 * was used and there are no special requirements, this is a convenient
3865 * alternative
3866 */
3867int __meminit __early_pfn_to_nid(unsigned long pfn)
3868{
3869        unsigned long start_pfn, end_pfn;
3870        int i, nid;
3871
3872        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
3873                if (start_pfn <= pfn && pfn < end_pfn)
3874                        return nid;
3875        /* This is a memory hole */
3876        return -1;
3877}
3878#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
3879
3880int __meminit early_pfn_to_nid(unsigned long pfn)
3881{
3882        int nid;
3883
3884        nid = __early_pfn_to_nid(pfn);
3885        if (nid >= 0)
3886                return nid;
3887        /* memory hole: just return node 0 */
3888        return 0;
3889}
3890
3891#ifdef CONFIG_NODES_SPAN_OTHER_NODES
3892bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
3893{
3894        int nid;
3895
3896        nid = __early_pfn_to_nid(pfn);
3897        if (nid >= 0 && nid != node)
3898                return false;
3899        return true;
3900}
3901#endif
3902
3903/**
3904 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3905 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3906 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3907 *
3908 * If an architecture guarantees that all ranges registered with
3909 * add_active_ranges() contain no holes and may be freed, this
3910 * function may be used instead of calling free_bootmem() manually.
3911 */
3912void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
3913{
3914        unsigned long start_pfn, end_pfn;
3915        int i, this_nid;
3916
3917        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
3918                start_pfn = min(start_pfn, max_low_pfn);
3919                end_pfn = min(end_pfn, max_low_pfn);
3920
3921                if (start_pfn < end_pfn)
3922                        free_bootmem_node(NODE_DATA(this_nid),
3923                                          PFN_PHYS(start_pfn),
3924                                          (end_pfn - start_pfn) << PAGE_SHIFT);
3925        }
3926}
3927
3928int __init add_from_early_node_map(struct range *range, int az,
3929                                   int nr_range, int nid)
3930{
3931        unsigned long start_pfn, end_pfn;
3932        int i;
3933
3934        /* need to go over early_node_map to find a good range for the node */
3935        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
3936                nr_range = add_range(range, az, nr_range, start_pfn, end_pfn);
3937        return nr_range;
3938}
3939
3940/**
3941 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3942 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3943 *
3944 * If an architecture guarantees that all ranges registered with
3945 * add_active_ranges() contain no holes and may be freed, this
3946 * function may be used instead of calling memory_present() manually.
3947 */
3948void __init sparse_memory_present_with_active_regions(int nid)
3949{
3950        unsigned long start_pfn, end_pfn;
3951        int i, this_nid;
3952
3953        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
3954                memory_present(this_nid, start_pfn, end_pfn);
3955}
3956
3957/**
3958 * get_pfn_range_for_nid - Return the start and end page frames for a node
3959 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3960 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3961 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3962 *
3963 * It returns the start and end page frame of a node based on information
3964 * provided by an arch calling add_active_range(). If called for a node
3965 * with no available memory, a warning is printed and the start and end
3966 * PFNs will be 0.
3967 */
3968void __meminit get_pfn_range_for_nid(unsigned int nid,
3969                        unsigned long *start_pfn, unsigned long *end_pfn)
3970{
3971        unsigned long this_start_pfn, this_end_pfn;
3972        int i;
3973
3974        *start_pfn = -1UL;
3975        *end_pfn = 0;
3976
3977        for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
3978                *start_pfn = min(*start_pfn, this_start_pfn);
3979                *end_pfn = max(*end_pfn, this_end_pfn);
3980        }
3981
3982        if (*start_pfn == -1UL)
3983                *start_pfn = 0;
3984}
3985
3986/*
3987 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3988 * assumption is made that zones within a node are ordered by monotonically
3989 * increasing memory addresses so that the "highest" populated zone is used.
3990 */
3991static void __init find_usable_zone_for_movable(void)
3992{
3993        int zone_index;
3994        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3995                if (zone_index == ZONE_MOVABLE)
3996                        continue;
3997
3998                if (arch_zone_highest_possible_pfn[zone_index] >
3999                                arch_zone_lowest_possible_pfn[zone_index])
4000                        break;
4001        }
4002
4003        VM_BUG_ON(zone_index == -1);
4004        movable_zone = zone_index;
4005}
4006
4007/*
4008 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4009 * because it is sized independent of architecture. Unlike the other zones,
4010 * the starting point for ZONE_MOVABLE is not fixed. It may be different
4011 * in each node depending on the size of each node and how evenly kernelcore
4012 * is distributed. This helper function adjusts the zone ranges
4013 * provided by the architecture for a given node by using the end of the
4014 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4015 * zones within a node are ordered by monotonically increasing memory addresses.
4016 */
4017static void __meminit adjust_zone_range_for_zone_movable(int nid,
4018                                        unsigned long zone_type,
4019                                        unsigned long node_start_pfn,
4020                                        unsigned long node_end_pfn,
4021                                        unsigned long *zone_start_pfn,
4022                                        unsigned long *zone_end_pfn)
4023{
4024        /* Only adjust if ZONE_MOVABLE is on this node */
4025        if (zone_movable_pfn[nid]) {
4026                /* Size ZONE_MOVABLE */
4027                if (zone_type == ZONE_MOVABLE) {
4028                        *zone_start_pfn = zone_movable_pfn[nid];
4029                        *zone_end_pfn = min(node_end_pfn,
4030                                arch_zone_highest_possible_pfn[movable_zone]);
4031
4032                /* Adjust for ZONE_MOVABLE starting within this range */
4033                } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
4034                                *zone_end_pfn > zone_movable_pfn[nid]) {
4035                        *zone_end_pfn = zone_movable_pfn[nid];
4036
4037                /* Check if this whole range is within ZONE_MOVABLE */
4038                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
4039                        *zone_start_pfn = *zone_end_pfn;
4040        }
4041}
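
/*
 * Example (hypothetical pfns): if a node spans pfns 0..1048576 and
 * zone_movable_pfn[nid] == 786432, a ZONE_NORMAL range ending at 1048576 is
 * clipped to end at 786432, ZONE_MOVABLE itself gets 786432 up to the node
 * end, and a zone lying entirely above 786432 ends up empty.
 */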
4042
4043/*
4044 * Return the number of pages a zone spans in a node, including holes
4045 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
4046 */
4047static unsigned long __meminit zone_spanned_pages_in_node(int nid,
4048                                        unsigned long zone_type,
4049                                        unsigned long *ignored)
4050{
4051        unsigned long node_start_pfn, node_end_pfn;
4052        unsigned long zone_start_pfn, zone_end_pfn;
4053
4054        /* Get the start and end of the node and zone */
4055        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4056        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
4057        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
4058        adjust_zone_range_for_zone_movable(nid, zone_type,
4059                                node_start_pfn, node_end_pfn,
4060                                &zone_start_pfn, &zone_end_pfn);
4061
4062        /* Check that this node has pages within the zone's required range */
4063        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
4064                return 0;
4065
4066        /* Move the zone boundaries inside the node if necessary */
4067        zone_end_pfn = min(zone_end_pfn, node_end_pfn);
4068        zone_start_pfn = max(zone_start_pfn, node_start_pfn);
4069
4070        /* Return the spanned pages */
4071        return zone_end_pfn - zone_start_pfn;
4072}
4073
4074/*
4075 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
4076 * then all holes in the requested range will be accounted for.
4077 */
4078unsigned long __meminit __absent_pages_in_range(int nid,
4079                                unsigned long range_start_pfn,
4080                                unsigned long range_end_pfn)
4081{
4082        unsigned long nr_absent = range_end_pfn - range_start_pfn;
4083        unsigned long start_pfn, end_pfn;
4084        int i;
4085
4086        for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4087                start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
4088                end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
4089                nr_absent -= end_pfn - start_pfn;
4090        }
4091        return nr_absent;
4092}
4093
4094/**
4095 * absent_pages_in_range - Return number of page frames in holes within a range
4096 * @start_pfn: The start PFN to start searching for holes
4097 * @end_pfn: The end PFN to stop searching for holes
4098 *
4099 * It returns the number of page frames in memory holes within a range.
4100 */
4101unsigned long __init absent_pages_in_range(unsigned long start_pfn,
4102                                                        unsigned long end_pfn)
4103{
4104        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
4105}
4106
4107/* Return the number of page frames in holes in a zone on a node */
4108static unsigned long __meminit zone_absent_pages_in_node(int nid,
4109                                        unsigned long zone_type,
4110                                        unsigned long *ignored)
4111{
4112        unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
4113        unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
4114        unsigned long node_start_pfn, node_end_pfn;
4115        unsigned long zone_start_pfn, zone_end_pfn;
4116
4117        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
4118        zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
4119        zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
4120
4121        adjust_zone_range_for_zone_movable(nid, zone_type,
4122                        node_start_pfn, node_end_pfn,
4123                        &zone_start_pfn, &zone_end_pfn);
4124        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
4125}
4126
4127#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4128static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
4129                                        unsigned long zone_type,
4130                                        unsigned long *zones_size)
4131{
4132        return zones_size[zone_type];
4133}
4134
4135static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
4136                                                unsigned long zone_type,
4137                                                unsigned long *zholes_size)
4138{
4139        if (!zholes_size)
4140                return 0;
4141
4142        return zholes_size[zone_type];
4143}
4144
4145#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4146
4147static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
4148                unsigned long *zones_size, unsigned long *zholes_size)
4149{
4150        unsigned long realtotalpages, totalpages = 0;
4151        enum zone_type i;
4152
4153        for (i = 0; i < MAX_NR_ZONES; i++)
4154                totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
4155                                                                zones_size);
4156        pgdat->node_spanned_pages = totalpages;
4157
4158        realtotalpages = totalpages;
4159        for (i = 0; i < MAX_NR_ZONES; i++)
4160                realtotalpages -=
4161                        zone_absent_pages_in_node(pgdat->node_id, i,
4162                                                                zholes_size);
4163        pgdat->node_present_pages = realtotalpages;
4164        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
4165                                                        realtotalpages);
4166}
4167
4168#ifndef CONFIG_SPARSEMEM
4169/*
4170 * Calculate the size of the zone->blockflags bitmap, rounded up to an
4171 * unsigned long. Start by making sure zonesize is a multiple of
4172 * pageblock_order by rounding up. Then use 1 NR_PAGEBLOCK_BITS worth of bits
4173 * per pageblock, round what is now in bits up to the nearest long in bits,
4174 * and finally return it in bytes.
4175 */
4176static unsigned long __init usemap_size(unsigned long zonesize)
4177{
4178        unsigned long usemapsize;
4179
4180        usemapsize = roundup(zonesize, pageblock_nr_pages);
4181        usemapsize = usemapsize >> pageblock_order;
4182        usemapsize *= NR_PAGEBLOCK_BITS;
4183        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
4184
4185        return usemapsize / 8;
4186}
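
/*
 * Worked example (assuming 512-page pageblocks and NR_PAGEBLOCK_BITS == 3):
 * a 1,048,576-page zone has 2048 pageblocks needing 6144 bits of
 * pageblock_flags; that is already a whole number of longs, so usemap_size()
 * returns 6144 / 8 = 768 bytes.
 */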
4187
4188static void __init setup_usemap(struct pglist_data *pgdat,
4189                                struct zone *zone, unsigned long zonesize)
4190{
4191        unsigned long usemapsize = usemap_size(zonesize);
4192        zone->pageblock_flags = NULL;
4193        if (usemapsize)
4194                zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
4195                                                                   usemapsize);
4196}
4197#else
4198static inline void setup_usemap(struct pglist_data *pgdat,
4199                                struct zone *zone, unsigned long zonesize) {}
4200#endif /* CONFIG_SPARSEMEM */
4201
4202#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4203
4204/* Return a sensible default order for the pageblock size. */
4205static inline int pageblock_default_order(void)
4206{
4207        if (HPAGE_SHIFT > PAGE_SHIFT)
4208                return HUGETLB_PAGE_ORDER;
4209
4210        return MAX_ORDER-1;
4211}
4212
4213/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4214static inline void __init set_pageblock_order(unsigned int order)
4215{
4216        /* Check that pageblock_nr_pages has not already been setup */
4217        if (pageblock_order)
4218                return;
4219
4220        /*
4221         * Assume the largest contiguous order of interest is a huge page.
4222         * This value may be variable depending on boot parameters on IA64
4223         */
4224        pageblock_order = order;
4225}
4226#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4227
4228/*
4229 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4230 * and pageblock_default_order() are unused as pageblock_order is set
4231 * at compile-time. See include/linux/pageblock-flags.h for the values of
4232 * pageblock_order based on the kernel config
4233 */
4234static inline int pageblock_default_order(unsigned int order)
4235{
4236        return MAX_ORDER-1;
4237}
4238#define set_pageblock_order(x)  do {} while (0)
4239
4240#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4241
4242/*
4243 * Set up the zone data structures:
4244 *   - mark all pages reserved
4245 *   - mark all memory queues empty
4246 *   - clear the memory bitmaps
4247 */
4248static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4249                unsigned long *zones_size, unsigned long *zholes_size)
4250{
4251        enum zone_type j;
4252        int nid = pgdat->node_id;
4253        unsigned long zone_start_pfn = pgdat->node_start_pfn;
4254        int ret;
4255
4256        pgdat_resize_init(pgdat);
4257        pgdat->nr_zones = 0;
4258        init_waitqueue_head(&pgdat->kswapd_wait);
4259        pgdat->kswapd_max_order = 0;
4260        pgdat_page_cgroup_init(pgdat);
4261
4262        for (j = 0; j < MAX_NR_ZONES; j++) {
4263                struct zone *zone = pgdat->node_zones + j;
4264                unsigned long size, realsize, memmap_pages;
4265                enum lru_list lru;
4266
4267                size = zone_spanned_pages_in_node(nid, j, zones_size);
4268                realsize = size - zone_absent_pages_in_node(nid, j,
4269                                                                zholes_size);
4270
4271                /*
4272                 * Adjust realsize so that it accounts for how much memory
4273                 * is used by this zone for memmap. This affects the watermark
4274                 * and per-cpu initialisations
4275                 */
4276                memmap_pages =
4277                        PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
4278                if (realsize >= memmap_pages) {
4279                        realsize -= memmap_pages;
4280                        if (memmap_pages)
4281                                printk(KERN_DEBUG
4282                                       "  %s zone: %lu pages used for memmap\n",
4283                                       zone_names[j], memmap_pages);
4284                } else
4285                        printk(KERN_WARNING
4286                                "  %s zone: %lu pages exceeds realsize %lu\n",
4287                                zone_names[j], memmap_pages, realsize);
4288
4289                /* Account for reserved pages */
4290                if (j == 0 && realsize > dma_reserve) {
4291                        realsize -= dma_reserve;
4292                        printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
4293                                        zone_names[0], dma_reserve);
4294                }
4295
4296                if (!is_highmem_idx(j))
4297                        nr_kernel_pages += realsize;
4298                nr_all_pages += realsize;
4299
4300                zone->spanned_pages = size;
4301                zone->present_pages = realsize;
4302#ifdef CONFIG_NUMA
4303                zone->node = nid;
4304                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
4305                                                / 100;
4306                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
4307#endif
4308                zone->name = zone_names[j];
4309                spin_lock_init(&zone->lock);
4310                spin_lock_init(&zone->lru_lock);
4311                zone_seqlock_init(zone);
4312                zone->zone_pgdat = pgdat;
4313
4314                zone_pcp_init(zone);
4315                for_each_lru(lru)
4316                        INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
4317                zone->reclaim_stat.recent_rotated[0] = 0;
4318                zone->reclaim_stat.recent_rotated[1] = 0;
4319                zone->reclaim_stat.recent_scanned[0] = 0;
4320                zone->reclaim_stat.recent_scanned[1] = 0;
4321                zap_zone_vm_stats(zone);
4322                zone->flags = 0;
4323                if (!size)
4324                        continue;
4325
4326                set_pageblock_order(pageblock_default_order());
4327                setup_usemap(pgdat, zone, size);
4328                ret = init_currently_empty_zone(zone, zone_start_pfn,
4329                                                size, MEMMAP_EARLY);
4330                BUG_ON(ret);
4331                memmap_init(size, nid, j, zone_start_pfn);
4332                zone_start_pfn += size;
4333        }
4334}
4335
4336static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
4337{
4338        /* Skip empty nodes */
4339        if (!pgdat->node_spanned_pages)
4340                return;
4341
4342#ifdef CONFIG_FLAT_NODE_MEM_MAP
4343        /* ia64 gets its own node_mem_map, before this, without bootmem */
4344        if (!pgdat->node_mem_map) {
4345                unsigned long size, start, end;
4346                struct page *map;
4347
4348                /*
4349         * The zone's endpoints aren't required to be MAX_ORDER
4350         * aligned, but the node_mem_map endpoints must be, for the
4351         * buddy allocator to function correctly.
4352                 */
4353                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
4354                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
4355                end = ALIGN(end, MAX_ORDER_NR_PAGES);
4356                size =  (end - start) * sizeof(struct page);
4357                map = alloc_remap(pgdat->node_id, size);
4358                if (!map)
4359                        map = alloc_bootmem_node_nopanic(pgdat, size);
4360                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
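                /*
                 * Worked example (illustrative numbers): with
                 * MAX_ORDER_NR_PAGES = 1024, node_start_pfn = 0x12345 and
                 * node_spanned_pages = 0x800, start rounds down to 0x12000,
                 * end rounds up to 0x12c00, the map covers 0xc00 struct
                 * pages and node_mem_map points 0x345 entries into it.
                 */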
4361        }
4362#ifndef CONFIG_NEED_MULTIPLE_NODES
4363        /*
4364         * With no DISCONTIG, the global mem_map is just set as node 0's
4365         */
4366        if (pgdat == NODE_DATA(0)) {
4367                mem_map = NODE_DATA(0)->node_mem_map;
4368#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4369                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
4370                        mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
4371#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4372        }
4373#endif
4374#endif /* CONFIG_FLAT_NODE_MEM_MAP */
4375}
4376
4377void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
4378                unsigned long node_start_pfn, unsigned long *zholes_size)
4379{
4380        pg_data_t *pgdat = NODE_DATA(nid);
4381
4382        pgdat->node_id = nid;
4383        pgdat->node_start_pfn = node_start_pfn;
4384        calculate_node_totalpages(pgdat, zones_size, zholes_size);
4385
4386        alloc_node_mem_map(pgdat);
4387#ifdef CONFIG_FLAT_NODE_MEM_MAP
4388        printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
4389                nid, (unsigned long)pgdat,
4390                (unsigned long)pgdat->node_mem_map);
4391#endif
4392
4393        free_area_init_core(pgdat, zones_size, zholes_size);
4394}
4395
4396#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
4397
4398#if MAX_NUMNODES > 1
4399/*
4400 * Figure out the number of possible node ids.
4401 */
4402static void __init setup_nr_node_ids(void)
4403{
4404        unsigned int node;
4405        unsigned int highest = 0;
4406
4407        for_each_node_mask(node, node_possible_map)
4408                highest = node;
4409        nr_node_ids = highest + 1;
4410}
4411#else
4412static inline void setup_nr_node_ids(void)
4413{
4414}
4415#endif
4416
4417/**
4418 * node_map_pfn_alignment - determine the maximum internode alignment
4419 *
4420 * This function should be called after node map is populated and sorted.
4421 * It calculates the maximum power of two alignment which can distinguish
4422 * all the nodes.
4423 *
4424 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
4425 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
4426 * nodes are instead shifted by 256MiB, the result is 256MiB.  If only the
4427 * last node is shifted, 1GiB is still enough and this function indicates so.
4428 *
4429 * This is used to test whether pfn -> nid mapping of the chosen memory
4430 * model has fine enough granularity to avoid incorrect mapping for the
4431 * populated node map.
4432 *
4433 * Returns the determined alignment in PFNs.  0 if there is no alignment
4434 * requirement (single node).
4435 */
4436unsigned long __init node_map_pfn_alignment(void)
4437{
4438        unsigned long accl_mask = 0, last_end = 0;
4439        unsigned long start, end, mask;
4440        int last_nid = -1;
4441        int i, nid;
4442
4443        for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
4444                if (!start || last_nid < 0 || last_nid == nid) {
4445                        last_nid = nid;
4446                        last_end = end;
4447                        continue;
4448                }
4449
4450                /*
4451                 * Start with a mask granular enough to pinpoint the
4452                 * start pfn and tick off bits one by one until it becomes
4453                 * too coarse to separate the current node from the last.
4454                 */
4455                mask = ~((1 << __ffs(start)) - 1);
4456                while (mask && last_end <= (start & (mask << 1)))
4457                        mask <<= 1;
4458
4459                /* accumulate all internode masks */
4460                accl_mask |= mask;
4461        }
4462
4463        /* convert mask to number of pages */
4464        return ~accl_mask + 1;
4465}
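/*
 * Worked example (illustrative, 4KiB pages): two nodes covering pfns
 * [0, 0x40000) and [0x40000, 0x80000), i.e. 1GiB each.  For the second
 * range __ffs(0x40000) = 18, so the initial mask is ~0x3ffff; last_end
 * (0x40000) is not <= (0x40000 & (mask << 1)) = 0, so the mask is not
 * widened.  accl_mask becomes ~0x3ffff and the function returns
 * 0x40000 pfns, i.e. the 1GiB alignment of the example above.
 */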
4466
4467/* Find the lowest pfn for a node */
4468static unsigned long __init find_min_pfn_for_node(int nid)
4469{
4470        unsigned long min_pfn = ULONG_MAX;
4471        unsigned long start_pfn;
4472        int i;
4473
4474        for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
4475                min_pfn = min(min_pfn, start_pfn);
4476
4477        if (min_pfn == ULONG_MAX) {
4478                printk(KERN_WARNING
4479                        "Could not find start_pfn for node %d\n", nid);
4480                return 0;
4481        }
4482
4483        return min_pfn;
4484}
4485
4486/**
4487 * find_min_pfn_with_active_regions - Find the minimum PFN registered
4488 *
4489 * It returns the minimum PFN based on information provided via
4490 * add_active_range().
4491 */
4492unsigned long __init find_min_pfn_with_active_regions(void)
4493{
4494        return find_min_pfn_for_node(MAX_NUMNODES);
4495}
4496
4497/*
4498 * early_calculate_totalpages()
4499 * Sum pages in active regions for movable zone.
4500 * Populate N_HIGH_MEMORY for calculating usable_nodes.
4501 */
4502static unsigned long __init early_calculate_totalpages(void)
4503{
4504        unsigned long totalpages = 0;
4505        unsigned long start_pfn, end_pfn;
4506        int i, nid;
4507
4508        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4509                unsigned long pages = end_pfn - start_pfn;
4510
4511                totalpages += pages;
4512                if (pages)
4513                        node_set_state(nid, N_HIGH_MEMORY);
4514        }
4515        return totalpages;
4516}
4517
4518/*
4519 * Find the PFN the Movable zone begins in each node. Kernel memory
4520 * is spread evenly between nodes as long as the nodes have enough
4521 * memory. When they don't, some nodes will have more kernelcore than
4522 * others
4523 */
4524static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
4525{
4526        int i, nid;
4527        unsigned long usable_startpfn;
4528        unsigned long kernelcore_node, kernelcore_remaining;
4529        /* save the state before borrowing the nodemask */
4530        nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
4531        unsigned long totalpages = early_calculate_totalpages();
4532        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
4533
4534        /*
4535         * If movablecore was specified, calculate the corresponding
4536         * size of kernelcore so that memory usable for
4537         * any allocation type is evenly spread. If both kernelcore
4538         * and movablecore are specified, then the value of kernelcore
4539         * will be used for required_kernelcore if it's greater than
4540         * what movablecore would have allowed.
4541         */
4542        if (required_movablecore) {
4543                unsigned long corepages;
4544
4545                /*
4546                 * Round-up so that ZONE_MOVABLE is at least as large as what
4547                 * was requested by the user
4548                 */
4549                required_movablecore =
4550                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
4551                corepages = totalpages - required_movablecore;
4552
4553                required_kernelcore = max(required_kernelcore, corepages);
4554        }
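        /*
         * Worked example (illustrative): with 8GiB of memory
         * (totalpages = 2097152 with 4KiB pages) and movablecore=2G,
         * required_movablecore rounds to 524288 pages, corepages is
         * 1572864 pages (6GiB), and that becomes required_kernelcore
         * unless a larger kernelcore= was given explicitly.
         */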
4555
4556        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
4557        if (!required_kernelcore)
4558                goto out;
4559
4560        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
4561        find_usable_zone_for_movable();
4562        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
4563
4564restart:
4565        /* Spread kernelcore memory as evenly as possible throughout nodes */
4566        kernelcore_node = required_kernelcore / usable_nodes;
4567        for_each_node_state(nid, N_HIGH_MEMORY) {
4568                unsigned long start_pfn, end_pfn;
4569
4570                /*
4571                 * Recalculate kernelcore_node if the division per node
4572                 * now exceeds what is necessary to satisfy the requested
4573                 * amount of memory for the kernel
4574                 */
4575                if (required_kernelcore < kernelcore_node)
4576                        kernelcore_node = required_kernelcore / usable_nodes;
4577
4578                /*
4579                 * As the map is walked, we track how much memory is usable
4580                 * by the kernel using kernelcore_remaining. When it is
4581                 * 0, the rest of the node is usable by ZONE_MOVABLE
4582                 */
4583                kernelcore_remaining = kernelcore_node;
4584
4585                /* Go through each range of PFNs within this node */
4586                for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
4587                        unsigned long size_pages;
4588
4589                        start_pfn = max(start_pfn, zone_movable_pfn[nid]);
4590                        if (start_pfn >= end_pfn)
4591                                continue;
4592
4593                        /* Account for what is only usable for kernelcore */
4594                        if (start_pfn < usable_startpfn) {
4595                                unsigned long kernel_pages;
4596                                kernel_pages = min(end_pfn, usable_startpfn)
4597                                                                - start_pfn;
4598
4599                                kernelcore_remaining -= min(kernel_pages,
4600                                                        kernelcore_remaining);
4601                                required_kernelcore -= min(kernel_pages,
4602                                                        required_kernelcore);
4603
4604                                /* Continue if range is now fully accounted */
4605                                if (end_pfn <= usable_startpfn) {
4606
4607                                        /*
4608                                         * Push zone_movable_pfn to the end so
4609                                         * that if we have to rebalance
4610                                         * kernelcore across nodes, we will
4611                                         * not double account here
4612                                         */
4613                                        zone_movable_pfn[nid] = end_pfn;
4614                                        continue;
4615                                }
4616                                start_pfn = usable_startpfn;
4617                        }
4618
4619                        /*
4620                         * The usable PFN range for ZONE_MOVABLE is from
4621                         * start_pfn->end_pfn. Calculate size_pages as the
4622                         * number of pages used as kernelcore
4623                         */
4624                        size_pages = end_pfn - start_pfn;
4625                        if (size_pages > kernelcore_remaining)
4626                                size_pages = kernelcore_remaining;
4627                        zone_movable_pfn[nid] = start_pfn + size_pages;
4628
4629                        /*
4630                         * Some kernelcore has been met, update counts and
4631                         * break if the kernelcore for this node has been
4632                         * satisfied
4633                         */
4634                        required_kernelcore -= min(required_kernelcore,
4635                                                                size_pages);
4636                        kernelcore_remaining -= size_pages;
4637                        if (!kernelcore_remaining)
4638                                break;
4639                }
4640        }
4641
4642        /*
4643         * If there is still required_kernelcore, we do another pass with one
4644         * less node in the count. This will push zone_movable_pfn[nid] further
4645         * along on the nodes that still have memory until kernelcore is
4646         * satisfied
4647         */
4648        usable_nodes--;
4649        if (usable_nodes && required_kernelcore > usable_nodes)
4650                goto restart;
4651
4652        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
4653        for (nid = 0; nid < MAX_NUMNODES; nid++)
4654                zone_movable_pfn[nid] =
4655                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
4656
4657out:
4658        /* restore the node_state */
4659        node_states[N_HIGH_MEMORY] = saved_node_state;
4660}
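/*
 * Worked example (illustrative): two nodes with 2GiB of memory each and
 * kernelcore=2G gives kernelcore_node = 262144 pages (1GiB) per node, so
 * zone_movable_pfn[] for each node ends up roughly 1GiB above that node's
 * lowest usable pfn (rounded to MAX_ORDER_NR_PAGES) and the remaining
 * 1GiB of each node is left to ZONE_MOVABLE.
 */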
4661
4662/* Any regular memory on that node? */
4663static void check_for_regular_memory(pg_data_t *pgdat)
4664{
4665#ifdef CONFIG_HIGHMEM
4666        enum zone_type zone_type;
4667
4668        for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
4669                struct zone *zone = &pgdat->node_zones[zone_type];
4670                if (zone->present_pages) {
4671                        node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
4672                        break;
4673                }
4674        }
4675#endif
4676}
4677
4678/**
4679 * free_area_init_nodes - Initialise all pg_data_t and zone data
4680 * @max_zone_pfn: an array of max PFNs for each zone
4681 *
4682 * This will call free_area_init_node() for each active node in the system.
4683 * Using the page ranges provided by add_active_range(), the size of each
4684 * zone in each node, and of its holes, is calculated. If the maximum PFNs
4685 * of two adjacent zones match, it is assumed that the higher zone is empty.
4686 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
4687 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
4688 * starts where the previous one ended. For example, ZONE_DMA32 starts
4689 * at arch_max_dma_pfn.
4690 */
4691void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4692{
4693        unsigned long start_pfn, end_pfn;
4694        int i, nid;
4695
4696        /* Record where the zone boundaries are */
4697        memset(arch_zone_lowest_possible_pfn, 0,
4698                                sizeof(arch_zone_lowest_possible_pfn));
4699        memset(arch_zone_highest_possible_pfn, 0,
4700                                sizeof(arch_zone_highest_possible_pfn));
4701        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4702        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4703        for (i = 1; i < MAX_NR_ZONES; i++) {
4704                if (i == ZONE_MOVABLE)
4705                        continue;
4706                arch_zone_lowest_possible_pfn[i] =
4707                        arch_zone_highest_possible_pfn[i-1];
4708                arch_zone_highest_possible_pfn[i] =
4709                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4710        }
4711        arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4712        arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4713
4714        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4715        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4716        find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4717
4718        /* Print out the zone ranges */
4719        printk("Zone PFN ranges:\n");
4720        for (i = 0; i < MAX_NR_ZONES; i++) {
4721                if (i == ZONE_MOVABLE)
4722                        continue;
4723                printk("  %-8s ", zone_names[i]);
4724                if (arch_zone_lowest_possible_pfn[i] ==
4725                                arch_zone_highest_possible_pfn[i])
4726                        printk("empty\n");
4727                else
4728                        printk("%0#10lx -> %0#10lx\n",
4729                                arch_zone_lowest_possible_pfn[i],
4730                                arch_zone_highest_possible_pfn[i]);
4731        }
4732
4733        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4734        printk("Movable zone start PFN for each node\n");
4735        for (i = 0; i < MAX_NUMNODES; i++) {
4736                if (zone_movable_pfn[i])
4737                        printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4738        }
4739
4740        /* Print out the early_node_map[] */
4741        printk("Early memory PFN ranges\n");
4742        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4743                printk("  %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn);
4744
4745        /* Initialise every node */
4746        mminit_verify_pageflags_layout();
4747        setup_nr_node_ids();
4748        for_each_online_node(nid) {
4749                pg_data_t *pgdat = NODE_DATA(nid);
4750                free_area_init_node(nid, NULL,
4751                                find_min_pfn_for_node(nid), NULL);
4752
4753                /* Any memory on that node? */
4754                if (pgdat->node_present_pages)
4755                        node_set_state(nid, N_HIGH_MEMORY);
4756                check_for_regular_memory(pgdat);
4757        }
4758}
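/*
 * Illustrative sketch (not part of this file): an architecture's
 * paging_init() typically fills a max_zone_pfn[] array from its physical
 * memory map and hands it to free_area_init_nodes().  The function name
 * and PFN values below are hypothetical and the zone indices are config
 * dependent.
 */
#if 0
static void __init example_paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA]    = 0x1000;            /* first 16MiB */
        max_zone_pfns[ZONE_DMA32]  = 0x100000;          /* first 4GiB  */
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;       /* rest of RAM */

        free_area_init_nodes(max_zone_pfns);
}
#endif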
4759
4760static int __init cmdline_parse_core(char *p, unsigned long *core)
4761{
4762        unsigned long long coremem;
4763        if (!p)
4764                return -EINVAL;
4765
4766        coremem = memparse(p, &p);
4767        *core = coremem >> PAGE_SHIFT;
4768
4769        /* Paranoid check that UL is enough for the coremem value */
4770        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4771
4772        return 0;
4773}
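/*
 * Worked example (illustrative): booting with "kernelcore=512M" makes
 * memparse() return 536870912, so *core becomes 131072 pages with 4KiB
 * pages; "movablecore=1G" likewise stores 262144 pages.
 */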
4774
4775/*
4776 * kernelcore=size sets the amount of memory for use for allocations that
4777 * cannot be reclaimed or migrated.
4778 */
4779static int __init cmdline_parse_kernelcore(char *p)
4780{
4781        return cmdline_parse_core(p, &required_kernelcore);
4782}
4783
4784/*
4785 * movablecore=size sets the amount of memory for use for allocations that
4786 * can be reclaimed or migrated.
4787 */
4788static int __init cmdline_parse_movablecore(char *p)
4789{
4790        return cmdline_parse_core(p, &required_movablecore);
4791}
4792
4793early_param("kernelcore", cmdline_parse_kernelcore);
4794early_param("movablecore", cmdline_parse_movablecore);
4795
4796#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
4797
4798/**
4799 * set_dma_reserve - set the specified number of pages reserved in the first zone
4800 * @new_dma_reserve: The number of pages to mark reserved
4801 *
4802 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4803 * In the DMA zone, a significant percentage may be consumed by kernel image
4804 * and other unfreeable allocations which can skew the watermarks badly. This
4805 * function may optionally be used to account for unfreeable pages in the
4806 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4807 * smaller per-cpu batchsize.
4808 */
4809void __init set_dma_reserve(unsigned long new_dma_reserve)
4810{
4811        dma_reserve = new_dma_reserve;
4812}
4813
4814void __init free_area_init(unsigned long *zones_size)
4815{
4816        free_area_init_node(0, zones_size,
4817                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4818}
4819
4820static int page_alloc_cpu_notify(struct notifier_block *self,
4821                                 unsigned long action, void *hcpu)
4822{
4823        int cpu = (unsigned long)hcpu;
4824
4825        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4826                drain_pages(cpu);
4827
4828                /*
4829                 * Spill the event counters of the dead processor
4830                 * into the current processor's event counters.
4831                 * This artificially elevates the count of the current
4832                 * processor.
4833                 */
4834                vm_events_fold_cpu(cpu);
4835
4836                /*
4837                 * Zero the differential counters of the dead processor
4838                 * so that the vm statistics are consistent.
4839                 *
4840                 * This is only okay since the processor is dead and cannot
4841                 * race with what we are doing.
4842                 */
4843                refresh_cpu_vm_stats(cpu);
4844        }
4845        return NOTIFY_OK;
4846}
4847
4848void __init page_alloc_init(void)
4849{
4850        hotcpu_notifier(page_alloc_cpu_notify, 0);
4851}
4852
4853/*
4854 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4855 *      or min_free_kbytes changes.
4856 */
4857static void calculate_totalreserve_pages(void)
4858{
4859        struct pglist_data *pgdat;
4860        unsigned long reserve_pages = 0;
4861        enum zone_type i, j;
4862
4863        for_each_online_pgdat(pgdat) {