linux/mm/page_alloc.c
   1/*
   2 *  linux/mm/page_alloc.c
   3 *
   4 *  Manages the free list; the system allocates free pages here.
   5 *  Note that kmalloc() lives in slab.c
   6 *
   7 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   8 *  Swap reorganised 29.12.95, Stephen Tweedie
   9 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15 */
  16
  17#include <linux/stddef.h>
  18#include <linux/mm.h>
  19#include <linux/swap.h>
  20#include <linux/interrupt.h>
  21#include <linux/pagemap.h>
  22#include <linux/jiffies.h>
  23#include <linux/bootmem.h>
  24#include <linux/compiler.h>
  25#include <linux/kernel.h>
  26#include <linux/module.h>
  27#include <linux/suspend.h>
  28#include <linux/pagevec.h>
  29#include <linux/blkdev.h>
  30#include <linux/slab.h>
  31#include <linux/oom.h>
  32#include <linux/notifier.h>
  33#include <linux/topology.h>
  34#include <linux/sysctl.h>
  35#include <linux/cpu.h>
  36#include <linux/cpuset.h>
  37#include <linux/memory_hotplug.h>
  38#include <linux/nodemask.h>
  39#include <linux/vmalloc.h>
  40#include <linux/mempolicy.h>
  41#include <linux/stop_machine.h>
  42#include <linux/sort.h>
  43#include <linux/pfn.h>
  44#include <linux/backing-dev.h>
  45#include <linux/fault-inject.h>
  46#include <linux/page-isolation.h>
  47#include <linux/memcontrol.h>
  48#include <linux/debugobjects.h>
  49
  50#include <asm/tlbflush.h>
  51#include <asm/div64.h>
  52#include "internal.h"
  53
  54/*
  55 * Array of node states.
  56 */
  57nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
  58        [N_POSSIBLE] = NODE_MASK_ALL,
  59        [N_ONLINE] = { { [0] = 1UL } },
  60#ifndef CONFIG_NUMA
  61        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
  62#ifdef CONFIG_HIGHMEM
  63        [N_HIGH_MEMORY] = { { [0] = 1UL } },
  64#endif
  65        [N_CPU] = { { [0] = 1UL } },
  66#endif  /* NUMA */
  67};
  68EXPORT_SYMBOL(node_states);
  69
  70unsigned long totalram_pages __read_mostly;
  71unsigned long totalreserve_pages __read_mostly;
  72long nr_swap_pages;
  73int percpu_pagelist_fraction;
  74
  75#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  76int pageblock_order __read_mostly;
  77#endif
  78
  79static void __free_pages_ok(struct page *page, unsigned int order);
  80
  81/*
  82 * results with 256, 32 in the lowmem_reserve sysctl:
  83 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  84 *      1G machine -> (16M dma, 784M normal, 224M high)
  85 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
  86 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  87 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
  88 *
  89 * TBD: should special case ZONE_DMA32 machines here - in those we normally
  90 * don't need any ZONE_NORMAL reservation
  91 */
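/*
 * Worked example (illustrative): with the ratios above on the 1G machine
 * described, the ratio of 256 for ZONE_DMA means a NORMAL allocation
 * leaves roughly 784M/256 ~= 3M of DMA memory reserved, and the ratio of
 * 32 for ZONE_NORMAL means a HIGHMEM allocation leaves 224M/32 = 7M of
 * NORMAL memory reserved.
 */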
  92int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
  93#ifdef CONFIG_ZONE_DMA
  94         256,
  95#endif
  96#ifdef CONFIG_ZONE_DMA32
  97         256,
  98#endif
  99#ifdef CONFIG_HIGHMEM
 100         32,
 101#endif
 102         32,
 103};
 104
 105EXPORT_SYMBOL(totalram_pages);
 106
 107static char * const zone_names[MAX_NR_ZONES] = {
 108#ifdef CONFIG_ZONE_DMA
 109         "DMA",
 110#endif
 111#ifdef CONFIG_ZONE_DMA32
 112         "DMA32",
 113#endif
 114         "Normal",
 115#ifdef CONFIG_HIGHMEM
 116         "HighMem",
 117#endif
 118         "Movable",
 119};
 120
 121int min_free_kbytes = 1024;
 122
 123unsigned long __meminitdata nr_kernel_pages;
 124unsigned long __meminitdata nr_all_pages;
 125static unsigned long __meminitdata dma_reserve;
 126
 127#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 128  /*
 129   * MAX_ACTIVE_REGIONS determines the maximum number of distinct
 130   * ranges of memory (RAM) that may be registered with add_active_range().
 131   * Ranges passed to add_active_range() will be merged if possible
 132   * so the number of times add_active_range() can be called is
 133   * related to the number of nodes and the number of holes
 134   */
 135  #ifdef CONFIG_MAX_ACTIVE_REGIONS
 136    /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */
 137    #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS
 138  #else
 139    #if MAX_NUMNODES >= 32
 140      /* If there can be many nodes, allow up to 50 holes per node */
 141      #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50)
 142    #else
 143      /* By default, allow up to 256 distinct regions */
 144      #define MAX_ACTIVE_REGIONS 256
 145    #endif
 146  #endif
 147
 148  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
 149  static int __meminitdata nr_nodemap_entries;
 150  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
 151  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 152#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 153  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
 154  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 155#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 156  static unsigned long __initdata required_kernelcore;
 157  static unsigned long __initdata required_movablecore;
 158  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 159
 160  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 161  int movable_zone;
 162  EXPORT_SYMBOL(movable_zone);
 163#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 164
 165#if MAX_NUMNODES > 1
 166int nr_node_ids __read_mostly = MAX_NUMNODES;
 167EXPORT_SYMBOL(nr_node_ids);
 168#endif
 169
 170int page_group_by_mobility_disabled __read_mostly;
 171
 172static void set_pageblock_migratetype(struct page *page, int migratetype)
 173{
 174        set_pageblock_flags_group(page, (unsigned long)migratetype,
 175                                        PB_migrate, PB_migrate_end);
 176}
 177
 178#ifdef CONFIG_DEBUG_VM
 179static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 180{
 181        int ret = 0;
 182        unsigned seq;
 183        unsigned long pfn = page_to_pfn(page);
 184
 185        do {
 186                seq = zone_span_seqbegin(zone);
 187                if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
 188                        ret = 1;
 189                else if (pfn < zone->zone_start_pfn)
 190                        ret = 1;
 191        } while (zone_span_seqretry(zone, seq));
 192
 193        return ret;
 194}
 195
 196static int page_is_consistent(struct zone *zone, struct page *page)
 197{
 198        if (!pfn_valid_within(page_to_pfn(page)))
 199                return 0;
 200        if (zone != page_zone(page))
 201                return 0;
 202
 203        return 1;
 204}
 205/*
 206 * Temporary debugging check for pages not lying within a given zone.
 207 */
 208static int bad_range(struct zone *zone, struct page *page)
 209{
 210        if (page_outside_zone_boundaries(zone, page))
 211                return 1;
 212        if (!page_is_consistent(zone, page))
 213                return 1;
 214
 215        return 0;
 216}
 217#else
 218static inline int bad_range(struct zone *zone, struct page *page)
 219{
 220        return 0;
 221}
 222#endif
 223
 224static void bad_page(struct page *page)
 225{
 226        void *pc = page_get_page_cgroup(page);
 227
 228        printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
 229                "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
 230                current->comm, page, (int)(2*sizeof(unsigned long)),
 231                (unsigned long)page->flags, page->mapping,
 232                page_mapcount(page), page_count(page));
 233        if (pc) {
 234                printk(KERN_EMERG "cgroup:%p\n", pc);
 235                page_reset_bad_cgroup(page);
 236        }
 237        printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
 238                KERN_EMERG "Backtrace:\n");
 239        dump_stack();
 240        page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
 241        set_page_count(page, 0);
 242        reset_page_mapcount(page);
 243        page->mapping = NULL;
 244        add_taint(TAINT_BAD_PAGE);
 245}
 246
 247/*
 248 * Higher-order pages are called "compound pages".  They are structured thusly:
 249 *
 250 * The first PAGE_SIZE page is called the "head page".
 251 *
 252 * The remaining PAGE_SIZE pages are called "tail pages".
 253 *
 254 * All pages have PG_compound set.  All pages have their ->private pointing at
 255 * the head page (even the head page has this).
 256 *
 257 * The first tail page's ->lru.next holds the address of the compound page's
 258 * put_page() function.  Its ->lru.prev holds the order of allocation.
 259 * This usage means that zero-order pages may not be compound.
 260 */
 261
 262static void free_compound_page(struct page *page)
 263{
 264        __free_pages_ok(page, compound_order(page));
 265}
 266
 267void prep_compound_page(struct page *page, unsigned long order)
 268{
 269        int i;
 270        int nr_pages = 1 << order;
 271
 272        set_compound_page_dtor(page, free_compound_page);
 273        set_compound_order(page, order);
 274        __SetPageHead(page);
 275        for (i = 1; i < nr_pages; i++) {
 276                struct page *p = page + i;
 277
 278                __SetPageTail(p);
 279                p->first_page = page;
 280        }
 281}
 282
 283#ifdef CONFIG_HUGETLBFS
 284void prep_compound_gigantic_page(struct page *page, unsigned long order)
 285{
 286        int i;
 287        int nr_pages = 1 << order;
 288        struct page *p = page + 1;
 289
 290        set_compound_page_dtor(page, free_compound_page);
 291        set_compound_order(page, order);
 292        __SetPageHead(page);
 293        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
 294                __SetPageTail(p);
 295                p->first_page = page;
 296        }
 297}
 298#endif
 299
 300static void destroy_compound_page(struct page *page, unsigned long order)
 301{
 302        int i;
 303        int nr_pages = 1 << order;
 304
 305        if (unlikely(compound_order(page) != order))
 306                bad_page(page);
 307
 308        if (unlikely(!PageHead(page)))
 309                bad_page(page);
 310        __ClearPageHead(page);
 311        for (i = 1; i < nr_pages; i++) {
 312                struct page *p = page + i;
 313
 314                if (unlikely(!PageTail(p) |
 315                                (p->first_page != page)))
 316                        bad_page(page);
 317                __ClearPageTail(p);
 318        }
 319}
 320
 321static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 322{
 323        int i;
 324
 325        /*
 326         * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 327         * and __GFP_HIGHMEM from hard or soft interrupt context.
 328         */
 329        VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
 330        for (i = 0; i < (1 << order); i++)
 331                clear_highpage(page + i);
 332}
 333
 334static inline void set_page_order(struct page *page, int order)
 335{
 336        set_page_private(page, order);
 337        __SetPageBuddy(page);
 338}
 339
 340static inline void rmv_page_order(struct page *page)
 341{
 342        __ClearPageBuddy(page);
 343        set_page_private(page, 0);
 344}
 345
 346/*
 347 * Locate the struct page for both the matching buddy in our
 348 * pair (buddy1) and the combined O(n+1) page they form (page).
 349 *
 350 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 351 * the following equation:
 352 *     B2 = B1 ^ (1 << O)
 354 * For example, if the starting buddy (buddy1) is #8, its order
 354 * 1 buddy is #10:
 355 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 356 *
 357 * 2) Any buddy B will have an order O+1 parent P which
 358 * satisfies the following equation:
 359 *     P = B & ~(1 << O)
 360 *
 361 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 362 */
 363static inline struct page *
 364__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
 365{
 366        unsigned long buddy_idx = page_idx ^ (1 << order);
 367
 368        return page + (buddy_idx - page_idx);
 369}
 370
 371static inline unsigned long
 372__find_combined_index(unsigned long page_idx, unsigned int order)
 373{
 374        return (page_idx & ~(1 << order));
 375}
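/*
 * Worked example (illustrative): for page_idx = 12 and order = 2,
 * __page_find_buddy() yields buddy_idx = 12 ^ (1 << 2) = 8, and
 * __find_combined_index() yields 12 & ~(1 << 2) = 8, i.e. the merged
 * order-3 block starts at index 8.
 */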
 376
 377/*
 378 * This function checks whether a page is free && is the buddy.
 379 * We can coalesce a page and its buddy if
 380 * (a) the buddy is not in a hole &&
 381 * (b) the buddy is in the buddy system &&
 382 * (c) a page and its buddy have the same order &&
 383 * (d) a page and its buddy are in the same zone.
 384 *
 385 * For recording whether a page is in the buddy system, we use PG_buddy.
 386 * Setting, clearing, and testing PG_buddy is serialized by zone->lock.
 387 *
 388 * For recording page's order, we use page_private(page).
 389 */
 390static inline int page_is_buddy(struct page *page, struct page *buddy,
 391                                                                int order)
 392{
 393        if (!pfn_valid_within(page_to_pfn(buddy)))
 394                return 0;
 395
 396        if (page_zone_id(page) != page_zone_id(buddy))
 397                return 0;
 398
 399        if (PageBuddy(buddy) && page_order(buddy) == order) {
 400                BUG_ON(page_count(buddy) != 0);
 401                return 1;
 402        }
 403        return 0;
 404}
 405
 406/*
 407 * Freeing function for a buddy system allocator.
 408 *
 409 * The concept of a buddy system is to maintain a direct-mapped table
 410 * (containing bit values) for memory blocks of various "orders".
 411 * The bottom level table contains the map for the smallest allocatable
 412 * units of memory (here, pages), and each level above it describes
 413 * pairs of units from the levels below, hence, "buddies".
 414 * At a high level, all that happens here is marking the table entry
 415 * at the bottom level available, and propagating the changes upward
 416 * as necessary, plus some accounting needed to play nicely with other
 417 * parts of the VM system.
 418 * At each level, we keep a list of pages, which are the heads of contiguous
 419 * runs of (1 << order) free pages and are marked with PG_buddy. A page's
 420 * order is recorded in the page_private(page) field.
 421 * So when we are allocating or freeing one, we can derive the state of the
 422 * other.  That is, if we allocate a small block, and both were   
 423 * free, the remainder of the region must be split into blocks.   
 424 * If a block is freed, and its buddy is also free, then this
 425 * triggers coalescing into a block of larger size.            
 426 *
 427 * -- wli
 428 */
 429
 430static inline void __free_one_page(struct page *page,
 431                struct zone *zone, unsigned int order)
 432{
 433        unsigned long page_idx;
 434        int order_size = 1 << order;
 435        int migratetype = get_pageblock_migratetype(page);
 436
 437        if (unlikely(PageCompound(page)))
 438                destroy_compound_page(page, order);
 439
 440        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 441
 442        VM_BUG_ON(page_idx & (order_size - 1));
 443        VM_BUG_ON(bad_range(zone, page));
 444
 445        __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
 446        while (order < MAX_ORDER-1) {
 447                unsigned long combined_idx;
 448                struct page *buddy;
 449
 450                buddy = __page_find_buddy(page, page_idx, order);
 451                if (!page_is_buddy(page, buddy, order))
 452                        break;
 453
 454                /* Our buddy is free, merge with it and move up one order. */
 455                list_del(&buddy->lru);
 456                zone->free_area[order].nr_free--;
 457                rmv_page_order(buddy);
 458                combined_idx = __find_combined_index(page_idx, order);
 459                page = page + (combined_idx - page_idx);
 460                page_idx = combined_idx;
 461                order++;
 462        }
 463        set_page_order(page, order);
 464        list_add(&page->lru,
 465                &zone->free_area[order].free_list[migratetype]);
 466        zone->free_area[order].nr_free++;
 467}
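/*
 * Worked example (illustrative): freeing an order-0 page at page_idx 5
 * when index 4 holds a free order-0 buddy (5 ^ 1 = 4) merges them into
 * an order-1 block at index 4 (5 & ~1 = 4).  If indices 6-7 then form a
 * free order-1 buddy (4 ^ 2 = 6), the loop merges again into an order-2
 * block at index 4, and so on up to MAX_ORDER-1.
 */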
 468
 469static inline int free_pages_check(struct page *page)
 470{
 471        if (unlikely(page_mapcount(page) |
 472                (page->mapping != NULL)  |
 473                (page_get_page_cgroup(page) != NULL) |
 474                (page_count(page) != 0)  |
 475                (page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
 476                bad_page(page);
 477        if (PageDirty(page))
 478                __ClearPageDirty(page);
 479        /*
 480         * For now, we report if PG_reserved was found set, but do not
 481         * clear it, and do not free the page.  But we shall soon need
 482         * to do more, for when the ZERO_PAGE count wraps negative.
 483         */
 484        return PageReserved(page);
 485}
 486
 487/*
 488 * Frees a list of pages. 
 489 * Assumes all pages on list are in same zone, and of same order.
 490 * count is the number of pages to free.
 491 *
 492 * If the zone was previously in an "all pages pinned" state then look to
 493 * see if this freeing clears that state.
 494 *
 495 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 496 * pinned" detection logic.
 497 */
 498static void free_pages_bulk(struct zone *zone, int count,
 499                                        struct list_head *list, int order)
 500{
 501        spin_lock(&zone->lock);
 502        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 503        zone->pages_scanned = 0;
 504        while (count--) {
 505                struct page *page;
 506
 507                VM_BUG_ON(list_empty(list));
 508                page = list_entry(list->prev, struct page, lru);
 509                /* have to delete it as __free_one_page list manipulates */
 510                list_del(&page->lru);
 511                __free_one_page(page, zone, order);
 512        }
 513        spin_unlock(&zone->lock);
 514}
 515
 516static void free_one_page(struct zone *zone, struct page *page, int order)
 517{
 518        spin_lock(&zone->lock);
 519        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 520        zone->pages_scanned = 0;
 521        __free_one_page(page, zone, order);
 522        spin_unlock(&zone->lock);
 523}
 524
 525static void __free_pages_ok(struct page *page, unsigned int order)
 526{
 527        unsigned long flags;
 528        int i;
 529        int reserved = 0;
 530
 531        for (i = 0 ; i < (1 << order) ; ++i)
 532                reserved += free_pages_check(page + i);
 533        if (reserved)
 534                return;
 535
 536        if (!PageHighMem(page)) {
 537                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
 538                debug_check_no_obj_freed(page_address(page),
 539                                           PAGE_SIZE << order);
 540        }
 541        arch_free_page(page, order);
 542        kernel_map_pages(page, 1 << order, 0);
 543
 544        local_irq_save(flags);
 545        __count_vm_events(PGFREE, 1 << order);
 546        free_one_page(page_zone(page), page, order);
 547        local_irq_restore(flags);
 548}
 549
 550/*
 551 * permit the bootmem allocator to evade page validation on high-order frees
 552 */
 553void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 554{
 555        if (order == 0) {
 556                __ClearPageReserved(page);
 557                set_page_count(page, 0);
 558                set_page_refcounted(page);
 559                __free_page(page);
 560        } else {
 561                int loop;
 562
 563                prefetchw(page);
 564                for (loop = 0; loop < BITS_PER_LONG; loop++) {
 565                        struct page *p = &page[loop];
 566
 567                        if (loop + 1 < BITS_PER_LONG)
 568                                prefetchw(p + 1);
 569                        __ClearPageReserved(p);
 570                        set_page_count(p, 0);
 571                }
 572
 573                set_page_refcounted(page);
 574                __free_pages(page, order);
 575        }
 576}
 577
 578
 579/*
 580 * The order of subdivision here is critical for the IO subsystem.
 581 * Please do not alter this order without good reasons and regression
 582 * testing. Specifically, as large blocks of memory are subdivided,
 583 * the order in which smaller blocks are delivered depends on the order
 584 * they're subdivided in this function. This is the primary factor
 585 * influencing the order in which pages are delivered to the IO
 586 * subsystem according to empirical testing, and this is also justified
 587 * by considering the behavior of a buddy system containing a single
 588 * large block of memory acted on by a series of small allocations.
 589 * This behavior is a critical factor in sglist merging's success.
 590 *
 591 * -- wli
 592 */
 593static inline void expand(struct zone *zone, struct page *page,
 594        int low, int high, struct free_area *area,
 595        int migratetype)
 596{
 597        unsigned long size = 1 << high;
 598
 599        while (high > low) {
 600                area--;
 601                high--;
 602                size >>= 1;
 603                VM_BUG_ON(bad_range(zone, &page[size]));
 604                list_add(&page[size].lru, &area->free_list[migratetype]);
 605                area->nr_free++;
 606                set_page_order(&page[size], high);
 607        }
 608}
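/*
 * Worked example (illustrative): satisfying an order-0 request (low = 0)
 * from an order-3 block (high = 3) starting at index 0, expand() places
 * pages 4-7 on the order-2 free list, pages 2-3 on the order-1 list and
 * page 1 on the order-0 list, leaving page 0 for the caller.
 */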
 609
 610/*
 611 * This page is about to be returned from the page allocator
 612 */
 613static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 614{
 615        if (unlikely(page_mapcount(page) |
 616                (page->mapping != NULL)  |
 617                (page_get_page_cgroup(page) != NULL) |
 618                (page_count(page) != 0)  |
 619                (page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
 620                bad_page(page);
 621
 622        /*
 623         * For now, we report if PG_reserved was found set, but do not
 624         * clear it, and do not allocate the page: as a safety net.
 625         */
 626        if (PageReserved(page))
 627                return 1;
 628
 629        page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
 630                        1 << PG_referenced | 1 << PG_arch_1 |
 631                        1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
 632        set_page_private(page, 0);
 633        set_page_refcounted(page);
 634
 635        arch_alloc_page(page, order);
 636        kernel_map_pages(page, 1 << order, 1);
 637
 638        if (gfp_flags & __GFP_ZERO)
 639                prep_zero_page(page, order, gfp_flags);
 640
 641        if (order && (gfp_flags & __GFP_COMP))
 642                prep_compound_page(page, order);
 643
 644        return 0;
 645}
 646
 647/*
 648 * Go through the free lists for the given migratetype and remove
 649 * the smallest available page from the freelists
 650 */
 651static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 652                                                int migratetype)
 653{
 654        unsigned int current_order;
 655        struct free_area * area;
 656        struct page *page;
 657
 658        /* Find a page of the appropriate size in the preferred list */
 659        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 660                area = &(zone->free_area[current_order]);
 661                if (list_empty(&area->free_list[migratetype]))
 662                        continue;
 663
 664                page = list_entry(area->free_list[migratetype].next,
 665                                                        struct page, lru);
 666                list_del(&page->lru);
 667                rmv_page_order(page);
 668                area->nr_free--;
 669                __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
 670                expand(zone, page, order, current_order, area, migratetype);
 671                return page;
 672        }
 673
 674        return NULL;
 675}
 676
 677
 678/*
 679 * This array describes the order in which free lists are fallen back on
 680 * when the free lists for the desired migrate type are depleted
 681 */
 682static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
 683        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 684        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 685        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
 686        [MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE }, /* Never used */
 687};
 688
 689/*
 690 * Move the free pages in a range to the free lists of the requested type.
 691 * Note that start_page and end_page are not aligned on a pageblock
 692 * boundary. If alignment is required, use move_freepages_block()
 693 */
 694static int move_freepages(struct zone *zone,
 695                          struct page *start_page, struct page *end_page,
 696                          int migratetype)
 697{
 698        struct page *page;
 699        unsigned long order;
 700        int pages_moved = 0;
 701
 702#ifndef CONFIG_HOLES_IN_ZONE
 703        /*
 704         * page_zone is not safe to call in this context when
 705         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 706         * anyway as we check zone boundaries in move_freepages_block().
 707         * Remove at a later date when no bug reports exist related to
 708         * grouping pages by mobility
 709         */
 710        BUG_ON(page_zone(start_page) != page_zone(end_page));
 711#endif
 712
 713        for (page = start_page; page <= end_page;) {
 714                /* Make sure we are not inadvertently changing nodes */
 715                VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
 716
 717                if (!pfn_valid_within(page_to_pfn(page))) {
 718                        page++;
 719                        continue;
 720                }
 721
 722                if (!PageBuddy(page)) {
 723                        page++;
 724                        continue;
 725                }
 726
 727                order = page_order(page);
 728                list_del(&page->lru);
 729                list_add(&page->lru,
 730                        &zone->free_area[order].free_list[migratetype]);
 731                page += 1 << order;
 732                pages_moved += 1 << order;
 733        }
 734
 735        return pages_moved;
 736}
 737
 738static int move_freepages_block(struct zone *zone, struct page *page,
 739                                int migratetype)
 740{
 741        unsigned long start_pfn, end_pfn;
 742        struct page *start_page, *end_page;
 743
 744        start_pfn = page_to_pfn(page);
 745        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
 746        start_page = pfn_to_page(start_pfn);
 747        end_page = start_page + pageblock_nr_pages - 1;
 748        end_pfn = start_pfn + pageblock_nr_pages - 1;
 749
 750        /* Do not cross zone boundaries */
 751        if (start_pfn < zone->zone_start_pfn)
 752                start_page = page;
 753        if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
 754                return 0;
 755
 756        return move_freepages(zone, start_page, end_page, migratetype);
 757}
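/*
 * Illustrative example (assuming pageblock_nr_pages == 1024): a page at
 * pfn 5000 rounds down to the pageblock starting at pfn 4096
 * (5000 & ~1023), so the block spanning pfns 4096-5119 is moved as a
 * unit, provided it lies entirely within the zone.
 */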
 758
 759/* Remove an element from the buddy allocator from the fallback list */
 760static struct page *__rmqueue_fallback(struct zone *zone, int order,
 761                                                int start_migratetype)
 762{
 763        struct free_area * area;
 764        int current_order;
 765        struct page *page;
 766        int migratetype, i;
 767
 768        /* Find the largest possible block of pages in the other list */
 769        for (current_order = MAX_ORDER-1; current_order >= order;
 770                                                --current_order) {
 771                for (i = 0; i < MIGRATE_TYPES - 1; i++) {
 772                        migratetype = fallbacks[start_migratetype][i];
 773
 774                        /* MIGRATE_RESERVE handled later if necessary */
 775                        if (migratetype == MIGRATE_RESERVE)
 776                                continue;
 777
 778                        area = &(zone->free_area[current_order]);
 779                        if (list_empty(&area->free_list[migratetype]))
 780                                continue;
 781
 782                        page = list_entry(area->free_list[migratetype].next,
 783                                        struct page, lru);
 784                        area->nr_free--;
 785
 786                        /*
 787                         * If breaking a large block of pages, move all free
 788                         * pages to the preferred allocation list. If falling
 789                         * back for a reclaimable kernel allocation, be more
 790                         * aggressive about taking ownership of free pages
 791                         */
 792                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
 793                                        start_migratetype == MIGRATE_RECLAIMABLE) {
 794                                unsigned long pages;
 795                                pages = move_freepages_block(zone, page,
 796                                                                start_migratetype);
 797
 798                                /* Claim the whole block if over half of it is free */
 799                                if (pages >= (1 << (pageblock_order-1)))
 800                                        set_pageblock_migratetype(page,
 801                                                                start_migratetype);
 802
 803                                migratetype = start_migratetype;
 804                        }
 805
 806                        /* Remove the page from the freelists */
 807                        list_del(&page->lru);
 808                        rmv_page_order(page);
 809                        __mod_zone_page_state(zone, NR_FREE_PAGES,
 810                                                        -(1UL << order));
 811
 812                        if (current_order == pageblock_order)
 813                                set_pageblock_migratetype(page,
 814                                                        start_migratetype);
 815
 816                        expand(zone, page, order, current_order, area, migratetype);
 817                        return page;
 818                }
 819        }
 820
 821        /* Use MIGRATE_RESERVE rather than fail an allocation */
 822        return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 823}
 824
 825/*
 826 * Do the hard work of removing an element from the buddy allocator.
 827 * Call me with the zone->lock already held.
 828 */
 829static struct page *__rmqueue(struct zone *zone, unsigned int order,
 830                                                int migratetype)
 831{
 832        struct page *page;
 833
 834        page = __rmqueue_smallest(zone, order, migratetype);
 835
 836        if (unlikely(!page))
 837                page = __rmqueue_fallback(zone, order, migratetype);
 838
 839        return page;
 840}
 841
 842/* 
 843 * Obtain a specified number of elements from the buddy allocator, all under
 844 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 845 * Returns the number of new pages which were placed at *list.
 846 */
 847static int rmqueue_bulk(struct zone *zone, unsigned int order, 
 848                        unsigned long count, struct list_head *list,
 849                        int migratetype, int cold)
 850{
 851        int i;
 852        
 853        spin_lock(&zone->lock);
 854        for (i = 0; i < count; ++i) {
 855                struct page *page = __rmqueue(zone, order, migratetype);
 856                if (unlikely(page == NULL))
 857                        break;
 858
 859                /*
 860                 * Split buddy pages returned by expand() are received here
 861                 * in physical page order. The page is added to the caller's
 862                 * list and the list head then moves forward. From the caller's
 863                 * perspective, the linked list is ordered by page number in
 864                 * some conditions. This is useful for IO devices that can
 865                 * merge IO requests if the physical pages are ordered
 866                 * properly.
 867                 */
 868                if (likely(cold == 0))
 869                        list_add(&page->lru, list);
 870                else
 871                        list_add_tail(&page->lru, list);
 872                set_page_private(page, migratetype);
 873                list = &page->lru;
 874        }
 875        spin_unlock(&zone->lock);
 876        return i;
 877}
 878
 879#ifdef CONFIG_NUMA
 880/*
 881 * Called from the vmstat counter updater to drain pagesets of this
 882 * currently executing processor on remote nodes after they have
 883 * expired.
 884 *
 885 * Note that this function must be called with the thread pinned to
 886 * a single processor.
 887 */
 888void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 889{
 890        unsigned long flags;
 891        int to_drain;
 892
 893        local_irq_save(flags);
 894        if (pcp->count >= pcp->batch)
 895                to_drain = pcp->batch;
 896        else
 897                to_drain = pcp->count;
 898        free_pages_bulk(zone, to_drain, &pcp->list, 0);
 899        pcp->count -= to_drain;
 900        local_irq_restore(flags);
 901}
 902#endif
 903
 904/*
 905 * Drain pages of the indicated processor.
 906 *
 907 * The processor must either be the current processor and the
 908 * thread pinned to the current processor or a processor that
 909 * is not online.
 910 */
 911static void drain_pages(unsigned int cpu)
 912{
 913        unsigned long flags;
 914        struct zone *zone;
 915
 916        for_each_zone(zone) {
 917                struct per_cpu_pageset *pset;
 918                struct per_cpu_pages *pcp;
 919
 920                if (!populated_zone(zone))
 921                        continue;
 922
 923                pset = zone_pcp(zone, cpu);
 924
 925                pcp = &pset->pcp;
 926                local_irq_save(flags);
 927                free_pages_bulk(zone, pcp->count, &pcp->list, 0);
 928                pcp->count = 0;
 929                local_irq_restore(flags);
 930        }
 931}
 932
 933/*
 934 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 935 */
 936void drain_local_pages(void *arg)
 937{
 938        drain_pages(smp_processor_id());
 939}
 940
 941/*
 942 * Spill all the per-cpu pages from all CPUs back into the buddy allocator
 943 */
 944void drain_all_pages(void)
 945{
 946        on_each_cpu(drain_local_pages, NULL, 1);
 947}
 948
 949#ifdef CONFIG_HIBERNATION
 950
 951void mark_free_pages(struct zone *zone)
 952{
 953        unsigned long pfn, max_zone_pfn;
 954        unsigned long flags;
 955        int order, t;
 956        struct list_head *curr;
 957
 958        if (!zone->spanned_pages)
 959                return;
 960
 961        spin_lock_irqsave(&zone->lock, flags);
 962
 963        max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 964        for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 965                if (pfn_valid(pfn)) {
 966                        struct page *page = pfn_to_page(pfn);
 967
 968                        if (!swsusp_page_is_forbidden(page))
 969                                swsusp_unset_page_free(page);
 970                }
 971
 972        for_each_migratetype_order(order, t) {
 973                list_for_each(curr, &zone->free_area[order].free_list[t]) {
 974                        unsigned long i;
 975
 976                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
 977                        for (i = 0; i < (1UL << order); i++)
 978                                swsusp_set_page_free(pfn_to_page(pfn + i));
 979                }
 980        }
 981        spin_unlock_irqrestore(&zone->lock, flags);
 982}
 983#endif /* CONFIG_HIBERNATION */
 984
 985/*
 986 * Free a 0-order page
 987 */
 988static void free_hot_cold_page(struct page *page, int cold)
 989{
 990        struct zone *zone = page_zone(page);
 991        struct per_cpu_pages *pcp;
 992        unsigned long flags;
 993
 994        if (PageAnon(page))
 995                page->mapping = NULL;
 996        if (free_pages_check(page))
 997                return;
 998
 999        if (!PageHighMem(page)) {
1000                debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
1001                debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
1002        }
1003        arch_free_page(page, 0);
1004        kernel_map_pages(page, 1, 0);
1005
1006        pcp = &zone_pcp(zone, get_cpu())->pcp;
1007        local_irq_save(flags);
1008        __count_vm_event(PGFREE);
1009        if (cold)
1010                list_add_tail(&page->lru, &pcp->list);
1011        else
1012                list_add(&page->lru, &pcp->list);
1013        set_page_private(page, get_pageblock_migratetype(page));
1014        pcp->count++;
1015        if (pcp->count >= pcp->high) {
1016                free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
1017                pcp->count -= pcp->batch;
1018        }
1019        local_irq_restore(flags);
1020        put_cpu();
1021}
1022
1023void free_hot_page(struct page *page)
1024{
1025        free_hot_cold_page(page, 0);
1026}
1027        
1028void free_cold_page(struct page *page)
1029{
1030        free_hot_cold_page(page, 1);
1031}
1032
1033/*
1034 * split_page takes a non-compound higher-order page, and splits it into
1035 * n (1<<order) sub-pages: page[0..n-1]
1036 * Each sub-page must be freed individually.
1037 *
1038 * Note: this is probably too low level an operation for use in drivers.
1039 * Please consult with lkml before using this in your driver.
1040 */
1041void split_page(struct page *page, unsigned int order)
1042{
1043        int i;
1044
1045        VM_BUG_ON(PageCompound(page));
1046        VM_BUG_ON(!page_count(page));
1047        for (i = 1; i < (1 << order); i++)
1048                set_page_refcounted(page + i);
1049}
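/*
 * Illustrative usage sketch (hypothetical caller; assumes only the usual
 * alloc_pages()/__free_page() interfaces):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}
 *
 * After split_page(), each of the four sub-pages carries its own
 * reference and may be freed (or kept) independently.
 */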
1050
1051/*
1052 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
1053 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
1054 * or two.
1055 */
1056static struct page *buffered_rmqueue(struct zone *preferred_zone,
1057                        struct zone *zone, int order, gfp_t gfp_flags)
1058{
1059        unsigned long flags;
1060        struct page *page;
1061        int cold = !!(gfp_flags & __GFP_COLD);
1062        int cpu;
1063        int migratetype = allocflags_to_migratetype(gfp_flags);
1064
1065again:
1066        cpu  = get_cpu();
1067        if (likely(order == 0)) {
1068                struct per_cpu_pages *pcp;
1069
1070                pcp = &zone_pcp(zone, cpu)->pcp;
1071                local_irq_save(flags);
1072                if (!pcp->count) {
1073                        pcp->count = rmqueue_bulk(zone, 0,
1074                                        pcp->batch, &pcp->list,
1075                                        migratetype, cold);
1076                        if (unlikely(!pcp->count))
1077                                goto failed;
1078                }
1079
1080                /* Find a page of the appropriate migrate type */
1081                if (cold) {
1082                        list_for_each_entry_reverse(page, &pcp->list, lru)
1083                                if (page_private(page) == migratetype)
1084                                        break;
1085                } else {
1086                        list_for_each_entry(page, &pcp->list, lru)
1087                                if (page_private(page) == migratetype)
1088                                        break;
1089                }
1090
1091                /* Allocate more to the pcp list if necessary */
1092                if (unlikely(&page->lru == &pcp->list)) {
1093                        pcp->count += rmqueue_bulk(zone, 0,
1094                                        pcp->batch, &pcp->list,
1095                                        migratetype, cold);
1096                        page = list_entry(pcp->list.next, struct page, lru);
1097                }
1098
1099                list_del(&page->lru);
1100                pcp->count--;
1101        } else {
1102                spin_lock_irqsave(&zone->lock, flags);
1103                page = __rmqueue(zone, order, migratetype);
1104                spin_unlock(&zone->lock);
1105                if (!page)
1106                        goto failed;
1107        }
1108
1109        __count_zone_vm_events(PGALLOC, zone, 1 << order);
1110        zone_statistics(preferred_zone, zone);
1111        local_irq_restore(flags);
1112        put_cpu();
1113
1114        VM_BUG_ON(bad_range(zone, page));
1115        if (prep_new_page(page, order, gfp_flags))
1116                goto again;
1117        return page;
1118
1119failed:
1120        local_irq_restore(flags);
1121        put_cpu();
1122        return NULL;
1123}
1124
1125#define ALLOC_NO_WATERMARKS     0x01 /* don't check watermarks at all */
1126#define ALLOC_WMARK_MIN         0x02 /* use pages_min watermark */
1127#define ALLOC_WMARK_LOW         0x04 /* use pages_low watermark */
1128#define ALLOC_WMARK_HIGH        0x08 /* use pages_high watermark */
1129#define ALLOC_HARDER            0x10 /* try to alloc harder */
1130#define ALLOC_HIGH              0x20 /* __GFP_HIGH set */
1131#define ALLOC_CPUSET            0x40 /* check for correct cpuset */
1132
1133#ifdef CONFIG_FAIL_PAGE_ALLOC
1134
1135static struct fail_page_alloc_attr {
1136        struct fault_attr attr;
1137
1138        u32 ignore_gfp_highmem;
1139        u32 ignore_gfp_wait;
1140        u32 min_order;
1141
1142#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1143
1144        struct dentry *ignore_gfp_highmem_file;
1145        struct dentry *ignore_gfp_wait_file;
1146        struct dentry *min_order_file;
1147
1148#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1149
1150} fail_page_alloc = {
1151        .attr = FAULT_ATTR_INITIALIZER,
1152        .ignore_gfp_wait = 1,
1153        .ignore_gfp_highmem = 1,
1154        .min_order = 1,
1155};
1156
1157static int __init setup_fail_page_alloc(char *str)
1158{
1159        return setup_fault_attr(&fail_page_alloc.attr, str);
1160}
1161__setup("fail_page_alloc=", setup_fail_page_alloc);
1162
1163static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1164{
1165        if (order < fail_page_alloc.min_order)
1166                return 0;
1167        if (gfp_mask & __GFP_NOFAIL)
1168                return 0;
1169        if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
1170                return 0;
1171        if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
1172                return 0;
1173
1174        return should_fail(&fail_page_alloc.attr, 1 << order);
1175}
1176
1177#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
1178
1179static int __init fail_page_alloc_debugfs(void)
1180{
1181        mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
1182        struct dentry *dir;
1183        int err;
1184
1185        err = init_fault_attr_dentries(&fail_page_alloc.attr,
1186                                       "fail_page_alloc");
1187        if (err)
1188                return err;
1189        dir = fail_page_alloc.attr.dentries.dir;
1190
1191        fail_page_alloc.ignore_gfp_wait_file =
1192                debugfs_create_bool("ignore-gfp-wait", mode, dir,
1193                                      &fail_page_alloc.ignore_gfp_wait);
1194
1195        fail_page_alloc.ignore_gfp_highmem_file =
1196                debugfs_create_bool("ignore-gfp-highmem", mode, dir,
1197                                      &fail_page_alloc.ignore_gfp_highmem);
1198        fail_page_alloc.min_order_file =
1199                debugfs_create_u32("min-order", mode, dir,
1200                                   &fail_page_alloc.min_order);
1201
1202        if (!fail_page_alloc.ignore_gfp_wait_file ||
1203            !fail_page_alloc.ignore_gfp_highmem_file ||
1204            !fail_page_alloc.min_order_file) {
1205                err = -ENOMEM;
1206                debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
1207                debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
1208                debugfs_remove(fail_page_alloc.min_order_file);
1209                cleanup_fault_attr_dentries(&fail_page_alloc.attr);
1210        }
1211
1212        return err;
1213}
1214
1215late_initcall(fail_page_alloc_debugfs);
1216
1217#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
1218
1219#else /* CONFIG_FAIL_PAGE_ALLOC */
1220
1221static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
1222{
1223        return 0;
1224}
1225
1226#endif /* CONFIG_FAIL_PAGE_ALLOC */
1227
1228/*
1229 * Return 1 if free pages are above 'mark'. This takes into account the order
1230 * of the allocation.
1231 */
1232int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
1233                      int classzone_idx, int alloc_flags)
1234{
1235        /* free_pages may go negative - that's OK */
1236        long min = mark;
1237        long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
1238        int o;
1239
1240        if (alloc_flags & ALLOC_HIGH)
1241                min -= min / 2;
1242        if (alloc_flags & ALLOC_HARDER)
1243                min -= min / 4;
1244
1245        if (free_pages <= min + z->lowmem_reserve[classzone_idx])
1246                return 0;
1247        for (o = 0; o < order; o++) {
1248                /* At the next order, this order's pages become unavailable */
1249                free_pages -= z->free_area[o].nr_free << o;
1250
1251                /* Require fewer higher order pages to be free */
1252                min >>= 1;
1253
1254                if (free_pages <= min)
1255                        return 0;
1256        }
1257        return 1;
1258}
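/*
 * Worked example (illustrative): for an order-2 request against
 * mark = 128 with neither ALLOC_HIGH nor ALLOC_HARDER and a zero
 * lowmem_reserve, the zone must have more than 128 free pages overall;
 * excluding its order-0 free pages it must still have more than 64, and
 * excluding its order-0 and order-1 free pages it must still have more
 * than 32.
 */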
1259
1260#ifdef CONFIG_NUMA
1261/*
1262 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
1263 * skip over zones that are not allowed by the cpuset, or that have
1264 * been recently (in last second) found to be nearly full.  See further
1265 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
1266 * that have to skip over a lot of full or unallowed zones.
1267 *
1268 * If the zonelist cache is present in the passed in zonelist, then
1269 * returns a pointer to the allowed node mask (either the current
1270 * task's mems_allowed, or node_states[N_HIGH_MEMORY].)
1271 *
1272 * If the zonelist cache is not available for this zonelist, does
1273 * nothing and returns NULL.
1274 *
1275 * If the fullzones BITMAP in the zonelist cache is stale (more than
1276 * a second since last zap'd) then we zap it out (clear its bits.)
1277 *
1278 * We hold off even calling zlc_setup, until after we've checked the
1279 * first zone in the zonelist, on the theory that most allocations will
1280 * be satisfied from that first zone, so best to examine that zone as
1281 * quickly as we can.
1282 */
1283static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1284{
1285        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1286        nodemask_t *allowednodes;       /* zonelist_cache approximation */
1287
1288        zlc = zonelist->zlcache_ptr;
1289        if (!zlc)
1290                return NULL;
1291
1292        if (time_after(jiffies, zlc->last_full_zap + HZ)) {
1293                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
1294                zlc->last_full_zap = jiffies;
1295        }
1296
1297        allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
1298                                        &cpuset_current_mems_allowed :
1299                                        &node_states[N_HIGH_MEMORY];
1300        return allowednodes;
1301}
1302
1303/*
1304 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1305 * if it is worth looking at further for free memory:
1306 *  1) Check that the zone isn't thought to be full (doesn't have its
1307 *     bit set in the zonelist_cache fullzones BITMAP).
1308 *  2) Check that the zone's node (obtained from the zonelist_cache
1309 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
1310 * Return true (non-zero) if zone is worth looking at further, or
1311 * else return false (zero) if it is not.
1312 *
1313 * This check -ignores- the distinction between various watermarks,
1314 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
1315 * found to be full for any variation of these watermarks, it will
1316 * be considered full for up to one second by all requests, unless
1317 * we are so low on memory on all allowed nodes that we are forced
1318 * into the second scan of the zonelist.
1319 *
1320 * In the second scan we ignore this zonelist cache and exactly
1321 * apply the watermarks to all zones, even if it is slower to do so.
1322 * We are low on memory in the second scan, and should leave no stone
1323 * unturned looking for a free page.
1324 */
1325static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1326                                                nodemask_t *allowednodes)
1327{
1328        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1329        int i;                          /* index of *z in zonelist zones */
1330        int n;                          /* node that zone *z is on */
1331
1332        zlc = zonelist->zlcache_ptr;
1333        if (!zlc)
1334                return 1;
1335
1336        i = z - zonelist->_zonerefs;
1337        n = zlc->z_to_n[i];
1338
1339        /* This zone is worth trying if it is allowed but not full */
1340        return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1341}
1342
1343/*
1344 * Given 'z' scanning a zonelist, set the corresponding bit in
1345 * zlc->fullzones, so that subsequent attempts to allocate a page
1346 * from that zone don't waste time re-examining it.
1347 */
1348static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1349{
1350        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
1351        int i;                          /* index of *z in zonelist zones */
1352
1353        zlc = zonelist->zlcache_ptr;
1354        if (!zlc)
1355                return;
1356
1357        i = z - zonelist->_zonerefs;
1358
1359        set_bit(i, zlc->fullzones);
1360}
1361
1362#else   /* CONFIG_NUMA */
1363
1364static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
1365{
1366        return NULL;
1367}
1368
1369static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1370                                nodemask_t *allowednodes)
1371{
1372        return 1;
1373}
1374
1375static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
1376{
1377}
1378#endif  /* CONFIG_NUMA */
1379
1380/*
1381 * get_page_from_freelist goes through the zonelist trying to allocate
1382 * a page.
1383 */
1384static struct page *
1385get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
1386                struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
1387{
1388        struct zoneref *z;
1389        struct page *page = NULL;
1390        int classzone_idx;
1391        struct zone *zone, *preferred_zone;
1392        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
1393        int zlc_active = 0;             /* set if using zonelist_cache */
1394        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
1395
1396        (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
1397                                                        &preferred_zone);
1398        if (!preferred_zone)
1399                return NULL;
1400
1401        classzone_idx = zone_idx(preferred_zone);
1402
1403zonelist_scan:
1404        /*
1405         * Scan zonelist, looking for a zone with enough free.
1406         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1407         */
1408        for_each_zone_zonelist_nodemask(zone, z, zonelist,
1409                                                high_zoneidx, nodemask) {
1410                if (NUMA_BUILD && zlc_active &&
1411                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
1412                                continue;
1413                if ((alloc_flags & ALLOC_CPUSET) &&
1414                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
1415                                goto try_next_zone;
1416
1417                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
1418                        unsigned long mark;
1419                        if (alloc_flags & ALLOC_WMARK_MIN)
1420                                mark = zone->pages_min;
1421                        else if (alloc_flags & ALLOC_WMARK_LOW)
1422                                mark = zone->pages_low;
1423                        else
1424                                mark = zone->pages_high;
1425                        if (!zone_watermark_ok(zone, order, mark,
1426                                    classzone_idx, alloc_flags)) {
1427                                if (!zone_reclaim_mode ||
1428                                    !zone_reclaim(zone, gfp_mask, order))
1429                                        goto this_zone_full;
1430                        }
1431                }
1432
1433                page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
1434                if (page)
1435                        break;
1436this_zone_full:
1437                if (NUMA_BUILD)
1438                        zlc_mark_zone_full(zonelist, z);
1439try_next_zone:
1440                if (NUMA_BUILD && !did_zlc_setup) {
1441                        /* we do zlc_setup after the first zone is tried */
1442                        allowednodes = zlc_setup(zonelist, alloc_flags);
1443                        zlc_active = 1;
1444                        did_zlc_setup = 1;
1445                }
1446        }
1447
1448        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
1449                /* Disable zlc cache for second zonelist scan */
1450                zlc_active = 0;
1451                goto zonelist_scan;
1452        }
1453        return page;
1454}
1455
1456/*
1457 * This is the 'heart' of the zoned buddy allocator.
1458 */
1459struct page *
1460__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
1461                        struct zonelist *zonelist, nodemask_t *nodemask)
1462{
1463        const gfp_t wait = gfp_mask & __GFP_WAIT;
1464        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1465        struct zoneref *z;
1466        struct zone *zone;
1467        struct page *page;
1468        struct reclaim_state reclaim_state;
1469        struct task_struct *p = current;
1470        int do_retry;
1471        int alloc_flags;
1472        unsigned long did_some_progress;
1473        unsigned long pages_reclaimed = 0;
1474
1475        might_sleep_if(wait);
1476
1477        if (should_fail_alloc_page(gfp_mask, order))
1478                return NULL;
1479
1480restart:
1481        z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */
1482
1483        if (unlikely(!z->zone)) {
1484                /*
1485                 * Happens if we have an empty zonelist as a result of
1486                 * GFP_THISNODE being used on a memoryless node
1487                 */
1488                return NULL;
1489        }
1490
1491        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
1492                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
1493        if (page)
1494                goto got_pg;
1495
1496        /*
1497         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
1498         * __GFP_NOWARN set) should not cause reclaim since the subsystem
1499         * (e.g. slab) using GFP_THISNODE may choose to trigger reclaim
1500         * using a larger set of nodes after it has established that the
1501         * allowed per node queues are empty and that nodes are
1502         * overallocated.
1503         */
1504        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
1505                goto nopage;
1506
1507        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
1508                wakeup_kswapd(zone, order);
1509
1510        /*
1511         * OK, we're below the kswapd watermark and have kicked background
1512         * reclaim. Now things get more complex, so set up alloc_flags according
1513         * to how we want to proceed.
1514         *
1515         * The caller may dip into page reserves a bit more if the caller
1516         * cannot run direct reclaim, or if the caller has realtime scheduling
1517         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
1518         * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
1519         */
1520        alloc_flags = ALLOC_WMARK_MIN;
1521        if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
1522                alloc_flags |= ALLOC_HARDER;
1523        if (gfp_mask & __GFP_HIGH)
1524                alloc_flags |= ALLOC_HIGH;
1525        if (wait)
1526                alloc_flags |= ALLOC_CPUSET;
1527
1528        /*
1529         * Go through the zonelist again. Let __GFP_HIGH and allocations
1530         * coming from realtime tasks go deeper into reserves.
1531         *
1532         * This is the last chance, in general, before the goto nopage.
1533         * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
1534         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
1535         */
1536        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
1537                                                high_zoneidx, alloc_flags);
1538        if (page)
1539                goto got_pg;
1540
1541        /* This allocation should allow future memory freeing. */
1542
1543rebalance:
1544        if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
1545                        && !in_interrupt()) {
1546                if (!(gfp_mask & __GFP_NOMEMALLOC)) {
1547nofail_alloc:
1548                        /* go through the zonelist yet again, ignoring mins */
1549                        page = get_page_from_freelist(gfp_mask, nodemask, order,
1550                                zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
1551                        if (page)
1552                                goto got_pg;
1553                        if (gfp_mask & __GFP_NOFAIL) {
1554                                congestion_wait(WRITE, HZ/50);
1555                                goto nofail_alloc;
1556                        }
1557                }
1558                goto nopage;
1559        }
1560
1561        /* Atomic allocations - we can't balance anything */
1562        if (!wait)
1563                goto nopage;
1564
1565        cond_resched();
1566
1567        /* We now go into synchronous reclaim */
1568        cpuset_memory_pressure_bump();
1569        p->flags |= PF_MEMALLOC;
1570        reclaim_state.reclaimed_slab = 0;
1571        p->reclaim_state = &reclaim_state;
1572
1573        did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
1574
1575        p->reclaim_state = NULL;
1576        p->flags &= ~PF_MEMALLOC;
1577
1578        cond_resched();
1579
1580        if (order != 0)
1581                drain_all_pages();
1582
1583        if (likely(did_some_progress)) {
1584                page = get_page_from_freelist(gfp_mask, nodemask, order,
1585                                        zonelist, high_zoneidx, alloc_flags);
1586                if (page)
1587                        goto got_pg;
1588        } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
1589                if (!try_set_zone_oom(zonelist, gfp_mask)) {
1590                        schedule_timeout_uninterruptible(1);
1591                        goto restart;
1592                }
1593
1594                /*
1595                 * Go through the zonelist yet one more time, keeping a
1596                 * very high watermark here. This is only to catch a
1597                 * parallel OOM killing; we must fail if we're still
1598                 * under heavy pressure.
1599                 */
1600                page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
1601                        order, zonelist, high_zoneidx,
1602                        ALLOC_WMARK_HIGH|ALLOC_CPUSET);
1603                if (page) {
1604                        clear_zonelist_oom(zonelist, gfp_mask);
1605                        goto got_pg;
1606                }
1607
1608                /* The OOM killer will not help higher order allocs so fail */
1609                if (order > PAGE_ALLOC_COSTLY_ORDER) {
1610                        clear_zonelist_oom(zonelist, gfp_mask);
1611                        goto nopage;
1612                }
1613
1614                out_of_memory(zonelist, gfp_mask, order);
1615                clear_zonelist_oom(zonelist, gfp_mask);
1616                goto restart;
1617        }
1618
1619        /*
1620         * Don't let big-order allocations loop unless the caller explicitly
1621         * requests that.  Wait for some write requests to complete then retry.
1622         *
1623         * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
1624         * implicitly behaves like __GFP_NOFAIL, but that may not be true
1625         * in other implementations.
1626         *
1627         * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
1628         * specified, then we retry until we no longer reclaim any pages
1629         * (above), or we've reclaimed an order of pages at least as
1630         * large as the allocation's order. In both cases, if the
1631         * allocation still fails, we stop retrying.
1632         */
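        /*
         * Worked example (illustrative): an order-4 (16-page) allocation
         * with __GFP_REPEAT keeps retrying only while pages_reclaimed is
         * below 1 << 4 = 16 pages, whereas an order-0 GFP_KERNEL allocation
         * retries unconditionally because order <= PAGE_ALLOC_COSTLY_ORDER.
         */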
1633        pages_reclaimed += did_some_progress;
1634        do_retry = 0;
1635        if (!(gfp_mask & __GFP_NORETRY)) {
1636                if (order <= PAGE_ALLOC_COSTLY_ORDER) {
1637                        do_retry = 1;
1638                } else {
1639                        if (gfp_mask & __GFP_REPEAT &&
1640                                pages_reclaimed < (1 << order))
1641                                        do_retry = 1;
1642                }
1643                if (gfp_mask & __GFP_NOFAIL)
1644                        do_retry = 1;
1645        }
1646        if (do_retry) {
1647                congestion_wait(WRITE, HZ/50);
1648                goto rebalance;
1649        }
1650
1651nopage:
1652        if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
1653                printk(KERN_WARNING "%s: page allocation failure."
1654                        " order:%d, mode:0x%x\n",
1655                        p->comm, order, gfp_mask);
1656                dump_stack();
1657                show_mem();
1658        }
1659got_pg:
1660        return page;
1661}
1662EXPORT_SYMBOL(__alloc_pages_internal);
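/*
 * Example (illustrative only): most callers reach the allocator above via
 * the alloc_pages()/__get_free_pages() wrappers.  A caller that may sleep
 * typically uses
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 * and can enter direct reclaim, while interrupt context uses
 *
 *	struct page *page = alloc_pages(GFP_ATOMIC, 0);
 *
 * which, per the comments above, sets ALLOC_HARDER and ALLOC_HIGH and never
 * sleeps.  Either way the page is returned with __free_pages(page, 0).
 */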
1663
1664/*
1665 * Common helper functions.
1666 */
1667unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
1668{
1669        struct page * page;
1670        page = alloc_pages(gfp_mask, order);
1671        if (!page)
1672                return 0;
1673        return (unsigned long) page_address(page);
1674}
1675
1676EXPORT_SYMBOL(__get_free_pages);
1677
1678unsigned long get_zeroed_page(gfp_t gfp_mask)
1679{
1680        struct page * page;
1681
1682        /*
1683         * get_zeroed_page() returns a kernel virtual address, which cannot
1684         * represent a highmem page
1685         */
1686        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
1687
1688        page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
1689        if (page)
1690                return (unsigned long) page_address(page);
1691        return 0;
1692}
1693
1694EXPORT_SYMBOL(get_zeroed_page);
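/*
 * Example (illustrative only): the address-based helpers above suit small
 * kernel buffers that must not live in highmem, e.g.
 *
 *	unsigned long buf = get_zeroed_page(GFP_KERNEL);
 *	if (buf) {
 *		...
 *		free_page(buf);
 *	}
 *
 * free_page()/free_pages() take the virtual address returned here, whereas
 * __free_pages() below takes a struct page pointer.
 */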
1695
1696void __pagevec_free(struct pagevec *pvec)
1697{
1698        int i = pagevec_count(pvec);
1699
1700        while (--i >= 0)
1701                free_hot_cold_page(pvec->pages[i], pvec->cold);
1702}
1703
1704void __free_pages(struct page *page, unsigned int order)
1705{
1706        if (put_page_testzero(page)) {
1707                if (order == 0)
1708                        free_hot_page(page);
1709                else
1710                        __free_pages_ok(page, order);
1711        }
1712}
1713
1714EXPORT_SYMBOL(__free_pages);
1715
1716void free_pages(unsigned long addr, unsigned int order)
1717{
1718        if (addr != 0) {
1719                VM_BUG_ON(!virt_addr_valid((void *)addr));
1720                __free_pages(virt_to_page((void *)addr), order);
1721        }
1722}
1723
1724EXPORT_SYMBOL(free_pages);
1725
1726/**
1727 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
1728 * @size: the number of bytes to allocate
1729 * @gfp_mask: GFP flags for the allocation
1730 *
1731 * This function is similar to alloc_pages(), except that it allocates the
1732 * minimum number of pages to satisfy the request.  alloc_pages() can only
1733 * allocate memory in power-of-two pages.
1734 *
1735 * This function is also limited by MAX_ORDER.
1736 *
1737 * Memory allocated by this function must be released by free_pages_exact().
1738 */
1739void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
1740{
1741        unsigned int order = get_order(size);
1742        unsigned long addr;
1743
1744        addr = __get_free_pages(gfp_mask, order);
1745        if (addr) {
1746                unsigned long alloc_end = addr + (PAGE_SIZE << order);
1747                unsigned long used = addr + PAGE_ALIGN(size);
1748
1749                split_page(virt_to_page(addr), order);
1750                while (used < alloc_end) {
1751                        free_page(used);
1752                        used += PAGE_SIZE;
1753                }
1754        }
1755
1756        return (void *)addr;
1757}
1758EXPORT_SYMBOL(alloc_pages_exact);
1759
1760/**
1761 * free_pages_exact - release memory allocated via alloc_pages_exact()
1762 * @virt: the value returned by alloc_pages_exact.
1763 * @size: size of allocation, same value as passed to alloc_pages_exact().
1764 *
1765 * Release the memory allocated by a previous call to alloc_pages_exact.
1766 */
1767void free_pages_exact(void *virt, size_t size)
1768{
1769        unsigned long addr = (unsigned long)virt;
1770        unsigned long end = addr + PAGE_ALIGN(size);
1771
1772        while (addr < end) {
1773                free_page(addr);
1774                addr += PAGE_SIZE;
1775        }
1776}
1777EXPORT_SYMBOL(free_pages_exact);
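/*
 * Worked example (illustrative, assumes a 4KB PAGE_SIZE): a request for
 * 10KB gives get_order(10KB) == 2, so an order-2 (16KB) block is allocated,
 * split into four order-0 pages, and the unused fourth page is freed again,
 * leaving exactly three contiguous pages:
 *
 *	void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		free_pages_exact(buf, 10 * 1024);
 *	}
 */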
1778
1779static unsigned int nr_free_zone_pages(int offset)
1780{
1781        struct zoneref *z;
1782        struct zone *zone;
1783
1784        /* Just pick one node, since fallback list is circular */
1785        unsigned int sum = 0;
1786
1787        struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
1788
1789        for_each_zone_zonelist(zone, z, zonelist, offset) {
1790                unsigned long size = zone->present_pages;
1791                unsigned long high = zone->pages_high;
1792                if (size > high)
1793                        sum += size - high;
1794        }
1795
1796        return sum;
1797}
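/*
 * Worked example (illustrative): for a node with a 1000-page DMA zone
 * (pages_high 50) and a 200000-page Normal zone (pages_high 1000),
 * nr_free_buffer_pages() below reports (1000 - 50) + (200000 - 1000) =
 * 199950 allocatable lowmem pages.
 */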
1798
1799/*
1800 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
1801 */
1802unsigned int nr_free_buffer_pages(void)
1803{
1804        return nr_free_zone_pages(gfp_zone(GFP_USER));
1805}
1806EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
1807
1808/*
1809 * Amount of free RAM allocatable within all zones
1810 */
1811unsigned int nr_free_pagecache_pages(void)
1812{
1813        return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
1814}
1815
1816static inline void show_node(struct zone *zone)
1817{
1818        if (NUMA_BUILD)
1819                printk("Node %d ", zone_to_nid(zone));
1820}
1821
1822void si_meminfo(struct sysinfo *val)
1823{
1824        val->totalram = totalram_pages;
1825        val->sharedram = 0;
1826        val->freeram = global_page_state(NR_FREE_PAGES);
1827        val->bufferram = nr_blockdev_pages();
1828        val->totalhigh = totalhigh_pages;
1829        val->freehigh = nr_free_highpages();
1830        val->mem_unit = PAGE_SIZE;
1831}
1832
1833EXPORT_SYMBOL(si_meminfo);
1834
1835#ifdef CONFIG_NUMA
1836void si_meminfo_node(struct sysinfo *val, int nid)
1837{
1838        pg_data_t *pgdat = NODE_DATA(nid);
1839
1840        val->totalram = pgdat->node_present_pages;
1841        val->freeram = node_page_state(nid, NR_FREE_PAGES);
1842#ifdef CONFIG_HIGHMEM
1843        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
1844        val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
1845                        NR_FREE_PAGES);
1846#else
1847        val->totalhigh = 0;
1848        val->freehigh = 0;
1849#endif
1850        val->mem_unit = PAGE_SIZE;
1851}
1852#endif
1853
1854#define K(x) ((x) << (PAGE_SHIFT-10))
1855
1856/*
1857 * Show free area list (used inside shift_scroll-lock stuff)
1858 * We also calculate the percentage fragmentation. We do this by counting the
1859 * memory on each free list with the exception of the first item on the list.
1860 */
1861void show_free_areas(void)
1862{
1863        int cpu;
1864        struct zone *zone;
1865
1866        for_each_zone(zone) {
1867                if (!populated_zone(zone))
1868                        continue;
1869
1870                show_node(zone);
1871                printk("%s per-cpu:\n", zone->name);
1872
1873                for_each_online_cpu(cpu) {
1874                        struct per_cpu_pageset *pageset;
1875
1876                        pageset = zone_pcp(zone, cpu);
1877
1878                        printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
1879                               cpu, pageset->pcp.high,
1880                               pageset->pcp.batch, pageset->pcp.count);
1881                }
1882        }
1883
1884        printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
1885                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
1886                global_page_state(NR_ACTIVE),
1887                global_page_state(NR_INACTIVE),
1888                global_page_state(NR_FILE_DIRTY),
1889                global_page_state(NR_WRITEBACK),
1890                global_page_state(NR_UNSTABLE_NFS),
1891                global_page_state(NR_FREE_PAGES),
1892                global_page_state(NR_SLAB_RECLAIMABLE) +
1893                        global_page_state(NR_SLAB_UNRECLAIMABLE),
1894                global_page_state(NR_FILE_MAPPED),
1895                global_page_state(NR_PAGETABLE),
1896                global_page_state(NR_BOUNCE));
1897
1898        for_each_zone(zone) {
1899                int i;
1900
1901                if (!populated_zone(zone))
1902                        continue;
1903
1904                show_node(zone);
1905                printk("%s"
1906                        " free:%lukB"
1907                        " min:%lukB"
1908                        " low:%lukB"
1909                        " high:%lukB"
1910                        " active:%lukB"
1911                        " inactive:%lukB"
1912                        " present:%lukB"
1913                        " pages_scanned:%lu"
1914                        " all_unreclaimable? %s"
1915                        "\n",
1916                        zone->name,
1917                        K(zone_page_state(zone, NR_FREE_PAGES)),
1918                        K(zone->pages_min),
1919                        K(zone->pages_low),
1920                        K(zone->pages_high),
1921                        K(zone_page_state(zone, NR_ACTIVE)),
1922                        K(zone_page_state(zone, NR_INACTIVE)),
1923                        K(zone->present_pages),
1924                        zone->pages_scanned,
1925                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
1926                        );
1927                printk("lowmem_reserve[]:");
1928                for (i = 0; i < MAX_NR_ZONES; i++)
1929                        printk(" %lu", zone->lowmem_reserve[i]);
1930                printk("\n");
1931        }
1932
1933        for_each_zone(zone) {
1934                unsigned long nr[MAX_ORDER], flags, order, total = 0;
1935
1936                if (!populated_zone(zone))
1937                        continue;
1938
1939                show_node(zone);
1940                printk("%s: ", zone->name);
1941
1942                spin_lock_irqsave(&zone->lock, flags);
1943                for (order = 0; order < MAX_ORDER; order++) {
1944                        nr[order] = zone->free_area[order].nr_free;
1945                        total += nr[order] << order;
1946                }
1947                spin_unlock_irqrestore(&zone->lock, flags);
1948                for (order = 0; order < MAX_ORDER; order++)
1949                        printk("%lu*%lukB ", nr[order], K(1UL) << order);
1950                printk("= %lukB\n", K(total));
1951        }
1952
1953        printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
1954
1955        show_swap_cache_info();
1956}
1957
1958static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
1959{
1960        zoneref->zone = zone;
1961        zoneref->zone_idx = zone_idx(zone);
1962}
1963
1964/*
1965 * Builds allocation fallback zone lists.
1966 *
1967 * Add all populated zones of a node to the zonelist.
1968 */
1969static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
1970                                int nr_zones, enum zone_type zone_type)
1971{
1972        struct zone *zone;
1973
1974        BUG_ON(zone_type >= MAX_NR_ZONES);
1975        zone_type++;
1976
1977        do {
1978                zone_type--;
1979                zone = pgdat->node_zones + zone_type;
1980                if (populated_zone(zone)) {
1981                        zoneref_set_zone(zone,
1982                                &zonelist->_zonerefs[nr_zones++]);
1983                        check_highest_zone(zone_type);
1984                }
1985
1986        } while (zone_type);
1987        return nr_zones;
1988}
1989
1990
1991/*
1992 *  zonelist_order:
1993 *  0 = automatic detection of better ordering.
1994 *  1 = order by ([node] distance, -zonetype)
1995 *  2 = order by (-zonetype, [node] distance)
1996 *
1997 *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
1998 *  the same zonelist. So only NUMA can configure this param.
1999 */
2000#define ZONELIST_ORDER_DEFAULT  0
2001#define ZONELIST_ORDER_NODE     1
2002#define ZONELIST_ORDER_ZONE     2
2003
2004/* zonelist order in the kernel.
2005 * set_zonelist_order() will set this to NODE or ZONE.
2006 */
2007static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
2008static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
2009
2010
2011#ifdef CONFIG_NUMA
2012/* The value the user specified, possibly changed by boot option or sysctl */
2013static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2014/* string for sysctl */
2015#define NUMA_ZONELIST_ORDER_LEN 16
2016char numa_zonelist_order[16] = "default";
2017
2018/*
2019 * Interface for configuring zonelist ordering.
2020 * Command line option "numa_zonelist_order"
2021 *      = "[dD]efault"  - default, automatic configuration.
2022 *      = "[nN]ode"     - order by node locality, then by zone within node
2023 *      = "[zZ]one"     - order by zone, then by locality within zone
2024 */
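/*
 * Usage example (illustrative): booting with "numa_zonelist_order=zone"
 * selects zone order at startup; the same setting can be changed at runtime
 * through the sysctl below, e.g. "echo node > /proc/sys/vm/numa_zonelist_order".
 */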
2025
2026static int __parse_numa_zonelist_order(char *s)
2027{
2028        if (*s == 'd' || *s == 'D') {
2029                user_zonelist_order = ZONELIST_ORDER_DEFAULT;
2030        } else if (*s == 'n' || *s == 'N') {
2031                user_zonelist_order = ZONELIST_ORDER_NODE;
2032        } else if (*s == 'z' || *s == 'Z') {
2033                user_zonelist_order = ZONELIST_ORDER_ZONE;
2034        } else {
2035                printk(KERN_WARNING
2036                        "Ignoring invalid numa_zonelist_order value:  "
2037                        "%s\n", s);
2038                return -EINVAL;
2039        }
2040        return 0;
2041}
2042
2043static __init int setup_numa_zonelist_order(char *s)
2044{
2045        if (s)
2046                return __parse_numa_zonelist_order(s);
2047        return 0;
2048}
2049early_param("numa_zonelist_order", setup_numa_zonelist_order);
2050
2051/*
2052 * sysctl handler for numa_zonelist_order
2053 */
2054int numa_zonelist_order_handler(ctl_table *table, int write,
2055                struct file *file, void __user *buffer, size_t *length,
2056                loff_t *ppos)
2057{
2058        char saved_string[NUMA_ZONELIST_ORDER_LEN];
2059        int ret;
2060
2061        if (write)
2062                strncpy(saved_string, (char*)table->data,
2063                        NUMA_ZONELIST_ORDER_LEN);
2064        ret = proc_dostring(table, write, file, buffer, length, ppos);
2065        if (ret)
2066                return ret;
2067        if (write) {
2068                int oldval = user_zonelist_order;
2069                if (__parse_numa_zonelist_order((char*)table->data)) {
2070                        /*
2071                         * bogus value.  restore saved string
2072                         */
2073                        strncpy((char*)table->data, saved_string,
2074                                NUMA_ZONELIST_ORDER_LEN);
2075                        user_zonelist_order = oldval;
2076                } else if (oldval != user_zonelist_order)
2077                        build_all_zonelists();
2078        }
2079        return 0;
2080}
2081
2082
2083#define MAX_NODE_LOAD (num_online_nodes())
2084static int node_load[MAX_NUMNODES];
2085
2086/**
2087 * find_next_best_node - find the next node that should appear in a given node's fallback list
2088 * @node: node whose fallback list we're appending to
2089 * @used_node_mask: nodemask_t of already used nodes
2090 *
2091 * We use a number of factors to determine which is the next node that should
2092 * appear on a given node's fallback list.  The node should not have appeared
2093 * already in @node's fallback list, and it should be the next closest node
2094 * according to the distance array (which contains arbitrary distance values
2095 * from each node to each node in the system); we also prefer nodes
2096 * with no CPUs, since presumably they'll have very little allocation pressure
2097 * on them otherwise.
2098 * It returns -1 if no node is found.
2099 */
2100static int find_next_best_node(int node, nodemask_t *used_node_mask)
2101{
2102        int n, val;
2103        int min_val = INT_MAX;
2104        int best_node = -1;
2105        node_to_cpumask_ptr(tmp, 0);
2106
2107        /* Use the local node if we haven't already */
2108        if (!node_isset(node, *used_node_mask)) {
2109                node_set(node, *used_node_mask);
2110                return node;
2111        }
2112
2113        for_each_node_state(n, N_HIGH_MEMORY) {
2114
2115                /* Don't want a node to appear more than once */
2116                if (node_isset(n, *used_node_mask))
2117                        continue;
2118
2119                /* Use the distance array to find the distance */
2120                val = node_distance(node, n);
2121
2122                /* Penalize nodes under us ("prefer the next node") */
2123                val += (n < node);
2124
2125                /* Give preference to headless and unused nodes */
2126                node_to_cpumask_ptr_next(tmp, n);
2127                if (!cpus_empty(*tmp))
2128                        val += PENALTY_FOR_NODE_WITH_CPUS;
2129
2130                /* Slight preference for less loaded node */
2131                val *= (MAX_NODE_LOAD*MAX_NUMNODES);
2132                val += node_load[n];
2133
2134                if (val < min_val) {
2135                        min_val = val;
2136                        best_node = n;
2137                }
2138        }
2139
2140        if (best_node >= 0)
2141                node_set(best_node, *used_node_mask);
2142
2143        return best_node;
2144}
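/*
 * Example of the scoring above (illustrative): with a node_distance() of 20
 * to two candidate nodes, one with CPUs and one without, the CPU-less node
 * scores roughly 20 * MAX_NODE_LOAD * MAX_NUMNODES while the other scores
 * (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD * MAX_NUMNODES, so the
 * CPU-less node is picked first as the next fallback.
 */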
2145
2146
2147/*
2148 * Build zonelists ordered by node and zones within node.
2149 * This results in maximum locality--normal zone overflows into local
2150 * DMA zone, if any--but risks exhausting DMA zone.
2151 */
2152static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
2153{
2154        int j;
2155        struct zonelist *zonelist;
2156
2157        zonelist = &pgdat->node_zonelists[0];
2158        for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
2159                ;
2160        j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2161                                                        MAX_NR_ZONES - 1);
2162        zonelist->_zonerefs[j].zone = NULL;
2163        zonelist->_zonerefs[j].zone_idx = 0;
2164}
2165
2166/*
2167 * Build gfp_thisnode zonelists
2168 */
2169static void build_thisnode_zonelists(pg_data_t *pgdat)
2170{
2171        int j;
2172        struct zonelist *zonelist;
2173
2174        zonelist = &pgdat->node_zonelists[1];
2175        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2176        zonelist->_zonerefs[j].zone = NULL;
2177        zonelist->_zonerefs[j].zone_idx = 0;
2178}
2179
2180/*
2181 * Build zonelists ordered by zone and nodes within zones.
2182 * This results in conserving DMA zone[s] until all Normal memory is
2183 * exhausted, but results in overflowing to remote node while memory
2184 * may still exist in local DMA zone.
2185 */
2186static int node_order[MAX_NUMNODES];
2187
2188static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
2189{
2190        int pos, j, node;
2191        int zone_type;          /* needs to be signed */
2192        struct zone *z;
2193        struct zonelist *zonelist;
2194
2195        zonelist = &pgdat->node_zonelists[0];
2196        pos = 0;
2197        for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
2198                for (j = 0; j < nr_nodes; j++) {
2199                        node = node_order[j];
2200                        z = &NODE_DATA(node)->node_zones[zone_type];
2201                        if (populated_zone(z)) {
2202                                zoneref_set_zone(z,
2203                                        &zonelist->_zonerefs[pos++]);
2204                                check_highest_zone(zone_type);
2205                        }
2206                }
2207        }
2208        zonelist->_zonerefs[pos].zone = NULL;
2209        zonelist->_zonerefs[pos].zone_idx = 0;
2210}
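/*
 * Example (illustrative, 2 nodes each with Normal and DMA32 zones): node
 * order for node 0 yields N0_Normal, N0_DMA32, N1_Normal, N1_DMA32, whereas
 * the zone order built above yields N0_Normal, N1_Normal, N0_DMA32,
 * N1_DMA32, conserving the low zones at the cost of earlier off-node
 * fallback.
 */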
2211
2212static int default_zonelist_order(void)
2213{
2214        int nid, zone_type;
2215        unsigned long low_kmem_size, total_size;
2216        struct zone *z;
2217        int average_size;
2218        /*
2219         * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
2220         * If they are really small and used heavily, the system can fall
2221         * into OOM very easily.
2222         * This function detects the ZONE_DMA/DMA32 size and configures the zone order.
2223         */
2224        /* Is there ZONE_NORMAL ? (e.g. ppc has only a DMA zone..) */
2225        low_kmem_size = 0;
2226        total_size = 0;
2227        for_each_online_node(nid) {
2228                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2229                        z = &NODE_DATA(nid)->node_zones[zone_type];
2230                        if (populated_zone(z)) {
2231                                if (zone_type < ZONE_NORMAL)
2232                                        low_kmem_size += z->present_pages;
2233                                total_size += z->present_pages;
2234                        }
2235                }
2236        }
2237        if (!low_kmem_size ||  /* there is no DMA area. */
2238            low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
2239                return ZONELIST_ORDER_NODE;
2240        /*
2241         * Look into each node's config.
2242         * If there is a node whose DMA/DMA32 memory covers a very large
2243         * part of its local memory, NODE_ORDER may be suitable.
2244         */
2245        average_size = total_size /
2246                                (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
2247        for_each_online_node(nid) {
2248                low_kmem_size = 0;
2249                total_size = 0;
2250                for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
2251                        z = &NODE_DATA(nid)->node_zones[zone_type];
2252                        if (populated_zone(z)) {
2253                                if (zone_type < ZONE_NORMAL)
2254                                        low_kmem_size += z->present_pages;
2255                                total_size += z->present_pages;
2256                        }
2257                }
2258                if (low_kmem_size &&
2259                    total_size > average_size && /* ignore small node */
2260                    low_kmem_size > total_size * 70/100)
2261                        return ZONELIST_ORDER_NODE;
2262        }
2263        return ZONELIST_ORDER_ZONE;
2264}
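/*
 * Worked example (illustrative): a 2-node, 16GB machine with 4GB of
 * DMA/DMA32 memory, all of it on node 0.  Globally 4GB is neither zero nor
 * more than half of 16GB, and on node 0 it is 4GB of 8GB = 50%, below the
 * 70% threshold, so default_zonelist_order() returns ZONELIST_ORDER_ZONE.
 */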
2265
2266static void set_zonelist_order(void)
2267{
2268        if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
2269                current_zonelist_order = default_zonelist_order();
2270        else
2271                current_zonelist_order = user_zonelist_order;
2272}
2273
2274static void build_zonelists(pg_data_t *pgdat)
2275{
2276        int j, node, load;
2277        enum zone_type i;
2278        nodemask_t used_mask;
2279        int local_node, prev_node;
2280        struct zonelist *zonelist;
2281        int order = current_zonelist_order;
2282
2283        /* initialize zonelists */
2284        for (i = 0; i < MAX_ZONELISTS; i++) {
2285                zonelist = pgdat->node_zonelists + i;
2286                zonelist->_zonerefs[0].zone = NULL;
2287                zonelist->_zonerefs[0].zone_idx = 0;
2288        }
2289
2290        /* NUMA-aware ordering of nodes */
2291        local_node = pgdat->node_id;
2292        load = num_online_nodes();
2293        prev_node = local_node;
2294        nodes_clear(used_mask);
2295
2296        memset(node_load, 0, sizeof(node_load));
2297        memset(node_order, 0, sizeof(node_order));
2298        j = 0;
2299
2300        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
2301                int distance = node_distance(local_node, node);
2302
2303                /*
2304                 * If another node is sufficiently far away then it is better
2305                 * to reclaim pages in a zone before going off node.
2306                 */
2307                if (distance > RECLAIM_DISTANCE)
2308                        zone_reclaim_mode = 1;
2309
2310                /*
2311                 * We don't want to pressure a particular node.
2312                 * So we add a penalty to the first node in the same
2313                 * distance group, making the selection round-robin.
2314                 */
2315                if (distance != node_distance(local_node, prev_node))
2316                        node_load[node] = load;
2317
2318                prev_node = node;
2319                load--;
2320                if (order == ZONELIST_ORDER_NODE)
2321                        build_zonelists_in_node_order(pgdat, node);
2322                else
2323                        node_order[j++] = node; /* remember order */
2324        }
2325
2326        if (order == ZONELIST_ORDER_ZONE) {
2327                /* calculate node order -- i.e., DMA last! */
2328                build_zonelists_in_zone_order(pgdat, j);
2329        }
2330
2331        build_thisnode_zonelists(pgdat);
2332}
2333
2334/* Construct the zonelist performance cache - see mmzone.h for details */
2335static void build_zonelist_cache(pg_data_t *pgdat)
2336{
2337        struct zonelist *zonelist;
2338        struct zonelist_cache *zlc;
2339        struct zoneref *z;
2340
2341        zonelist = &pgdat->node_zonelists[0];
2342        zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
2343        bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
2344        for (z = zonelist->_zonerefs; z->zone; z++)
2345                zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
2346}
2347
2348
2349#else   /* CONFIG_NUMA */
2350
2351static void set_zonelist_order(void)
2352{
2353        current_zonelist_order = ZONELIST_ORDER_ZONE;
2354}
2355
2356static void build_zonelists(pg_data_t *pgdat)
2357{
2358        int node, local_node;
2359        enum zone_type j;
2360        struct zonelist *zonelist;
2361
2362        local_node = pgdat->node_id;
2363
2364        zonelist = &pgdat->node_zonelists[0];
2365        j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
2366
2367        /*
2368         * Now we build the zonelist so that it contains the zones
2369         * of all the other nodes.
2370         * We don't want to pressure a particular node, so when
2371         * building the zones for node N, we make sure that the
2372         * zones coming right after the local ones are those from
2373         * node N+1 (modulo N)
2374         */
2375        for (node = local_node + 1; node < MAX_NUMNODES; node++) {
2376                if (!node_online(node))
2377                        continue;
2378                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2379                                                        MAX_NR_ZONES - 1);
2380        }
2381        for (node = 0; node < local_node; node++) {
2382                if (!node_online(node))
2383                        continue;
2384                j = build_zonelists_node(NODE_DATA(node), zonelist, j,
2385                                                        MAX_NR_ZONES - 1);
2386        }
2387
2388        zonelist->_zonerefs[j].zone = NULL;
2389        zonelist->_zonerefs[j].zone_idx = 0;
2390}
2391
2392/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
2393static void build_zonelist_cache(pg_data_t *pgdat)
2394{
2395        pgdat->node_zonelists[0].zlcache_ptr = NULL;
2396}
2397
2398#endif  /* CONFIG_NUMA */
2399
2400/* The return value is int just to match the stop_machine() callback signature */
2401static int __build_all_zonelists(void *dummy)
2402{
2403        int nid;
2404
2405        for_each_online_node(nid) {
2406                pg_data_t *pgdat = NODE_DATA(nid);
2407
2408                build_zonelists(pgdat);
2409                build_zonelist_cache(pgdat);
2410        }
2411        return 0;
2412}
2413
2414void build_all_zonelists(void)
2415{
2416        set_zonelist_order();
2417
2418        if (system_state == SYSTEM_BOOTING) {
2419                __build_all_zonelists(NULL);
2420                mminit_verify_zonelist();
2421                cpuset_init_current_mems_allowed();
2422        } else {
2423                /* we have to stop all cpus to guarantee there is no user
2424                   of zonelist */
2425                stop_machine(__build_all_zonelists, NULL, NULL);
2426                /* cpuset refresh routine should be here */
2427        }
2428        vm_total_pages = nr_free_pagecache_pages();
2429        /*
2430         * Disable grouping by mobility if the number of pages in the
2431         * system is too low to allow the mechanism to work. It would be
2432         * more accurate, but expensive to check per-zone. This check is
2433         * made on memory-hotadd so a system can start with mobility
2434         * disabled and enable it later
2435         */
2436        if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
2437                page_group_by_mobility_disabled = 1;
2438        else
2439                page_group_by_mobility_disabled = 0;
2440
2441        printk("Built %i zonelists in %s order, mobility grouping %s.  "
2442                "Total pages: %ld\n",
2443                        num_online_nodes(),
2444                        zonelist_order_name[current_zonelist_order],
2445                        page_group_by_mobility_disabled ? "off" : "on",
2446                        vm_total_pages);
2447#ifdef CONFIG_NUMA
2448        printk("Policy zone: %s\n", zone_names[policy_zone]);
2449#endif
2450}
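/*
 * Example (illustrative, 4KB pages, 1024-page pageblocks, 5 migrate types):
 * the check above disables mobility grouping only when the system has fewer
 * than 1024 * 5 = 5120 pages, i.e. about 20MB of RAM.
 */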
2451
2452/*
2453 * Helper functions to size the waitqueue hash table.
2454 * Essentially these want to choose hash table sizes sufficiently
2455 * large so that collisions trying to wait on pages are rare.
2456 * But in fact, the number of active page waitqueues on typical
2457 * systems is ridiculously low, less than 200. So this is even
2458 * conservative, even though it seems large.
2459 *
2460 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
2461 * waitqueues, i.e. the size of the waitq table given the number of pages.
2462 */
2463#define PAGES_PER_WAITQUEUE     256
2464
2465#ifndef CONFIG_MEMORY_HOTPLUG
2466static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2467{
2468        unsigned long size = 1;
2469
2470        pages /= PAGES_PER_WAITQUEUE;
2471
2472        while (size < pages)
2473                size <<= 1;
2474
2475        /*
2476         * Once we have dozens or even hundreds of threads sleeping
2477         * on IO we've got bigger problems than wait queue collision.
2478         * Limit the size of the wait table to a reasonable size.
2479         */
2480        size = min(size, 4096UL);
2481
2482        return max(size, 4UL);
2483}
2484#else
2485/*
2486 * A zone's size might be changed by hot-add, so it is not possible to determine
2487 * a suitable size for its wait_table.  So we use the maximum size now.
2488 *
2489 * The max wait table size = 4096 x sizeof(wait_queue_head_t).   ie:
2490 *
2491 *    i386 (preemption config)    : 4096 x 16 = 64Kbyte.
2492 *    ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
2493 *    ia64, x86-64 (preemption)   : 4096 x 24 = 96Kbyte.
2494 *
2495 * The maximum number of entries is used once a zone has (512K + 256) pages
2496 * or more, going by the calculation above.  That equals:
2497 *
2498 *    i386, x86-64, powerpc(4K page size) : =  ( 2G + 1M)byte.
2499 *    ia64(16K page size)                 : =  ( 8G + 4M)byte.
2500 *    powerpc (64K page size)             : =  (32G +16M)byte.
2501 */
2502static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
2503{
2504        return 4096UL;
2505}
2506#endif
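/*
 * Worked example (illustrative): without memory hotplug, a zone of 1048576
 * pages (4GB with 4KB pages) gets 1048576 / 256 = 4096 waitqueue entries;
 * the power-of-two rounding leaves that at 4096, which is also the upper
 * clamp, and wait_table_bits() below returns 12 for a table of that size.
 */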
2507
2508/*
2509 * This is an integer logarithm so that shifts can be used later
2510 * to extract the more random high bits from the multiplicative
2511 * hash function before the remainder is taken.
2512 */
2513static inline unsigned long wait_table_bits(unsigned long size)
2514{
2515        return ffz(~size);
2516}
2517
2518#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
2519
2520/*
2521 * Mark a number of pageblocks as MIGRATE_RESERVE. The number
2522 * of blocks reserved is based on zone->pages_min. The memory within the
2523 * reserve will tend to store contiguous free pages. Setting min_free_kbytes
2524 * higher will lead to a bigger reserve which will get freed as contiguous
2525 * blocks as reclaim kicks in
2526 */
2527static void setup_zone_migrate_reserve(struct zone *zone)
2528{
2529        unsigned long start_pfn, pfn, end_pfn;
2530        struct page *page;
2531        unsigned long reserve, block_migratetype;
2532
2533        /* Get the start pfn, end pfn and the number of blocks to reserve */
2534        start_pfn = zone->zone_start_pfn;
2535        end_pfn = start_pfn + zone->spanned_pages;
2536        reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
2537                                                        pageblock_order;
2538
2539        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
2540                if (!pfn_valid(pfn))
2541                        continue;
2542                page = pfn_to_page(pfn);
2543
2544                /* Watch out for overlapping nodes */
2545                if (page_to_nid(page) != zone_to_nid(zone))
2546                        continue;
2547
2548                /* Blocks with reserved pages will never be freed, skip them. */
2549                if (PageReserved(page))
2550                        continue;
2551
2552                block_migratetype = get_pageblock_migratetype(page);
2553
2554                /* If this block is reserved, account for it */
2555                if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
2556                        reserve--;
2557                        continue;
2558                }
2559
2560                /* Suitable for reserving if this block is movable */
2561                if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
2562                        set_pageblock_migratetype(page, MIGRATE_RESERVE);
2563                        move_freepages_block(zone, page, MIGRATE_RESERVE);
2564                        reserve--;
2565                        continue;
2566                }
2567
2568                /*
2569                 * If the reserve is met and this is a previously reserved block,
2570                 * take it back
2571                 */
2572                if (block_migratetype == MIGRATE_RESERVE) {
2573                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2574                        move_freepages_block(zone, page, MIGRATE_MOVABLE);
2575                }
2576        }
2577}
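/*
 * Worked example (illustrative): with 4KB pages, a pageblock_order of 10
 * (1024-page, 4MB blocks) and zone->pages_min of 2900 pages, the reserve is
 * roundup(2900, 1024) >> 10 = 3 pageblocks, so the loop above converts up
 * to three movable pageblocks into MIGRATE_RESERVE blocks.
 */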
2578
2579/*
2580 * Initially all pages are reserved - free ones are freed
2581 * up by free_all_bootmem() once the early boot process is
2582 * done. Non-atomic initialization, single-pass.
2583 */
2584void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2585                unsigned long start_pfn, enum memmap_context context)
2586{
2587        struct page *page;
2588        unsigned long end_pfn = start_pfn + size;
2589        unsigned long pfn;
2590        struct zone *z;
2591
2592        z = &NODE_DATA(nid)->node_zones[zone];
2593        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
2594                /*
2595                 * There can be holes in boot-time mem_map[]s
2596                 * handed to this function.  They do not
2597                 * exist on hotplugged memory.
2598                 */
2599                if (context == MEMMAP_EARLY) {
2600                        if (!early_pfn_valid(pfn))
2601                                continue;
2602                        if (!early_pfn_in_nid(pfn, nid))
2603                                continue;
2604                }
2605                page = pfn_to_page(pfn);
2606                set_page_links(page, zone, nid, pfn);
2607                mminit_verify_page_links(page, zone, nid, pfn);
2608                init_page_count(page);
2609                reset_page_mapcount(page);
2610                SetPageReserved(page);
2611                /*
2612                 * Mark the block movable so that blocks are reserved for
2613                 * movable at startup. This will force kernel allocations
2614                 * to reserve their blocks rather than leaking throughout
2615                 * the address space during boot when many long-lived
2616                 * kernel allocations are made. Later some blocks near
2617                 * the start are marked MIGRATE_RESERVE by
2618                 * setup_zone_migrate_reserve()
2619                 *
2620                 * The bitmap is created for the zone's valid pfn range, but
2621                 * the memmap can be created for invalid pages (for alignment),
2622                 * so check here that we do not call set_pageblock_migratetype()
2623                 * on a pfn outside the zone.
2624                 */
2625                if ((z->zone_start_pfn <= pfn)
2626                    && (pfn < z->zone_start_pfn + z->spanned_pages)
2627                    && !(pfn & (pageblock_nr_pages - 1)))
2628                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
2629
2630                INIT_LIST_HEAD(&page->lru);
2631#ifdef WANT_PAGE_VIRTUAL
2632                /* The shift won't overflow because ZONE_NORMAL is below 4G. */
2633                if (!is_highmem_idx(zone))
2634                        set_page_address(page, __va(pfn << PAGE_SHIFT));
2635#endif
2636        }
2637}
2638
2639static void __meminit zone_init_free_lists(struct zone *zone)
2640{
2641        int order, t;
2642        for_each_migratetype_order(order, t) {
2643                INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
2644                zone->free_area[order].nr_free = 0;
2645        }
2646}
2647
2648#ifndef __HAVE_ARCH_MEMMAP_INIT
2649#define memmap_init(size, nid, zone, start_pfn) \
2650        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
2651#endif
2652
2653static int zone_batchsize(struct zone *zone)
2654{
2655        int batch;
2656
2657        /*
2658         * The per-cpu-pages pools are set to around 1/1000th of the
2659         * size of the zone, but no more than half a megabyte.
2660         *
2661         * OK, so we don't know how big the cache is.  So guess.
2662         */
2663        batch = zone->present_pages / 1024;
2664        if (batch * PAGE_SIZE > 512 * 1024)
2665                batch = (512 * 1024) / PAGE_SIZE;
2666        batch /= 4;             /* We effectively *= 4 below */
2667        if (batch < 1)
2668                batch = 1;
2669
2670        /*
2671         * Clamp the batch to a 2^n - 1 value. Having a power
2672         * of 2 value was found to be more likely to have
2673         * suboptimal cache aliasing properties in some cases.
2674         *
2675         * For example if 2 tasks are alternately allocating
2676         * batches of pages, one task can end up with a lot
2677         * of pages of one half of the possible page colors
2678         * and the other with pages of the other colors.
2679         */
2680        batch = (1 << (fls(batch + batch/2)-1)) - 1;
2681
2682        return batch;
2683}
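/*
 * Worked example (illustrative, 4KB pages): a 1GB zone has 262144 pages, so
 * batch starts at 256; 256 pages is 1MB > 512KB, so it is capped at 128,
 * divided by 4 to give 32, and the 2^n - 1 rounding of fls(32 + 16) = 6
 * yields (1 << 5) - 1 = 31.  setup_pageset() below then sets pcp->high to
 * 6 * 31 = 186 pages.
 */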
2684
2685static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
2686{
2687        struct per_cpu_pages *pcp;
2688
2689        memset(p, 0, sizeof(*p));
2690
2691        pcp = &p->pcp;
2692        pcp->count = 0;
2693        pcp->high = 6 * batch;
2694        pcp->batch = max(1UL, 1 * batch);
2695        INIT_LIST_HEAD(&pcp->list);
2696}
2697
2698/*
2699 * setup_pagelist_highmark() sets the high water mark of the hot
2700 * per-cpu page list of pageset p to the value high.
2701 */
2702
2703static void setup_pagelist_highmark(struct per_cpu_pageset *p,
2704                                unsigned long high)
2705{
2706        struct per_cpu_pages *pcp;
2707
2708        pcp = &p->pcp;
2709        pcp->high = high;
2710        pcp->batch = max(1UL, high/4);
2711        if ((high/4) > (PAGE_SHIFT * 8))
2712                pcp->batch = PAGE_SHIFT * 8;
2713}
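/*
 * Example (illustrative): with percpu_pagelist_fraction set to 8 via
 * /proc/sys/vm/percpu_pagelist_fraction, a 262144-page zone gets
 * pcp->high = 262144 / 8 = 32768 pages, and pcp->batch = high/4 is capped
 * at PAGE_SHIFT * 8 = 96 pages on a 4KB-page system.
 */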
2714
2715
2716#ifdef CONFIG_NUMA
2717/*
2718 * Boot pageset table. One per cpu which is going to be used for all
2719 * zones and all nodes. The parameters will be set in such a way
2720 * that an item put on a list will immediately be handed over to
2721 * the buddy list. This is safe since pageset manipulation is done
2722 * with interrupts disabled.
2723 *
2724 * Some NUMA counter updates may also be caught by the boot pagesets.
2725 *
2726 * The boot_pagesets must be kept even after bootup is complete for
2727 * unused processors and/or zones. They do play a role for bootstrapping
2728 * hotplugged processors.
2729 *
2730 * zoneinfo_show() and maybe other functions do
2731 * not check if the processor is online before following the pageset pointer.
2732 * Other parts of the kernel may not check if the zone is available.
2733 */
2734static struct per_cpu_pageset boot_pageset[NR_CPUS];
2735
2736/*
2737 * Dynamically allocate memory for the
2738 * per cpu pageset array in struct zone.
2739 */
2740static int __cpuinit process_zones(int cpu)
2741{
2742        struct zone *zone, *dzone;
2743        int node = cpu_to_node(cpu);
2744
2745        node_set_state(node, N_CPU);    /* this node has a cpu */
2746
2747        for_each_zone(zone) {
2748
2749                if (!populated_zone(zone))
2750                        continue;
2751
2752                zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
2753                                         GFP_KERNEL, node);
2754                if (!zone_pcp(zone, cpu))
2755                        goto bad;
2756
2757                setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
2758
2759                if (percpu_pagelist_fraction)
2760                        setup_pagelist_highmark(zone_pcp(zone, cpu),
2761                                (zone->present_pages / percpu_pagelist_fraction));
2762        }
2763
2764        return 0;
2765bad:
2766        for_each_zone(dzone) {
2767                if (!populated_zone(dzone))
2768                        continue;
2769                if (dzone == zone)
2770                        break;
2771                kfree(zone_pcp(dzone, cpu));
2772                zone_pcp(dzone, cpu) = &boot_pageset[cpu];
2773        }
2774        return -ENOMEM;
2775}
2776
2777static inline void free_zone_pagesets(int cpu)
2778{
2779        struct zone *zone;
2780
2781        for_each_zone(zone) {
2782                struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
2783
2784                /* Free per_cpu_pageset if it is slab allocated */
2785                if (pset != &boot_pageset[cpu])
2786                        kfree(pset);
2787                zone_pcp(zone, cpu) = &boot_pageset[cpu];
2788        }
2789}
2790
2791static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
2792                unsigned long action,
2793                void *hcpu)
2794{
2795        int cpu = (long)hcpu;
2796        int ret = NOTIFY_OK;
2797
2798        switch (action) {
2799        case CPU_UP_PREPARE:
2800        case CPU_UP_PREPARE_FROZEN:
2801                if (process_zones(cpu))
2802                        ret = NOTIFY_BAD;
2803                break;
2804        case CPU_UP_CANCELED:
2805        case CPU_UP_CANCELED_FROZEN:
2806        case CPU_DEAD:
2807        case CPU_DEAD_FROZEN:
2808                free_zone_pagesets(cpu);
2809                break;
2810        default:
2811                break;
2812        }
2813        return ret;
2814}
2815
2816static struct notifier_block __cpuinitdata pageset_notifier =
2817        { &pageset_cpuup_callback, NULL, 0 };
2818
2819void __init setup_per_cpu_pageset(void)
2820{
2821        int err;
2822
2823        /* Initialize per_cpu_pageset for cpu 0.
2824         * A cpuup callback will do this for every cpu
2825         * as it comes online
2826         */
2827        err = process_zones(smp_processor_id());
2828        BUG_ON(err);
2829        register_cpu_notifier(&pageset_notifier);
2830}
2831
2832#endif
2833
2834static noinline __init_refok
2835int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
2836{
2837        int i;
2838        struct pglist_data *pgdat = zone->zone_pgdat;
2839        size_t alloc_size;
2840
2841        /*
2842         * The per-page waitqueue mechanism uses hashed waitqueues
2843         * per zone.
2844         */
2845        zone->wait_table_hash_nr_entries =
2846                 wait_table_hash_nr_entries(zone_size_pages);
2847        zone->wait_table_bits =
2848                wait_table_bits(zone->wait_table_hash_nr_entries);
2849        alloc_size = zone->wait_table_hash_nr_entries
2850                                        * sizeof(wait_queue_head_t);
2851
2852        if (!slab_is_available()) {
2853                zone->wait_table = (wait_queue_head_t *)
2854                        alloc_bootmem_node(pgdat, alloc_size);
2855        } else {
2856                /*
2857                 * This case means that a zone whose size was 0 gets new memory
2858                 * via memory hot-add.
2859                 * But it may be the case that a new node was hot-added.  In
2860                 * this case vmalloc() will not be able to use this new node's
2861                 * memory - this wait_table must be initialized to use this new
2862                 * node itself as well.
2863                 * To use this new node's memory, further consideration will be
2864                 * necessary.
2865                 */
2866                zone->wait_table = vmalloc(alloc_size);
2867        }
2868        if (!zone->wait_table)
2869                return -ENOMEM;
2870
2871        for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
2872                init_waitqueue_head(zone->wait_table + i);
2873
2874        return 0;
2875}
2876
2877static __meminit void zone_pcp_init(struct zone *zone)
2878{
2879        int cpu;
2880        unsigned long batch = zone_batchsize(zone);
2881
2882        for (cpu = 0; cpu < NR_CPUS; cpu++) {
2883#ifdef CONFIG_NUMA
2884                /* Early boot. Slab allocator not functional yet */
2885                zone_pcp(zone, cpu) = &boot_pageset[cpu];
2886                setup_pageset(&boot_pageset[cpu],0);
2887#else
2888                setup_pageset(zone_pcp(zone,cpu), batch);
2889#endif
2890        }
2891        if (zone->present_pages)
2892                printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
2893                        zone->name, zone->present_pages, batch);
2894}
2895
2896__meminit int init_currently_empty_zone(struct zone *zone,
2897                                        unsigned long zone_start_pfn,
2898                                        unsigned long size,
2899                                        enum memmap_context context)
2900{
2901        struct pglist_data *pgdat = zone->zone_pgdat;
2902        int ret;
2903        ret = zone_wait_table_init(zone, size);
2904        if (ret)
2905                return ret;
2906        pgdat->nr_zones = zone_idx(zone) + 1;
2907
2908        zone->zone_start_pfn = zone_start_pfn;
2909
2910        mminit_dprintk(MMINIT_TRACE, "memmap_init",
2911                        "Initialising map node %d zone %lu pfns %lu -> %lu\n",
2912                        pgdat->node_id,
2913                        (unsigned long)zone_idx(zone),
2914                        zone_start_pfn, (zone_start_pfn + size));
2915
2916        zone_init_free_lists(zone);
2917
2918        return 0;
2919}
2920
2921#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
2922/*
2923 * Basic iterator support. Return the first range of PFNs for a node
2924 * Note: nid == MAX_NUMNODES returns first region regardless of node
2925 */
2926static int __meminit first_active_region_index_in_nid(int nid)
2927{
2928        int i;
2929
2930        for (i = 0; i < nr_nodemap_entries; i++)
2931                if (nid == MAX_NUMNODES || early_node_map[i].nid == nid)
2932                        return i;
2933
2934        return -1;
2935}
2936
2937/*
2938 * Basic iterator support. Return the next active range of PFNs for a node
2939 * Note: nid == MAX_NUMNODES returns next region regardless of node
2940 */
2941static int __meminit next_active_region_index_in_nid(int index, int nid)
2942{
2943        for (index = index + 1; index < nr_nodemap_entries; index++)
2944                if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
2945                        return index;
2946
2947        return -1;
2948}
2949
2950#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
2951/*
2952 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
2953 * Architectures may implement their own version but if add_active_range()
2954 * was used and there are no special requirements, this is a convenient
2955 * alternative
2956 */
2957int __meminit __early_pfn_to_nid(unsigned long pfn)
2958{
2959        int i;
2960
2961        for (i = 0; i < nr_nodemap_entries; i++) {
2962                unsigned long start_pfn = early_node_map[i].start_pfn;
2963                unsigned long end_pfn = early_node_map[i].end_pfn;
2964
2965                if (start_pfn <= pfn && pfn < end_pfn)
2966                        return early_node_map[i].nid;
2967        }
2968        /* This is a memory hole */
2969        return -1;
2970}
2971#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
2972
2973int __meminit early_pfn_to_nid(unsigned long pfn)
2974{
2975        int nid;
2976
2977        nid = __early_pfn_to_nid(pfn);
2978        if (nid >= 0)
2979                return nid;
2980        /* just returns 0 */
2981        return 0;
2982}
2983
2984#ifdef CONFIG_NODES_SPAN_OTHER_NODES
2985bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
2986{
2987        int nid;
2988
2989        nid = __early_pfn_to_nid(pfn);
2990        if (nid >= 0 && nid != node)
2991                return false;
2992        return true;
2993}
2994#endif
2995
2996/* Basic iterator support to walk early_node_map[] */
2997#define for_each_active_range_index_in_nid(i, nid) \
2998        for (i = first_active_region_index_in_nid(nid); i != -1; \
2999                                i = next_active_region_index_in_nid(i, nid))
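
/*
 * A minimal usage sketch of the iterator above: the node id (0) and the
 * printk are illustrative assumptions, not code used elsewhere in this file.
 *
 *        int i;
 *
 *        for_each_active_range_index_in_nid(i, 0)
 *                printk(KERN_DEBUG "range %d: %#lx -> %#lx\n", i,
 *                        early_node_map[i].start_pfn,
 *                        early_node_map[i].end_pfn);
 */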
3000
3001/**
3002 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range
3003 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
3004 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node
3005 *
3006 * If an architecture guarantees that all ranges registered with
3007 * add_active_range() contain no holes and may be freed, this
3008 * function may be used instead of calling free_bootmem() manually.
3009 */
3010void __init free_bootmem_with_active_regions(int nid,
3011                                                unsigned long max_low_pfn)
3012{
3013        int i;
3014
3015        for_each_active_range_index_in_nid(i, nid) {
3016                unsigned long size_pages = 0;
3017                unsigned long end_pfn = early_node_map[i].end_pfn;
3018
3019                if (early_node_map[i].start_pfn >= max_low_pfn)
3020                        continue;
3021
3022                if (end_pfn > max_low_pfn)
3023                        end_pfn = max_low_pfn;
3024
3025                size_pages = end_pfn - early_node_map[i].start_pfn;
3026                free_bootmem_node(NODE_DATA(early_node_map[i].nid),
3027                                PFN_PHYS(early_node_map[i].start_pfn),
3028                                size_pages << PAGE_SHIFT);
3029        }
3030}
3031
3032void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
3033{
3034        int i;
3035        int ret;
3036
3037        for_each_active_range_index_in_nid(i, nid) {
3038                ret = work_fn(early_node_map[i].start_pfn,
3039                              early_node_map[i].end_pfn, data);
3040                if (ret)
3041                        break;
3042        }
3043}
3044/**
3045 * sparse_memory_present_with_active_regions - Call memory_present for each active range
3046 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
3047 *
3048 * If an architecture guarantees that all ranges registered with
3049 * add_active_range() contain no holes and may be freed, this
3050 * function may be used instead of calling memory_present() manually.
3051 */
3052void __init sparse_memory_present_with_active_regions(int nid)
3053{
3054        int i;
3055
3056        for_each_active_range_index_in_nid(i, nid)
3057                memory_present(early_node_map[i].nid,
3058                                early_node_map[i].start_pfn,
3059                                early_node_map[i].end_pfn);
3060}
3061
3062/**
3063 * push_node_boundaries - Push node boundaries to at least the requested boundary
3064 * @nid: The nid of the node to push the boundary for
3065 * @start_pfn: The start pfn of the node
3066 * @end_pfn: The end pfn of the node
3067 *
3068 * In reserve-based hot-add, a mem_map is allocated that remains unused until
3069 * hotadd time. Specifically, on x86_64, SRAT will report ranges that can
3070 * potentially be hotplugged even though no physical memory exists. This
3071 * function allows an arch to push out the node boundaries so that enough
3072 * mem_map is allocated for use later.
3073 */
3074#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3075void __init push_node_boundaries(unsigned int nid,
3076                unsigned long start_pfn, unsigned long end_pfn)
3077{
3078        mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3079                        "Entering push_node_boundaries(%u, %lu, %lu)\n",
3080                        nid, start_pfn, end_pfn);
3081
3082        /* Initialise the boundary for this node if necessary */
3083        if (node_boundary_end_pfn[nid] == 0)
3084                node_boundary_start_pfn[nid] = -1UL;
3085
3086        /* Update the boundaries */
3087        if (node_boundary_start_pfn[nid] > start_pfn)
3088                node_boundary_start_pfn[nid] = start_pfn;
3089        if (node_boundary_end_pfn[nid] < end_pfn)
3090                node_boundary_end_pfn[nid] = end_pfn;
3091}
3092
3093/* If necessary, push the node boundary out for reserve hotadd */
3094static void __meminit account_node_boundary(unsigned int nid,
3095                unsigned long *start_pfn, unsigned long *end_pfn)
3096{
3097        mminit_dprintk(MMINIT_TRACE, "zoneboundary",
3098                        "Entering account_node_boundary(%u, %lu, %lu)\n",
3099                        nid, *start_pfn, *end_pfn);
3100
3101        /* Return if boundary information has not been provided */
3102        if (node_boundary_end_pfn[nid] == 0)
3103                return;
3104
3105        /* Check the boundaries and update if necessary */
3106        if (node_boundary_start_pfn[nid] < *start_pfn)
3107                *start_pfn = node_boundary_start_pfn[nid];
3108        if (node_boundary_end_pfn[nid] > *end_pfn)
3109                *end_pfn = node_boundary_end_pfn[nid];
3110}
3111#else
3112void __init push_node_boundaries(unsigned int nid,
3113                unsigned long start_pfn, unsigned long end_pfn) {}
3114
3115static void __meminit account_node_boundary(unsigned int nid,
3116                unsigned long *start_pfn, unsigned long *end_pfn) {}
3117#endif
3118
3119
3120/**
3121 * get_pfn_range_for_nid - Return the start and end page frames for a node
3122 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
3123 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
3124 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
3125 *
3126 * It returns the start and end page frame of a node based on information
3127 * provided by an arch calling add_active_range(). If called for a node
3128 * with no available memory, a warning is printed and the start and end
3129 * PFNs will be 0.
3130 */
3131void __meminit get_pfn_range_for_nid(unsigned int nid,
3132                        unsigned long *start_pfn, unsigned long *end_pfn)
3133{
3134        int i;
3135        *start_pfn = -1UL;
3136        *end_pfn = 0;
3137
3138        for_each_active_range_index_in_nid(i, nid) {
3139                *start_pfn = min(*start_pfn, early_node_map[i].start_pfn);
3140                *end_pfn = max(*end_pfn, early_node_map[i].end_pfn);
3141        }
3142
3143        if (*start_pfn == -1UL)
3144                *start_pfn = 0;
3145
3146        /* Push the node boundaries out if requested */
3147        account_node_boundary(nid, start_pfn, end_pfn);
3148}
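
/*
 * A minimal usage sketch: callers pass the bounds back by reference, e.g.
 *
 *        unsigned long start_pfn, end_pfn;
 *
 *        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 *
 * where nid stands in for whichever node the caller is sizing.
 */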
3149
3150/*
3151 * This finds a zone that can be used for ZONE_MOVABLE pages. The
3152 * assumption is made that zones within a node are ordered in monotonically
3153 * increasing memory addresses so that the "highest" populated zone is used.
3154 */
3155static void __init find_usable_zone_for_movable(void)
3156{
3157        int zone_index;
3158        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
3159                if (zone_index == ZONE_MOVABLE)
3160                        continue;
3161
3162                if (arch_zone_highest_possible_pfn[zone_index] >
3163                                arch_zone_lowest_possible_pfn[zone_index])
3164                        break;
3165        }
3166
3167        VM_BUG_ON(zone_index == -1);
3168        movable_zone = zone_index;
3169}
3170
3171/*
3172 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3173 * because it is sized independent of the architecture. Unlike the other zones,
3174 * the starting point for ZONE_MOVABLE is not fixed. It may be different
3175 * in each node depending on the size of each node and how evenly kernelcore
3176 * is distributed. This helper function adjusts the zone ranges
3177 * provided by the architecture for a given node by using the end of the
3178 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3179 * zones within a node are in order of monotonically increasing memory addresses.
3180 */
3181static void __meminit adjust_zone_range_for_zone_movable(int nid,
3182                                        unsigned long zone_type,
3183                                        unsigned long node_start_pfn,
3184                                        unsigned long node_end_pfn,
3185                                        unsigned long *zone_start_pfn,
3186                                        unsigned long *zone_end_pfn)
3187{
3188        /* Only adjust if ZONE_MOVABLE is on this node */
3189        if (zone_movable_pfn[nid]) {
3190                /* Size ZONE_MOVABLE */
3191                if (zone_type == ZONE_MOVABLE) {
3192                        *zone_start_pfn = zone_movable_pfn[nid];
3193                        *zone_end_pfn = min(node_end_pfn,
3194                                arch_zone_highest_possible_pfn[movable_zone]);
3195
3196                /* Adjust for ZONE_MOVABLE starting within this range */
3197                } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
3198                                *zone_end_pfn > zone_movable_pfn[nid]) {
3199                        *zone_end_pfn = zone_movable_pfn[nid];
3200
3201                /* Check if this whole range is within ZONE_MOVABLE */
3202                } else if (*zone_start_pfn >= zone_movable_pfn[nid])
3203                        *zone_start_pfn = *zone_end_pfn;
3204        }
3205}
3206
3207/*
3208 * Return the number of pages a zone spans in a node, including holes
3209 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
3210 */
3211static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3212                                        unsigned long zone_type,
3213                                        unsigned long *ignored)
3214{
3215        unsigned long node_start_pfn, node_end_pfn;
3216        unsigned long zone_start_pfn, zone_end_pfn;
3217
3218        /* Get the start and end of the node and zone */
3219        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3220        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
3221        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
3222        adjust_zone_range_for_zone_movable(nid, zone_type,
3223                                node_start_pfn, node_end_pfn,
3224                                &zone_start_pfn, &zone_end_pfn);
3225
3226        /* Check that this node has pages within the zone's required range */
3227        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
3228                return 0;
3229
3230        /* Move the zone boundaries inside the node if necessary */
3231        zone_end_pfn = min(zone_end_pfn, node_end_pfn);
3232        zone_start_pfn = max(zone_start_pfn, node_start_pfn);
3233
3234        /* Return the spanned pages */
3235        return zone_end_pfn - zone_start_pfn;
3236}
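
/*
 * A worked example with made-up numbers: if a node covers PFNs 0x1000-0x9000
 * and the architecture limits for a zone are PFNs 0x0-0x4000, the zone spans
 * 0x1000-0x4000 in that node, i.e. 0x3000 pages. Holes inside that span are
 * then subtracted by zone_absent_pages_in_node() to give present_pages.
 */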
3237
3238/*
3239 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
3240 * then all holes in the requested range will be accounted for.
3241 */
3242static unsigned long __meminit __absent_pages_in_range(int nid,
3243                                unsigned long range_start_pfn,
3244                                unsigned long range_end_pfn)
3245{
3246        int i = 0;
3247        unsigned long prev_end_pfn = 0, hole_pages = 0;
3248        unsigned long start_pfn;
3249
3250        /* Find the end_pfn of the first active range of pfns in the node */
3251        i = first_active_region_index_in_nid(nid);
3252        if (i == -1)
3253                return 0;
3254
3255        prev_end_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3256
3257        /* Account for ranges before physical memory on this node */
3258        if (early_node_map[i].start_pfn > range_start_pfn)
3259                hole_pages = prev_end_pfn - range_start_pfn;
3260
3261        /* Find all holes for the zone within the node */
3262        for (; i != -1; i = next_active_region_index_in_nid(i, nid)) {
3263
3264                /* No need to continue if prev_end_pfn is outside the zone */
3265                if (prev_end_pfn >= range_end_pfn)
3266                        break;
3267
3268                /* Make sure the end of the zone is not within the hole */
3269                start_pfn = min(early_node_map[i].start_pfn, range_end_pfn);
3270                prev_end_pfn = max(prev_end_pfn, range_start_pfn);
3271
3272                /* Update the hole size count and move on */
3273                if (start_pfn > range_start_pfn) {
3274                        BUG_ON(prev_end_pfn > start_pfn);
3275                        hole_pages += start_pfn - prev_end_pfn;
3276                }
3277                prev_end_pfn = early_node_map[i].end_pfn;
3278        }
3279
3280        /* Account for ranges past physical memory on this node */
3281        if (range_end_pfn > prev_end_pfn)
3282                hole_pages += range_end_pfn -
3283                                max(range_start_pfn, prev_end_pfn);
3284
3285        return hole_pages;
3286}
3287
3288/**
3289 * absent_pages_in_range - Return number of page frames in holes within a range
3290 * @start_pfn: The start PFN to start searching for holes
3291 * @end_pfn: The end PFN to stop searching for holes
3292 *
3293 * It returns the number of page frames in memory holes within a range.
3294 */
3295unsigned long __init absent_pages_in_range(unsigned long start_pfn,
3296                                                        unsigned long end_pfn)
3297{
3298        return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
3299}
3300
3301/* Return the number of page frames in holes in a zone on a node */
3302static unsigned long __meminit zone_absent_pages_in_node(int nid,
3303                                        unsigned long zone_type,
3304                                        unsigned long *ignored)
3305{
3306        unsigned long node_start_pfn, node_end_pfn;
3307        unsigned long zone_start_pfn, zone_end_pfn;
3308
3309        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
3310        zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type],
3311                                                        node_start_pfn);
3312        zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
3313                                                        node_end_pfn);
3314
3315        adjust_zone_range_for_zone_movable(nid, zone_type,
3316                        node_start_pfn, node_end_pfn,
3317                        &zone_start_pfn, &zone_end_pfn);
3318        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
3319}
3320
3321#else
3322static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
3323                                        unsigned long zone_type,
3324                                        unsigned long *zones_size)
3325{
3326        return zones_size[zone_type];
3327}
3328
3329static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
3330                                                unsigned long zone_type,
3331                                                unsigned long *zholes_size)
3332{
3333        if (!zholes_size)
3334                return 0;
3335
3336        return zholes_size[zone_type];
3337}
3338
3339#endif
3340
3341static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
3342                unsigned long *zones_size, unsigned long *zholes_size)
3343{
3344        unsigned long realtotalpages, totalpages = 0;
3345        enum zone_type i;
3346
3347        for (i = 0; i < MAX_NR_ZONES; i++)
3348                totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
3349                                                                zones_size);
3350        pgdat->node_spanned_pages = totalpages;
3351
3352        realtotalpages = totalpages;
3353        for (i = 0; i < MAX_NR_ZONES; i++)
3354                realtotalpages -=
3355                        zone_absent_pages_in_node(pgdat->node_id, i,
3356                                                                zholes_size);
3357        pgdat->node_present_pages = realtotalpages;
3358        printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
3359                                                        realtotalpages);
3360}
3361
3362#ifndef CONFIG_SPARSEMEM
3363/*
3364 * Calculate the size of the zone->blockflags rounded to an unsigned long
3365 * Start by making sure zonesize is a multiple of pageblock_order by rounding
3366 * up. Then use NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
3367 * round what is now in bits up to the nearest long in bits, then return it
3368 * in bytes (a worked example follows the function).
3369 */
3370static unsigned long __init usemap_size(unsigned long zonesize)
3371{
3372        unsigned long usemapsize;
3373
3374        usemapsize = roundup(zonesize, pageblock_nr_pages);
3375        usemapsize = usemapsize >> pageblock_order;
3376        usemapsize *= NR_PAGEBLOCK_BITS;
3377        usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
3378
3379        return usemapsize / 8;
3380}
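
/*
 * A worked example with assumed values (pageblock_order == 10, so
 * pageblock_nr_pages == 1024, NR_PAGEBLOCK_BITS == 4, 64-bit longs):
 * a zone of 1048576 pages contains 1024 pageblocks, needing
 * 1024 * 4 = 4096 bits, already a multiple of 64, so usemap_size()
 * returns 4096 / 8 = 512 bytes.
 */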
3381
3382static void __init setup_usemap(struct pglist_data *pgdat,
3383                                struct zone *zone, unsigned long zonesize)
3384{
3385        unsigned long usemapsize = usemap_size(zonesize);
3386        zone->pageblock_flags = NULL;
3387        if (usemapsize) {
3388                zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
3389                memset(zone->pageblock_flags, 0, usemapsize);
3390        }
3391}
3392#else
3393static inline void setup_usemap(struct pglist_data *pgdat,
3394                                struct zone *zone, unsigned long zonesize) {}
3395#endif /* CONFIG_SPARSEMEM */
3396
3397#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
3398
3399/* Return a sensible default order for the pageblock size. */
3400static inline int pageblock_default_order(void)
3401{
3402        if (HPAGE_SHIFT > PAGE_SHIFT)
3403                return HUGETLB_PAGE_ORDER;
3404
3405        return MAX_ORDER-1;
3406}
3407
3408/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
3409static inline void __init set_pageblock_order(unsigned int order)
3410{
3411        /* Check that pageblock_nr_pages has not already been setup */
3412        if (pageblock_order)
3413                return;
3414
3415        /*
3416         * Assume the largest contiguous order of interest is a huge page.
3417         * This value may be variable depending on boot parameters on IA64
3418         */
3419        pageblock_order = order;
3420}
3421#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3422
3423/*
3424 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
3425 * and pageblock_default_order() are unused as pageblock_order is set
3426 * at compile-time. See include/linux/pageblock-flags.h for the values of
3427 * pageblock_order based on the kernel config
3428 */
3429static inline int pageblock_default_order(unsigned int order)
3430{
3431        return MAX_ORDER-1;
3432}
3433#define set_pageblock_order(x)  do {} while (0)
3434
3435#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
3436
3437/*
3438 * Set up the zone data structures:
3439 *   - mark all pages reserved
3440 *   - mark all memory queues empty
3441 *   - clear the memory bitmaps
3442 */
3443static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3444                unsigned long *zones_size, unsigned long *zholes_size)
3445{
3446        enum zone_type j;
3447        int nid = pgdat->node_id;
3448        unsigned long zone_start_pfn = pgdat->node_start_pfn;
3449        int ret;
3450
3451        pgdat_resize_init(pgdat);
3452        pgdat->nr_zones = 0;
3453        init_waitqueue_head(&pgdat->kswapd_wait);
3454        pgdat->kswapd_max_order = 0;
3455
3456        for (j = 0; j < MAX_NR_ZONES; j++) {
3457                struct zone *zone = pgdat->node_zones + j;
3458                unsigned long size, realsize, memmap_pages;
3459
3460                size = zone_spanned_pages_in_node(nid, j, zones_size);
3461                realsize = size - zone_absent_pages_in_node(nid, j,
3462                                                                zholes_size);
3463
3464                /*
3465                 * Adjust realsize so that it accounts for how much memory
3466                 * is used by this zone for memmap. This affects the watermark
3467                 * and per-cpu initialisations
3468                 */
3469                memmap_pages =
3470                        PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
3471                if (realsize >= memmap_pages) {
3472                        realsize -= memmap_pages;
3473                        mminit_dprintk(MMINIT_TRACE, "memmap_init",
3474                                "%s zone: %lu pages used for memmap\n",
3475                                zone_names[j], memmap_pages);
3476                } else
3477                        printk(KERN_WARNING
3478                                "  %s zone: %lu pages exceeds realsize %lu\n",
3479                                zone_names[j], memmap_pages, realsize);
3480
3481                /* Account for reserved pages */
3482                if (j == 0 && realsize > dma_reserve) {
3483                        realsize -= dma_reserve;
3484                        mminit_dprintk(MMINIT_TRACE, "memmap_init",
3485                                        "%s zone: %lu pages reserved\n",
3486                                        zone_names[0], dma_reserve);
3487                }
3488
3489                if (!is_highmem_idx(j))
3490                        nr_kernel_pages += realsize;
3491                nr_all_pages += realsize;
3492
3493                zone->spanned_pages = size;
3494                zone->present_pages = realsize;
3495#ifdef CONFIG_NUMA
3496                zone->node = nid;
3497                zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio)
3498                                                / 100;
3499                zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100;
3500#endif
3501                zone->name = zone_names[j];
3502                spin_lock_init(&zone->lock);
3503                spin_lock_init(&zone->lru_lock);
3504                zone_seqlock_init(zone);
3505                zone->zone_pgdat = pgdat;
3506
3507                zone->prev_priority = DEF_PRIORITY;
3508
3509                zone_pcp_init(zone);
3510                INIT_LIST_HEAD(&zone->active_list);
3511                INIT_LIST_HEAD(&zone->inactive_list);
3512                zone->nr_scan_active = 0;
3513                zone->nr_scan_inactive = 0;
3514                zap_zone_vm_stats(zone);
3515                zone->flags = 0;
3516                if (!size)
3517                        continue;
3518
3519                set_pageblock_order(pageblock_default_order());
3520                setup_usemap(pgdat, zone, size);
3521                ret = init_currently_empty_zone(zone, zone_start_pfn,
3522                                                size, MEMMAP_EARLY);
3523                BUG_ON(ret);
3524                memmap_init(size, nid, j, zone_start_pfn);
3525                zone_start_pfn += size;
3526        }
3527}
3528
3529static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3530{
3531        /* Skip empty nodes */
3532        if (!pgdat->node_spanned_pages)
3533                return;
3534
3535#ifdef CONFIG_FLAT_NODE_MEM_MAP
3536        /* ia64 gets its own node_mem_map, before this, without bootmem */
3537        if (!pgdat->node_mem_map) {
3538                unsigned long size, start, end;
3539                struct page *map;
3540
3541                /*
3542                 * The zone's endpoints aren't required to be MAX_ORDER
3543                 * aligned, but the node_mem_map endpoints must be, in order
3544                 * for the buddy allocator to function correctly.
3545                 */
3546                start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
3547                end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
3548                end = ALIGN(end, MAX_ORDER_NR_PAGES);
3549                size =  (end - start) * sizeof(struct page);
3550                map = alloc_remap(pgdat->node_id, size);
3551                if (!map)
3552                        map = alloc_bootmem_node(pgdat, size);
3553                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
3554        }
3555#ifndef CONFIG_NEED_MULTIPLE_NODES
3556        /*
3557         * With no DISCONTIG, the global mem_map is just set as node 0's
3558         */
3559        if (pgdat == NODE_DATA(0)) {
3560                mem_map = NODE_DATA(0)->node_mem_map;
3561#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3562                if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
3563                        mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
3564#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
3565        }
3566#endif
3567#endif /* CONFIG_FLAT_NODE_MEM_MAP */
3568}
3569
3570void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
3571                unsigned long node_start_pfn, unsigned long *zholes_size)
3572{
3573        pg_data_t *pgdat = NODE_DATA(nid);
3574
3575        pgdat->node_id = nid;
3576        pgdat->node_start_pfn = node_start_pfn;
3577        calculate_node_totalpages(pgdat, zones_size, zholes_size);
3578
3579        alloc_node_mem_map(pgdat);
3580#ifdef CONFIG_FLAT_NODE_MEM_MAP
3581        printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
3582                nid, (unsigned long)pgdat,
3583                (unsigned long)pgdat->node_mem_map);
3584#endif
3585
3586        free_area_init_core(pgdat, zones_size, zholes_size);
3587}
3588
3589#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
3590
3591#if MAX_NUMNODES > 1
3592/*
3593 * Figure out the number of possible node ids.
3594 */
3595static void __init setup_nr_node_ids(void)
3596{
3597        unsigned int node;
3598        unsigned int highest = 0;
3599
3600        for_each_node_mask(node, node_possible_map)
3601                highest = node;
3602        nr_node_ids = highest + 1;
3603}
3604#else
3605static inline void setup_nr_node_ids(void)
3606{
3607}
3608#endif
3609
3610/**
3611 * add_active_range - Register a range of PFNs backed by physical memory
3612 * @nid: The node ID the range resides on
3613 * @start_pfn: The start PFN of the available physical memory
3614 * @end_pfn: The end PFN of the available physical memory
3615 *
3616 * These ranges are stored in an early_node_map[] and later used by
3617 * free_area_init_nodes() to calculate zone sizes and holes. If the
3618 * range spans a memory hole, it is up to the architecture to ensure
3619 * the memory is not freed by the bootmem allocator. If possible
3620 * the range being registered will be merged with existing ranges.
3621 */
3622void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3623                                                unsigned long end_pfn)
3624{
3625        int i;
3626
3627        mminit_dprintk(MMINIT_TRACE, "memory_register",
3628                        "Entering add_active_range(%d, %#lx, %#lx) "
3629                        "%d entries of %d used\n",
3630                        nid, start_pfn, end_pfn,
3631                        nr_nodemap_entries, MAX_ACTIVE_REGIONS);
3632
3633        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
3634
3635        /* Merge with existing active regions if possible */
3636        for (i = 0; i < nr_nodemap_entries; i++) {
3637                if (early_node_map[i].nid != nid)
3638                        continue;
3639
3640                /* Skip if an existing region covers this new one */
3641                if (start_pfn >= early_node_map[i].start_pfn &&
3642                                end_pfn <= early_node_map[i].end_pfn)
3643                        return;
3644
3645                /* Merge forward if suitable */
3646                if (start_pfn <= early_node_map[i].end_pfn &&
3647                                end_pfn > early_node_map[i].end_pfn) {
3648                        early_node_map[i].end_pfn = end_pfn;
3649                        return;
3650                }
3651
3652                /* Merge backward if suitable */
3653                if (start_pfn < early_node_map[i].end_pfn &&
3654                                end_pfn >= early_node_map[i].start_pfn) {
3655                        early_node_map[i].start_pfn = start_pfn;
3656                        return;
3657                }
3658        }
3659
3660        /* Check that early_node_map is large enough */
3661        if (i >= MAX_ACTIVE_REGIONS) {
3662                printk(KERN_CRIT "More than %d memory regions, truncating\n",
3663                                                        MAX_ACTIVE_REGIONS);
3664                return;
3665        }
3666
3667        early_node_map[i].nid = nid;
3668        early_node_map[i].start_pfn = start_pfn;
3669        early_node_map[i].end_pfn = end_pfn;
3670        nr_nodemap_entries = i + 1;
3671}
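
/*
 * A usage sketch with hypothetical PFNs: during early boot an architecture
 * might register two discontiguous banks on node 0 like
 *
 *        add_active_range(0, 0, 0x9f);
 *        add_active_range(0, 0x100, 0x3fff0);
 *
 * Overlapping registrations for the same node are merged where possible, and
 * free_area_init_nodes() later uses the accumulated map to size each zone.
 */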
3672
3673/**
3674 * remove_active_range - Shrink an existing registered range of PFNs
3675 * @nid: The node id the range is on that should be shrunk
3676 * @start_pfn: The new start PFN of the range
3677 * @end_pfn: The new end PFN of the range
3678 *
3679 * i386 with NUMA uses alloc_remap() to store a node_mem_map on a local node.
3680 * The map is kept near the end physical page range that has already been
3681 * registered. This function allows an arch to shrink an existing registered
3682 * range.
3683 */
3684void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
3685                                unsigned long end_pfn)
3686{
3687        int i, j;
3688        int removed = 0;
3689
3690        printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
3691                          nid, start_pfn, end_pfn);
3692
3693        /* Find the old active region end and shrink */
3694        for_each_active_range_index_in_nid(i, nid) {
3695                if (early_node_map[i].start_pfn >= start_pfn &&
3696                    early_node_map[i].end_pfn <= end_pfn) {
3697                        /* clear it */
3698                        early_node_map[i].start_pfn = 0;
3699                        early_node_map[i].end_pfn = 0;
3700                        removed = 1;
3701                        continue;
3702                }
3703                if (early_node_map[i].start_pfn < start_pfn &&
3704                    early_node_map[i].end_pfn > start_pfn) {
3705                        unsigned long temp_end_pfn = early_node_map[i].end_pfn;
3706                        early_node_map[i].end_pfn = start_pfn;
3707                        if (temp_end_pfn > end_pfn)
3708                                add_active_range(nid, end_pfn, temp_end_pfn);
3709                        continue;
3710                }
3711                if (early_node_map[i].start_pfn >= start_pfn &&
3712                    early_node_map[i].end_pfn > end_pfn &&
3713                    early_node_map[i].start_pfn < end_pfn) {
3714                        early_node_map[i].start_pfn = end_pfn;
3715                        continue;
3716                }
3717        }
3718
3719        if (!removed)
3720                return;
3721
3722        /* remove the blank ones */
3723        for (i = nr_nodemap_entries - 1; i > 0; i--) {
3724                if (early_node_map[i].nid != nid)
3725                        continue;
3726                if (early_node_map[i].end_pfn)
3727                        continue;
3728                /* we found it, get rid of it */
3729                for (j = i; j < nr_nodemap_entries - 1; j++)
3730                        memcpy(&early_node_map[j], &early_node_map[j+1],
3731                                sizeof(early_node_map[j]));
3732                j = nr_nodemap_entries - 1;
3733                memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
3734                nr_nodemap_entries--;
3735        }
3736}
3737
3738/**
3739 * remove_all_active_ranges - Remove all currently registered regions
3740 *
3741 * During discovery, it may be found that a table like SRAT is invalid
3742 * and an alternative discovery method must be used. This function removes
3743 * all currently registered regions.
3744 */
3745void __init remove_all_active_ranges(void)
3746{
3747        memset(early_node_map, 0, sizeof(early_node_map));
3748        nr_nodemap_entries = 0;
3749#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
3750        memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
3751        memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
3752#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
3753}
3754
3755/* Compare two active node_active_regions */
3756static int __init cmp_node_active_region(const void *a, const void *b)
3757{
3758        struct node_active_region *arange = (struct node_active_region *)a;
3759        struct node_active_region *brange = (struct node_active_region *)b;
3760
3761        /* Done this way to avoid overflows */
3762        if (arange->start_pfn > brange->start_pfn)
3763                return 1;
3764        if (arange->start_pfn < brange->start_pfn)
3765                return -1;
3766
3767        return 0;
3768}
3769
3770/* sort the node_map by start_pfn */
3771static void __init sort_node_map(void)
3772{
3773        sort(early_node_map, (size_t)nr_nodemap_entries,
3774                        sizeof(struct node_active_region),
3775                        cmp_node_active_region, NULL);
3776}
3777
3778/* Find the lowest pfn for a node */
3779static unsigned long __init find_min_pfn_for_node(int nid)
3780{
3781        int i;
3782        unsigned long min_pfn = ULONG_MAX;
3783
3784        /* Assuming a sorted map, the first range found has the starting pfn */
3785        for_each_active_range_index_in_nid(i, nid)
3786                min_pfn = min(min_pfn, early_node_map[i].start_pfn);
3787
3788        if (min_pfn == ULONG_MAX) {
3789                printk(KERN_WARNING
3790                        "Could not find start_pfn for node %d\n", nid);
3791                return 0;
3792        }
3793
3794        return min_pfn;
3795}
3796
3797/**
3798 * find_min_pfn_with_active_regions - Find the minimum PFN registered
3799 *
3800 * It returns the minimum PFN based on information provided via
3801 * add_active_range().
3802 */
3803unsigned long __init find_min_pfn_with_active_regions(void)
3804{
3805        return find_min_pfn_for_node(MAX_NUMNODES);
3806}
3807
3808/*
3809 * early_calculate_totalpages()
3810 * Sum pages in active regions for movable zone.
3811 * Populate N_HIGH_MEMORY for calculating usable_nodes.
3812 */
3813static unsigned long __init early_calculate_totalpages(void)
3814{
3815        int i;
3816        unsigned long totalpages = 0;
3817
3818        for (i = 0; i < nr_nodemap_entries; i++) {
3819                unsigned long pages = early_node_map[i].end_pfn -
3820                                                early_node_map[i].start_pfn;
3821                totalpages += pages;
3822                if (pages)
3823                        node_set_state(early_node_map[i].nid, N_HIGH_MEMORY);
3824        }
3825        return totalpages;
3826}
3827
3828/*
3829 * Find the PFN at which the Movable zone begins in each node. Kernel memory
3830 * is spread evenly between nodes as long as the nodes have enough
3831 * memory. When they don't, some nodes will have more kernelcore than
3832 * others
3833 */
3834static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
3835{
3836        int i, nid;
3837        unsigned long usable_startpfn;
3838        unsigned long kernelcore_node, kernelcore_remaining;
3839        unsigned long totalpages = early_calculate_totalpages();
3840        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
3841
3842        /*
3843         * If movablecore was specified, calculate the corresponding
3844         * size of kernelcore so that memory usable for any allocation
3845         * type is evenly spread. If both kernelcore
3846         * and movablecore are specified, then the value of kernelcore
3847         * will be used for required_kernelcore if it's greater than
3848         * what movablecore would have allowed.
3849         */
3850        if (required_movablecore) {
3851                unsigned long corepages;
3852
3853                /*
3854                 * Round-up so that ZONE_MOVABLE is at least as large as what
3855                 * was requested by the user
3856                 */
3857                required_movablecore =
3858                        roundup(required_movablecore, MAX_ORDER_NR_PAGES);
3859                corepages = totalpages - required_movablecore;
3860
3861                required_kernelcore = max(required_kernelcore, corepages);
3862        }
3863
3864        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
3865        if (!required_kernelcore)
3866                return;
3867
3868        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
3869        find_usable_zone_for_movable();
3870        usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
3871
3872restart:
3873        /* Spread kernelcore memory as evenly as possible throughout nodes */
3874        kernelcore_node = required_kernelcore / usable_nodes;
3875        for_each_node_state(nid, N_HIGH_MEMORY) {
3876                /*
3877                 * Recalculate kernelcore_node if the division per node
3878                 * now exceeds what is necessary to satisfy the requested
3879                 * amount of memory for the kernel
3880                 */
3881                if (required_kernelcore < kernelcore_node)
3882                        kernelcore_node = required_kernelcore / usable_nodes;
3883
3884                /*
3885                 * As the map is walked, we track how much memory is usable
3886                 * by the kernel using kernelcore_remaining. When it is
3887                 * 0, the rest of the node is usable by ZONE_MOVABLE
3888                 */
3889                kernelcore_remaining = kernelcore_node;
3890
3891                /* Go through each range of PFNs within this node */
3892                for_each_active_range_index_in_nid(i, nid) {
3893                        unsigned long start_pfn, end_pfn;
3894                        unsigned long size_pages;
3895
3896                        start_pfn = max(early_node_map[i].start_pfn,
3897                                                zone_movable_pfn[nid]);
3898                        end_pfn = early_node_map[i].end_pfn;
3899                        if (start_pfn >= end_pfn)
3900                                continue;
3901
3902                        /* Account for what is only usable for kernelcore */
3903                        if (start_pfn < usable_startpfn) {
3904                                unsigned long kernel_pages;
3905                                kernel_pages = min(end_pfn, usable_startpfn)
3906                                                                - start_pfn;
3907
3908                                kernelcore_remaining -= min(kernel_pages,
3909                                                        kernelcore_remaining);
3910                                required_kernelcore -= min(kernel_pages,
3911                                                        required_kernelcore);
3912
3913                                /* Continue if range is now fully accounted */
3914                                if (end_pfn <= usable_startpfn) {
3915
3916                                        /*
3917                                         * Push zone_movable_pfn to the end so
3918                                         * that if we have to rebalance
3919                                         * kernelcore across nodes, we will
3920                                         * not double account here
3921                                         */
3922                                        zone_movable_pfn[nid] = end_pfn;
3923                                        continue;
3924                                }
3925                                start_pfn = usable_startpfn;
3926                        }
3927
3928                        /*
3929                         * The usable PFN range for ZONE_MOVABLE is from
3930                         * start_pfn->end_pfn. Calculate size_pages as the
3931                         * number of pages used as kernelcore
3932                         */
3933                        size_pages = end_pfn - start_pfn;
3934                        if (size_pages > kernelcore_remaining)
3935                                size_pages = kernelcore_remaining;
3936                        zone_movable_pfn[nid] = start_pfn + size_pages;
3937
3938                        /*
3939                         * Some kernelcore has been met, update counts and
3940                         * break if the kernelcore for this node has been
3941                         * satisfied
3942                         */
3943                        required_kernelcore -= min(required_kernelcore,
3944                                                                size_pages);
3945                        kernelcore_remaining -= size_pages;
3946                        if (!kernelcore_remaining)
3947                                break;
3948                }
3949        }
3950
3951        /*
3952         * If there is still required_kernelcore, we do another pass with one
3953         * less node in the count. This will push zone_movable_pfn[nid] further
3954         * along on the nodes that still have memory until kernelcore is
3955 * satisfied
3956         */
3957        usable_nodes--;
3958        if (usable_nodes && required_kernelcore > usable_nodes)
3959                goto restart;
3960
3961        /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
3962        for (nid = 0; nid < MAX_NUMNODES; nid++)
3963                zone_movable_pfn[nid] =
3964                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
3965}
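
/*
 * A worked example with made-up numbers, assuming both nodes lie entirely
 * within the usable zone for ZONE_MOVABLE: two nodes of 262144 pages each
 * and a kernelcore= request of 262144 pages gives each node 131072 pages of
 * kernelcore, so zone_movable_pfn[] ends up 131072 pages past each node's
 * start (rounded up to MAX_ORDER_NR_PAGES) and the rest of each node becomes
 * ZONE_MOVABLE.
 */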
3966
3967/* Any regular memory on that node ? */
3968static void check_for_regular_memory(pg_data_t *pgdat)
3969{
3970#ifdef CONFIG_HIGHMEM
3971        enum zone_type zone_type;
3972
3973        for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
3974                struct zone *zone = &pgdat->node_zones[zone_type];
3975                if (zone->present_pages)
3976                        node_set_state(zone_to_nid(zone), N_NORMAL_MEMORY);
3977        }
3978#endif
3979}
3980
3981/**
3982 * free_area_init_nodes - Initialise all pg_data_t and zone data
3983 * @max_zone_pfn: an array of max PFNs for each zone
3984 *
3985 * This will call free_area_init_node() for each active node in the system.
3986 * Using the page ranges provided by add_active_range(), the size of each
3987 * zone in each node and their holes are calculated. If the maximum PFNs
3988 * of two adjacent zones match, it is assumed that the higher zone is empty.
3989 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
3990 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
3991 * starts where the previous one ended. For example, ZONE_DMA32 starts
3992 * at arch_max_dma_pfn.
3993 */
3994void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3995{
3996        unsigned long nid;
3997        enum zone_type i;
3998
3999        /* Sort early_node_map as initialisation assumes it is sorted */
4000        sort_node_map();
4001
4002        /* Record where the zone boundaries are */
4003        memset(arch_zone_lowest_possible_pfn, 0,
4004                                sizeof(arch_zone_lowest_possible_pfn));
4005        memset(arch_zone_highest_possible_pfn, 0,
4006                                sizeof(arch_zone_highest_possible_pfn));
4007        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
4008        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
4009        for (i = 1; i < MAX_NR_ZONES; i++) {
4010                if (i == ZONE_MOVABLE)
4011                        continue;
4012                arch_zone_lowest_possible_pfn[i] =
4013                        arch_zone_highest_possible_pfn[i-1];
4014                arch_zone_highest_possible_pfn[i] =
4015                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
4016        }
4017        arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
4018        arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
4019
4020        /* Find the PFNs that ZONE_MOVABLE begins at in each node */
4021        memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
4022        find_zone_movable_pfns_for_nodes(zone_movable_pfn);
4023
4024        /* Print out the zone ranges */
4025        printk("Zone PFN ranges:\n");
4026        for (i = 0; i < MAX_NR_ZONES; i++) {
4027                if (i == ZONE_MOVABLE)
4028                        continue;
4029                printk("  %-8s %0#10lx -> %0#10lx\n",
4030                                zone_names[i],
4031                                arch_zone_lowest_possible_pfn[i],
4032                                arch_zone_highest_possible_pfn[i]);
4033        }
4034
4035        /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4036        printk("Movable zone start PFN for each node\n");
4037        for (i = 0; i < MAX_NUMNODES; i++) {
4038                if (zone_movable_pfn[i])
4039                        printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
4040        }
4041
4042        /* Print out the early_node_map[] */
4043        printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
4044        for (i = 0; i < nr_nodemap_entries; i++)
4045                printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
4046                                                early_node_map[i].start_pfn,
4047                                                early_node_map[i].end_pfn);
4048
4049        /* Initialise every node */
4050        mminit_verify_pageflags_layout();
4051        setup_nr_node_ids();
4052        for_each_online_node(nid) {
4053                pg_data_t *pgdat = NODE_DATA(nid);
4054                free_area_init_node(nid, NULL,
4055                                find_min_pfn_for_node(nid), NULL);
4056
4057                /* Any memory on that node */
4058                if (pgdat->node_present_pages)
4059                        node_set_state(nid, N_HIGH_MEMORY);
4060                check_for_regular_memory(pgdat);
4061        }
4062}
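
/*
 * A usage sketch, not taken from any particular architecture: once all
 * memory has been registered with add_active_range(), arch setup code
 * typically fills in the maximum PFN of each configured zone and calls
 *
 *        unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *        max_zone_pfns[ZONE_DMA] = dma_limit_pfn;
 *        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *        free_area_init_nodes(max_zone_pfns);
 *
 * where dma_limit_pfn and max_low_pfn stand in for arch-specific limits and
 * the zone indices depend on the kernel configuration.
 */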
4063
4064static int __init cmdline_parse_core(char *p, unsigned long *core)
4065{
4066        unsigned long long coremem;
4067        if (!p)
4068                return -EINVAL;
4069
4070        coremem = memparse(p, &p);
4071        *core = coremem >> PAGE_SHIFT;
4072
4073        /* Paranoid check that UL is enough for the coremem value */
4074        WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
4075
4076        return 0;
4077}
4078
4079/*
4080 * kernelcore=size sets the amount of memory to use for allocations that
4081 * cannot be reclaimed or migrated.
4082 */
4083static int __init cmdline_parse_kernelcore(char *p)
4084{
4085        return cmdline_parse_core(p, &required_kernelcore);
4086}
4087
4088/*
4089 * movablecore=size sets the amount of memory to use for allocations that
4090 * can be reclaimed or migrated.
4091 */
4092static int __init cmdline_parse_movablecore(char *p)
4093{
4094        return cmdline_parse_core(p, &required_movablecore);
4095}
4096
4097early_param("kernelcore", cmdline_parse_kernelcore);
4098early_param("movablecore", cmdline_parse_movablecore);
4099
4100#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
4101
4102/**
4103 * set_dma_reserve - set the specified number of pages reserved in the first zone
4104 * @new_dma_reserve: The number of pages to mark reserved
4105 *
4106 * The per-cpu batchsize and zone watermarks are determined by present_pages.
4107 * In the DMA zone, a significant percentage may be consumed by kernel image
4108 * and other unfreeable allocations which can skew the watermarks badly. This
4109 * function may optionally be used to account for unfreeable pages in the
4110 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
4111 * smaller per-cpu batchsize.
4112 */
4113void __init set_dma_reserve(unsigned long new_dma_reserve)
4114{
4115        dma_reserve = new_dma_reserve;
4116}
4117
4118#ifndef CONFIG_NEED_MULTIPLE_NODES
4119struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
4120EXPORT_SYMBOL(contig_page_data);
4121#endif
4122
4123void __init free_area_init(unsigned long *zones_size)
4124{
4125        free_area_init_node(0, zones_size,
4126                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
4127}
4128
4129static int page_alloc_cpu_notify(struct notifier_block *self,
4130                                 unsigned long action, void *hcpu)
4131{
4132        int cpu = (unsigned long)hcpu;
4133
4134        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
4135                drain_pages(cpu);
4136
4137                /*
4138                 * Spill the event counters of the dead processor
4139                 * into the current processor's event counters.
4140                 * This artificially elevates the count of the current
4141                 * processor.
4142                 */
4143                vm_events_fold_cpu(cpu);
4144
4145                /*
4146                 * Zero the differential counters of the dead processor
4147                 * so that the vm statistics are consistent.
4148                 *
4149                 * This is only okay since the processor is dead and cannot
4150                 * race with what we are doing.
4151                 */
4152                refresh_cpu_vm_stats(cpu);
4153        }
4154        return NOTIFY_OK;
4155}
4156
4157void __init page_alloc_init(void)
4158{
4159        hotcpu_notifier(page_alloc_cpu_notify, 0);
4160}
4161
4162/*
4163 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
4164 *      or min_free_kbytes changes.
4165 */
4166static void calculate_totalreserve_pages(void)
4167{
4168        struct pglist_data *pgdat;
4169        unsigned long reserve_pages = 0;
4170        enum zone_type i, j;
4171
4172        for_each_online_pgdat(pgdat) {
4173                for (i = 0; i < MAX_NR_ZONES; i++) {
4174                        struct zone *zone = pgdat->node_zones + i;
4175                        unsigned long max = 0;
4176
4177                        /* Find valid and maximum lowmem_reserve in the zone */
4178                        for (j = i; j < MAX_NR_ZONES; j++) {
4179                                if (zone->lowmem_reserve[j] > max)
4180                                        max = zone->lowmem_reserve[j];
4181                        }
4182
4183                        /* we treat pages_high as reserved pages. */
4184                        max += zone->pages_high;
4185
4186                        if (max > zone->present_pages)
4187                                max = zone->present_pages;
4188                        reserve_pages += max;
4189                }
4190        }
4191        totalreserve_pages = reserve_pages;
4192}
4193
4194/*
4195 * setup_per_zone_lowmem_reserve - called whenever
4196 *      sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
4197 *      has a correct pages reserved value, so an adequate number of
4198 *      pages are left in the zone after a successful __alloc_pages().
4199 */
4200static void setup_per_zone_lowmem_reserve(void)
4201{
4202        struct pglist_data *pgdat;
4203        enum zone_type j, idx;
4204
4205        for_each_online_pgdat(pgdat) {
4206                for (j = 0; j < MAX_NR_ZONES; j++) {
4207                        struct zone *zone = pgdat->node_zones + j;
4208                        unsigned long present_pages = zone->present_pages;
4209
4210                        zone->lowmem_reserve[j] = 0;
4211
4212                        idx = j;
4213                        while (idx) {
4214                                struct zone *lower_zone;
4215
4216                                idx--;
4217
4218                                if (sysctl_lowmem_reserve_ratio[idx] < 1)
4219                                        sysctl_lowmem_reserve_ratio[idx] = 1;
4220
4221                                lower_zone = pgdat->node_zones + idx;
4222                                lower_zone->lowmem_reserve[j] = present_pages /
4223                                        sysctl_lowmem_reserve_ratio[idx];
4224                                present_pages += lower_zone->present_pages;
4225                        }
4226                }
4227        }
4228
4229        /* update totalreserve_pages */
4230        calculate_totalreserve_pages();
4231}
4232
4233/**
4234 * setup_per_zone_pages_min - called when min_free_kbytes changes.
4235 *
4236 * Ensures that the pages_{min,low,high} values for each zone are set correctly
4237 * with respect to min_free_kbytes.
4238 */
4239void setup_per_zone_pages_min(void)
4240{
4241        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4242        unsigned long lowmem_pages = 0;
4243        struct zone *zone;
4244        unsigned long flags;
4245
4246        /* Calculate total number of !ZONE_HIGHMEM pages */
4247        for_each_zone(zone) {
4248                if (!is_highmem(zone))
4249                        lowmem_pages += zone->present_pages;
4250        }
4251
4252        for_each_zone(zone) {
4253                u64 tmp;
4254
4255                spin_lock_irqsave(&zone->lock, flags);
4256                tmp = (u64)pages_min * zone->present_pages;
4257                do_div(tmp, lowmem_pages);
4258                if (is_highmem(zone)) {
4259                        /*
4260                         * __GFP_HIGH and PF_MEMALLOC allocations usually don't
4261                         * need highmem pages, so cap pages_min to a small
4262                         * value here.
4263                         *
4264                         * The (pages_high-pages_low) and (pages_low-pages_min)
4265                         * deltas control asynchronous page reclaim, and so should
4266                         * not be capped for highmem.
4267                         */
4268                        int min_pages;
4269
4270                        min_pages = zone->present_pages / 1024;
4271                        if (min_pages < SWAP_CLUSTER_MAX)
4272                                min_pages = SWAP_CLUSTER_MAX;
4273                        if (min_pages > 128)
4274                                min_pages = 128;
4275                        zone->pages_min = min_pages;
4276                } else {
4277                        /*
4278                         * If it's a lowmem zone, reserve a number of pages
4279                         * proportionate to the zone's size.
4280                         */
4281                        zone->pages_min = tmp;
4282                }
4283
4284                zone->pages_low   = zone->pages_min + (tmp >> 2);
4285                zone->pages_high  = zone->pages_min + (tmp >> 1);
4286                setup_zone_migrate_reserve(zone);
4287                spin_unlock_irqrestore(&zone->lock, flags);
4288        }
4289
4290        /* update totalreserve_pages */
4291        calculate_totalreserve_pages();
4292}
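
/*
 * A worked example with made-up numbers: with min_free_kbytes = 4096 and 4K
 * pages, pages_min totals 1024 pages. A lowmem zone holding half of all
 * lowmem gets tmp = 512, so pages_min = 512, pages_low = 512 + 512/4 = 640
 * and pages_high = 512 + 512/2 = 768.
 */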
4293
4294/*
4295 * Initialise min_free_kbytes.
4296 *
4297 * For small machines we want it small (128k min).  For large machines
4298 * we want it large (64MB max).  But it is not linear, because network
4299 * bandwidth does not increase linearly with machine size.  We use
4300 *
4301 *      min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
4302 *      min_free_kbytes = sqrt(lowmem_kbytes * 16)
4303 *
4304 * which yields
4305 *
4306 * 16MB:        512k
4307 * 32MB:        724k
4308 * 64MB:        1024k
4309 * 128MB:       1448k
4310 * 256MB:       2048k
4311 * 512MB:       2896k
4312 * 1024MB:      4096k
4313 * 2048MB:      5792k
4314 * 4096MB:      8192k
4315 * 8192MB:      11584k
4316 * 16384MB:     16384k
4317 */
4318static int __init init_per_zone_pages_min(void)
4319{
4320        unsigned long lowmem_kbytes;
4321
4322        lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
4323
4324        min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
4325        if (min_free_kbytes < 128)
4326                min_free_kbytes = 128;
4327        if (min_free_kbytes > 65536)
4328                min_free_kbytes = 65536;
4329        setup_per_zone_pages_min();
4330        setup_per_zone_lowmem_reserve();
4331        return 0;
4332}
4333module_init(init_per_zone_pages_min)
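
/*
 * Worked example (illustrative): a machine with about 2GB of lowmem has
 * lowmem_kbytes ~= 2097152, so int_sqrt(2097152 * 16) ~= 5792k, matching
 * the table above.  The 128k floor only matters below roughly 1MB of
 * lowmem, and the 65536k ceiling is reached at roughly 256GB of lowmem.
 */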
4334
4335/*
4336 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
4337 *      that we can re-evaluate the per-zone watermarks whenever
4338 *      min_free_kbytes changes.
4339 */
4340int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
4341        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4342{
4343        proc_dointvec(table, write, file, buffer, length, ppos);
4344        if (write)
4345                setup_per_zone_pages_min();
4346        return 0;
4347}
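
/*
 * This handler is wired up to /proc/sys/vm/min_free_kbytes through the vm
 * table in kernel/sysctl.c, roughly like the sketch below (fields abridged):
 *
 *	{
 *		.procname	= "min_free_kbytes",
 *		.data		= &min_free_kbytes,
 *		.maxlen		= sizeof(min_free_kbytes),
 *		.mode		= 0644,
 *		.proc_handler	= &min_free_kbytes_sysctl_handler,
 *	},
 *
 * so writing that file updates min_free_kbytes and immediately recomputes
 * the per-zone watermarks.
 */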
4348
4349#ifdef CONFIG_NUMA
4350int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
4351        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4352{
4353        struct zone *zone;
4354        int rc;
4355
4356        rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4357        if (rc)
4358                return rc;
4359
4360        for_each_zone(zone)
4361                zone->min_unmapped_pages = (zone->present_pages *
4362                                sysctl_min_unmapped_ratio) / 100;
4363        return 0;
4364}
4365
4366int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
4367        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4368{
4369        struct zone *zone;
4370        int rc;
4371
4372        rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4373        if (rc)
4374                return rc;
4375
4376        for_each_zone(zone)
4377                zone->min_slab_pages = (zone->present_pages *
4378                                sysctl_min_slab_ratio) / 100;
4379        return 0;
4380}
4381#endif
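
/*
 * Worked example (illustrative): with sysctl_min_unmapped_ratio = 1 and a
 * zone of 262144 present pages (1GB with 4K pages), the handler above sets
 * zone->min_unmapped_pages to 262144 * 1 / 100 = 2621 pages;
 * zone->min_slab_pages is derived the same way from sysctl_min_slab_ratio.
 */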
4382
4383/*
4384 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
4385 *      proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
4386 *      whenever sysctl_lowmem_reserve_ratio changes.
4387 *
4388 * The reserve ratio has no relation to the pages_min watermarks.
4389 * The lowmem reserve ratio is only meaningful as a function of the
4390 * boot-time zone sizes.
4391 */
4392int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
4393        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4394{
4395        proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4396        setup_per_zone_lowmem_reserve();
4397        return 0;
4398}
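
/*
 * Note: /proc/sys/vm/lowmem_reserve_ratio takes one integer per element of
 * sysctl_lowmem_reserve_ratio[], i.e. one ratio per zone below the topmost
 * zone, and each write re-runs setup_per_zone_lowmem_reserve().
 */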
4399
4400/*
4401 * percpu_pagelist_fraction - changes pcp->high for each zone on each
4402 * cpu.  A hot per-cpu pagelist may hold up to 1/percpu_pagelist_fraction
4403 * of the zone's pages before it is flushed back to the buddy allocator.
4404 */
4405
4406int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
4407        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
4408{
4409        struct zone *zone;
4410        unsigned int cpu;
4411        int ret;
4412
4413        ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
4414        if (!write || (ret == -EINVAL))
4415                return ret;
4416        for_each_zone(zone) {
4417                if (!populated_zone(zone))
4418                        continue;
4419                for_each_online_cpu(cpu) {
4420                        unsigned long  high;
4421                        high = zone->present_pages / percpu_pagelist_fraction;
4422                        setup_pagelist_highmark(zone_pcp(zone, cpu), high);
4423                }
4424        }
4425        return 0;
4426}
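
/*
 * Worked example (illustrative): writing 8 to
 * /proc/sys/vm/percpu_pagelist_fraction makes a zone of 131072 present
 * pages (512MB with 4K pages) set pcp->high to 131072 / 8 = 16384 pages
 * for the hot per-cpu pagelist of every online CPU in that zone.
 */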
4427
4428int hashdist = HASHDIST_DEFAULT;
4429
4430#ifdef CONFIG_NUMA
4431static int __init set_hashdist(char *str)
4432{
4433        if (!str)
4434                return 0;
4435        hashdist = simple_strtoul(str, &str, 0);
4436        return 1;
4437}
4438__setup("hashdist=", set_hashdist);
4439#endif
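
/*
 * "hashdist=1" on the kernel command line (typically the default on 64-bit
 * NUMA builds) makes alloc_large_system_hash() below use vmalloc for
 * non-early tables, so the hash pages can be spread across nodes instead
 * of coming from a single node's lowmem.
 */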
4440
4441/*
4442 * allocate a large system hash table (from bootmem, vmalloc or the page allocator)
4443 * - it is assumed that the hash table must contain an exact power-of-2
4444 *   quantity of entries
4445 * - limit is the number of hash buckets, not the total allocation size
4446 */
4447void *__init alloc_large_system_hash(const char *tablename,
4448                                     unsigned long bucketsize,
4449                                     unsigned long numentries,
4450                                     int scale,
4451                                     int flags,
4452                                     unsigned int *_hash_shift,
4453                                     unsigned int *_hash_mask,
4454                                     unsigned long limit)
4455{
4456        unsigned long long max = limit;
4457        unsigned long log2qty, size;
4458        void *table = NULL;
4459
4460        /* allow the kernel cmdline to have a say */
4461        if (!numentries) {
4462                /* round applicable memory size up to nearest megabyte */
4463                numentries = nr_kernel_pages;
4464                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
4465                numentries >>= 20 - PAGE_SHIFT;
4466                numentries <<= 20 - PAGE_SHIFT;
4467
4468                /* limit to 1 bucket per 2^scale bytes of low memory */
4469                if (scale > PAGE_SHIFT)
4470                        numentries >>= (scale - PAGE_SHIFT);
4471                else
4472                        numentries <<= (PAGE_SHIFT - scale);
4473
4474                /* Make sure we've got at least a 0-order allocation. */
4475                if (unlikely((numentries * bucketsize) < PAGE_SIZE))
4476                        numentries = PAGE_SIZE / bucketsize;
4477        }
4478        numentries = roundup_pow_of_two(numentries);
4479
4480        /* limit allocation size to 1/16 total memory by default */
4481        if (max == 0) {
4482                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
4483                do_div(max, bucketsize);
4484        }
4485
4486        if (numentries > max)
4487                numentries = max;
4488
4489        log2qty = ilog2(numentries);
4490
4491        do {
4492                size = bucketsize << log2qty;
4493                if (flags & HASH_EARLY)
4494                        table = alloc_bootmem_nopanic(size);
4495                else if (hashdist)
4496                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
4497                else {
4498                        unsigned long order = get_order(size);
4499                        table = (void*) __get_free_pages(GFP_ATOMIC, order);
4500                        /*
4501                         * If bucketsize is not a power-of-two, we may free
4502                         * some pages at the end of hash table.
4503                         */
4504                        if (table) {
4505                                unsigned long alloc_end = (unsigned long)table +
4506                                                (PAGE_SIZE << order);
4507                                unsigned long used = (unsigned long)table +
4508                                                PAGE_ALIGN(size);
4509                                split_page(virt_to_page(table), order);
4510                                while (used < alloc_end) {
4511                                        free_page(used);
4512                                        used += PAGE_SIZE;
4513                                }
4514                        }
4515                }
4516        } while (!table && size > PAGE_SIZE && --log2qty);
4517
4518        if (!table)
4519                panic("Failed to allocate %s hash table\n", tablename);
4520
4521        printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
4522               tablename,
4523               (1U << log2qty),
4524               ilog2(size) - PAGE_SHIFT,
4525               size);
4526
4527        if (_hash_shift)
4528                *_hash_shift = log2qty;
4529        if (_hash_mask)
4530                *_hash_mask = (1 << log2qty) - 1;
4531
4532        return table;
4533}
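
/*
 * Illustrative caller (a sketch only; the "example_*" names are
 * hypothetical, the shape mirrors boot-time users such as the dentry and
 * inode cache setup): passing numentries == 0 sizes the table from the
 * amount of low memory, scale == 13 asks for one bucket per 8KB of lowmem,
 * and HASH_EARLY routes the allocation through bootmem.
 */
static struct hlist_head *example_hashtable;
static unsigned int example_hash_shift;
static unsigned int example_hash_mask;

static void __init example_hash_init(void)
{
	example_hashtable =
		alloc_large_system_hash("Example cache",
					sizeof(struct hlist_head),
					0,		/* size from memory */
					13,		/* 1 bucket per 8KB of lowmem */
					HASH_EARLY,	/* allocate from bootmem */
					&example_hash_shift,
					&example_hash_mask,
					0);		/* no explicit limit */
}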
4534
4535#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
4536struct page *pfn_to_page(unsigned long pfn)
4537{
4538        return __pfn_to_page(pfn);
4539}
4540unsigned long page_to_pfn(struct page *page)
4541{
4542        return __page_to_pfn(page);
4543}
4544EXPORT_SYMBOL(pfn_to_page);
4545EXPORT_SYMBOL(page_to_pfn);
4546#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
4547
4548/* Return a pointer to the bitmap storing bits affecting a block of pages */
4549static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
4550                                                        unsigned long pfn)
4551{
4552#ifdef CONFIG_SPARSEMEM
4553        return __pfn_to_section(pfn)->pageblock_flags;
4554#else
4555        return zone->pageblock_flags;
4556#endif /* CONFIG_SPARSEMEM */
4557}
4558
4559static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4560{
4561#ifdef CONFIG_SPARSEMEM
4562        pfn &= (PAGES_PER_SECTION-1);
4563        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4564#else
4565        pfn = pfn - zone->zone_start_pfn;
4566        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
4567#endif /* CONFIG_SPARSEMEM */
4568}
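
/*
 * Each pageblock owns NR_PAGEBLOCK_BITS consecutive bits in the bitmap, so
 * the Nth pageblock within a section (SPARSEMEM) or within the zone
 * (otherwise) starts at bit N * NR_PAGEBLOCK_BITS.
 */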
4569
4570/**
4571 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
4572 * @page: The page within the block of interest
4573 * @start_bitidx: The first bit of interest to retrieve
4574 * @end_bitidx: The last bit of interest
4575 * returns pageblock_bits flags
4576 */
4577unsigned long get_pageblock_flags_group(struct page *page,
4578                                        int start_bitidx, int end_bitidx)
4579{
4580        struct zone *zone;
4581        unsigned long *bitmap;
4582        unsigned long pfn, bitidx;
4583        unsigned long flags = 0;
4584        unsigned long value = 1;
4585
4586        zone = page_zone(page);
4587        pfn = page_to_pfn(page);
4588        bitmap = get_pageblock_bitmap(zone, pfn);
4589        bitidx = pfn_to_bitidx(zone, pfn);
4590
4591        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4592                if (test_bit(bitidx + start_bitidx, bitmap))
4593                        flags |= value;
4594
4595        return flags;
4596}
4597
4598/**
4599 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4600 * @page: The page within the block of interest
4601 * @start_bitidx: The first bit of interest
4602 * @end_bitidx: The last bit of interest
4603 * @flags: The flags to set
4604 */
4605void set_pageblock_flags_group(struct page *page, unsigned long flags,
4606                                        int start_bitidx, int end_bitidx)
4607{
4608        struct zone *zone;
4609        unsigned long *bitmap;
4610        unsigned long pfn, bitidx;
4611        unsigned long value = 1;
4612
4613        zone = page_zone(page);
4614        pfn = page_to_pfn(page);
4615        bitmap = get_pageblock_bitmap(zone, pfn);
4616        bitidx = pfn_to_bitidx(zone, pfn);
4617        VM_BUG_ON(pfn < zone->zone_start_pfn);
4618        VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
4619
4620        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4621                if (flags & value)
4622                        __set_bit(bitidx + start_bitidx, bitmap);
4623                else
4624                        __clear_bit(bitidx + start_bitidx, bitmap);
4625}
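
/*
 * For example, the pageblock migratetype accessors are thin wrappers around
 * the two helpers above, passing the PB_migrate..PB_migrate_end bit range
 * (defined in include/linux/pageblock-flags.h), roughly:
 *
 *	migratetype = get_pageblock_flags_group(page, PB_migrate,
 *						PB_migrate_end);
 *	set_pageblock_flags_group(page, migratetype, PB_migrate,
 *						PB_migrate_end);
 */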
4626
4627/*
4628 * These are designed as helpers for mm/page_isolation.c; see that file too.
4629 * They set/clear a pageblock's migrate type to/from ISOLATE.
4630 * The page allocator never allocates memory from an ISOLATE block.
4631 */
4632
4633int set_migratetype_isolate(struct page *page)
4634{
4635        struct zone *zone;
4636        unsigned long flags;
4637        int ret = -EBUSY;
4638
4639        zone = page_zone(page);
4640        spin_lock_irqsave(&zone->lock, flags);
4641        /*
4642         * In the future, more migrate types will be able to be isolation targets.
4643         */
4644        if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
4645                goto out;
4646        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
4647        move_freepages_block(zone, page, MIGRATE_ISOLATE);
4648        ret = 0;
4649out:
4650        spin_unlock_irqrestore(&zone->lock, flags);
4651        if (!ret)
4652                drain_all_pages();
4653        return ret;
4654}
4655
4656void unset_migratetype_isolate(struct page *page)
4657{
4658        struct zone *zone;
4659        unsigned long flags;
4660        zone = page_zone(page);
4661        spin_lock_irqsave(&zone->lock, flags);
4662        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
4663                goto out;
4664        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
4665        move_freepages_block(zone, page, MIGRATE_MOVABLE);
4666out:
4667        spin_unlock_irqrestore(&zone->lock, flags);
4668}
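
/*
 * These two are used by mm/page_isolation.c (start_isolate_page_range() and
 * undo_isolate_page_range()), e.g. when memory hotplug takes a range of
 * pages offline.
 */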
4669
4670#ifdef CONFIG_MEMORY_HOTREMOVE
4671/*
4672 * All pages in the range must be isolated before calling this.
4673 */
4674void
4675__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
4676{
4677        struct page *page;
4678        struct zone *zone;
4679        int order, i;
4680        unsigned long pfn;
4681        unsigned long flags;
4682        /* find the first valid pfn */
4683        for (pfn = start_pfn; pfn < end_pfn; pfn++)
4684                if (pfn_valid(pfn))
4685                        break;
4686        if (pfn == end_pfn)
4687                return;
4688        zone = page_zone(pfn_to_page(pfn));
4689        spin_lock_irqsave(&zone->lock, flags);
4690        pfn = start_pfn;
4691        while (pfn < end_pfn) {
4692                if (!pfn_valid(pfn)) {
4693                        pfn++;
4694                        continue;
4695                }
4696                page = pfn_to_page(pfn);
4697                BUG_ON(page_count(page));
4698                BUG_ON(!PageBuddy(page));
4699                order = page_order(page);
4700#ifdef CONFIG_DEBUG_VM
4701                printk(KERN_INFO "remove from free list %lx %d %lx\n",
4702                       pfn, 1 << order, end_pfn);
4703#endif
4704                list_del(&page->lru);
4705                rmv_page_order(page);
4706                zone->free_area[order].nr_free--;
4707                __mod_zone_page_state(zone, NR_FREE_PAGES,
4708                                      - (1UL << order));
4709                for (i = 0; i < (1 << order); i++)
4710                        SetPageReserved((page+i));
4711                pfn += (1 << order);
4712        }
4713        spin_unlock_irqrestore(&zone->lock, flags);
4714}
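
/*
 * Called from the memory hotplug offline path (mm/memory_hotplug.c) once
 * every page in [start_pfn, end_pfn) has been isolated and freed back to
 * the buddy allocator; it pulls the free pages off the free lists and
 * marks them reserved so the allocator will not hand them out again.
 */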
4715#endif
4716