linux/mm/vmscan.c
   1/*
   2 *  linux/mm/vmscan.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 *
   6 *  Swap reorganised 29.12.95, Stephen Tweedie.
   7 *  kswapd added: 7.1.96  sct
   8 *  Removed kswapd_ctl limits, and swap out as many pages as needed
   9 *  to bring the system back to freepages.high: 2.4.97, Rik van Riel.
  10 *  Zone aware kswapd started 02/00, Kanoj Sarcar (kanoj@sgi.com).
  11 *  Multiqueue VM started 5.8.00, Rik van Riel.
  12 */
  13
  14#include <linux/mm.h>
  15#include <linux/module.h>
  16#include <linux/slab.h>
  17#include <linux/kernel_stat.h>
  18#include <linux/swap.h>
  19#include <linux/pagemap.h>
  20#include <linux/init.h>
  21#include <linux/highmem.h>
  22#include <linux/vmstat.h>
  23#include <linux/file.h>
  24#include <linux/writeback.h>
  25#include <linux/blkdev.h>
  26#include <linux/buffer_head.h>  /* for try_to_release_page(),
  27                                        buffer_heads_over_limit */
  28#include <linux/mm_inline.h>
  29#include <linux/pagevec.h>
  30#include <linux/backing-dev.h>
  31#include <linux/rmap.h>
  32#include <linux/topology.h>
  33#include <linux/cpu.h>
  34#include <linux/cpuset.h>
  35#include <linux/notifier.h>
  36#include <linux/rwsem.h>
  37#include <linux/delay.h>
  38#include <linux/kthread.h>
  39#include <linux/freezer.h>
  40#include <linux/memcontrol.h>
  41#include <linux/delayacct.h>
  42
  43#include <asm/tlbflush.h>
  44#include <asm/div64.h>
  45
  46#include <linux/swapops.h>
  47
  48#include "internal.h"
  49
  50struct scan_control {
  51        /* Incremented by the number of inactive pages that were scanned */
  52        unsigned long nr_scanned;
  53
  54        /* This context's GFP mask */
  55        gfp_t gfp_mask;
  56
  57        int may_writepage;
  58
  59        /* Can pages be swapped as part of reclaim? */
  60        int may_swap;
  61
  62        /* This context's SWAP_CLUSTER_MAX. If freeing memory for
  63         * suspend, we effectively ignore SWAP_CLUSTER_MAX.
  64         * In this context, it doesn't matter that we scan the
  65         * whole list at once. */
  66        int swap_cluster_max;
  67
  68        int swappiness;
  69
  70        int all_unreclaimable;
  71
  72        int order;
  73
  74        /* Which cgroup do we reclaim from */
  75        struct mem_cgroup *mem_cgroup;
  76
  77        /* Pluggable isolate pages callback */
  78        unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
  79                        unsigned long *scanned, int order, int mode,
  80                        struct zone *z, struct mem_cgroup *mem_cont,
  81                        int active);
  82};
  83
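/*
 * Pages are added at the head of each LRU list, so the entry at the
 * tail (the list head's ->prev) is the oldest page on that list;
 * lru_to_page() hands that oldest page to the reclaim code.
 */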
  84#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
  85
  86#ifdef ARCH_HAS_PREFETCH
  87#define prefetch_prev_lru_page(_page, _base, _field)                    \
  88        do {                                                            \
  89                if ((_page)->lru.prev != _base) {                       \
  90                        struct page *prev;                              \
  91                                                                        \
  92                        prev = lru_to_page(&(_page->lru));              \
  93                        prefetch(&prev->_field);                        \
  94                }                                                       \
  95        } while (0)
  96#else
  97#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
  98#endif
  99
 100#ifdef ARCH_HAS_PREFETCHW
 101#define prefetchw_prev_lru_page(_page, _base, _field)                   \
 102        do {                                                            \
 103                if ((_page)->lru.prev != _base) {                       \
 104                        struct page *prev;                              \
 105                                                                        \
 106                        prev = lru_to_page(&(_page->lru));              \
 107                        prefetchw(&prev->_field);                       \
 108                }                                                       \
 109        } while (0)
 110#else
 111#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
 112#endif
 113
 114/*
 115 * From 0 .. 100.  Higher means more swappy.
 116 */
 117int vm_swappiness = 60;
 118long vm_total_pages;    /* The total number of pages which the VM controls */
 119
 120static LIST_HEAD(shrinker_list);
 121static DECLARE_RWSEM(shrinker_rwsem);
 122
 123#ifdef CONFIG_CGROUP_MEM_RES_CTLR
 124#define scan_global_lru(sc)     (!(sc)->mem_cgroup)
 125#else
 126#define scan_global_lru(sc)     (1)
 127#endif
 128
 129/*
 130 * Add a shrinker callback to be called from the vm
 131 */
 132void register_shrinker(struct shrinker *shrinker)
 133{
 134        shrinker->nr = 0;
 135        down_write(&shrinker_rwsem);
 136        list_add_tail(&shrinker->list, &shrinker_list);
 137        up_write(&shrinker_rwsem);
 138}
 139EXPORT_SYMBOL(register_shrinker);
 140
 141/*
 142 * Remove one
 143 */
 144void unregister_shrinker(struct shrinker *shrinker)
 145{
 146        down_write(&shrinker_rwsem);
 147        list_del(&shrinker->list);
 148        up_write(&shrinker_rwsem);
 149}
 150EXPORT_SYMBOL(unregister_shrinker);
 151
 152#define SHRINK_BATCH 128
 153/*
 154 * Call the shrink functions to age shrinkable caches
 155 *
  156 * Here we assume it costs one seek to replace an LRU page and that it also
 157 * takes a seek to recreate a cache object.  With this in mind we age equal
 158 * percentages of the lru and ageable caches.  This should balance the seeks
 159 * generated by these structures.
 160 *
  161 * If the VM encountered mapped pages on the LRU it increases the pressure on
 162 * slab to avoid swapping.
 163 *
 164 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 165 *
 166 * `lru_pages' represents the number of on-LRU pages in all the zones which
 167 * are eligible for the caller's allocation attempt.  It is used for balancing
 168 * slab reclaim versus page reclaim.
 169 *
 170 * Returns the number of slab objects which we shrunk.
 171 */
 172unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 173                        unsigned long lru_pages)
 174{
 175        struct shrinker *shrinker;
 176        unsigned long ret = 0;
 177
 178        if (scanned == 0)
 179                scanned = SWAP_CLUSTER_MAX;
 180
 181        if (!down_read_trylock(&shrinker_rwsem))
 182                return 1;       /* Assume we'll be able to shrink next time */
 183
 184        list_for_each_entry(shrinker, &shrinker_list, list) {
 185                unsigned long long delta;
 186                unsigned long total_scan;
 187                unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
 188
 189                delta = (4 * scanned) / shrinker->seeks;
 190                delta *= max_pass;
 191                do_div(delta, lru_pages + 1);
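                /*
                 * Illustrative arithmetic with made-up values: for
                 * scanned = 128, seeks = 2, max_pass = 10000 and
                 * lru_pages = 100000, this works out to
                 * delta = (4 * 128 / 2) * 10000 / 100001 ~= 25
                 * objects added to this shrinker's pending scan count.
                 */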
 192                shrinker->nr += delta;
 193                if (shrinker->nr < 0) {
 194                        printk(KERN_ERR "%s: nr=%ld\n",
 195                                        __func__, shrinker->nr);
 196                        shrinker->nr = max_pass;
 197                }
 198
 199                /*
 200                 * Avoid risking looping forever due to too large nr value:
  201                 * never try to free more than twice the estimated number of
 202                 * freeable entries.
 203                 */
 204                if (shrinker->nr > max_pass * 2)
 205                        shrinker->nr = max_pass * 2;
 206
 207                total_scan = shrinker->nr;
 208                shrinker->nr = 0;
 209
 210                while (total_scan >= SHRINK_BATCH) {
 211                        long this_scan = SHRINK_BATCH;
 212                        int shrink_ret;
 213                        int nr_before;
 214
 215                        nr_before = (*shrinker->shrink)(0, gfp_mask);
 216                        shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
 217                        if (shrink_ret == -1)
 218                                break;
 219                        if (shrink_ret < nr_before)
 220                                ret += nr_before - shrink_ret;
 221                        count_vm_events(SLABS_SCANNED, this_scan);
 222                        total_scan -= this_scan;
 223
 224                        cond_resched();
 225                }
 226
 227                shrinker->nr += total_scan;
 228        }
 229        up_read(&shrinker_rwsem);
 230        return ret;
 231}
 232
  233/* Called without a lock on the page's mapped state, so the answer is unstable */
 234static inline int page_mapping_inuse(struct page *page)
 235{
 236        struct address_space *mapping;
 237
 238        /* Page is in somebody's page tables. */
 239        if (page_mapped(page))
 240                return 1;
 241
 242        /* Be more reluctant to reclaim swapcache than pagecache */
 243        if (PageSwapCache(page))
 244                return 1;
 245
 246        mapping = page_mapping(page);
 247        if (!mapping)
 248                return 0;
 249
 250        /* File is mmap'd by somebody? */
 251        return mapping_mapped(mapping);
 252}
 253
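/*
 * A pagecache page about to be reclaimed is "freeable" when the only
 * remaining references are the pagecache itself and the caller that
 * isolated it (a count of 2), with one extra reference allowed for
 * buffer heads when PagePrivate is set.
 */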
 254static inline int is_page_cache_freeable(struct page *page)
 255{
 256        return page_count(page) - !!PagePrivate(page) == 2;
 257}
 258
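/*
 * Writing back to a congested queue is only allowed when the caller is
 * a dedicated writeout task (PF_SWAPWRITE, as kswapd sets) or when it
 * is the one already writing to that queue (current->backing_dev_info),
 * in which case blocking on it causes no extra harm.
 */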
 259static int may_write_to_queue(struct backing_dev_info *bdi)
 260{
 261        if (current->flags & PF_SWAPWRITE)
 262                return 1;
 263        if (!bdi_write_congested(bdi))
 264                return 1;
 265        if (bdi == current->backing_dev_info)
 266                return 1;
 267        return 0;
 268}
 269
 270/*
 271 * We detected a synchronous write error writing a page out.  Probably
 272 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 273 * fsync(), msync() or close().
 274 *
 275 * The tricky part is that after writepage we cannot touch the mapping: nothing
 276 * prevents it from being freed up.  But we have a ref on the page and once
 277 * that page is locked, the mapping is pinned.
 278 *
 279 * We're allowed to run sleeping lock_page() here because we know the caller has
 280 * __GFP_FS.
 281 */
 282static void handle_write_error(struct address_space *mapping,
 283                                struct page *page, int error)
 284{
 285        lock_page(page);
 286        if (page_mapping(page) == mapping)
 287                mapping_set_error(mapping, error);
 288        unlock_page(page);
 289}
 290
 291/* Request for sync pageout. */
 292enum pageout_io {
 293        PAGEOUT_IO_ASYNC,
 294        PAGEOUT_IO_SYNC,
 295};
 296
 297/* possible outcome of pageout() */
 298typedef enum {
 299        /* failed to write page out, page is locked */
 300        PAGE_KEEP,
 301        /* move page to the active list, page is locked */
 302        PAGE_ACTIVATE,
 303        /* page has been sent to the disk successfully, page is unlocked */
 304        PAGE_SUCCESS,
 305        /* page is clean and locked */
 306        PAGE_CLEAN,
 307} pageout_t;
 308
 309/*
 310 * pageout is called by shrink_page_list() for each dirty page.
 311 * Calls ->writepage().
 312 */
 313static pageout_t pageout(struct page *page, struct address_space *mapping,
 314                                                enum pageout_io sync_writeback)
 315{
 316        /*
 317         * If the page is dirty, only perform writeback if that write
  318         * will be non-blocking, to prevent this allocation from being
 319         * stalled by pagecache activity.  But note that there may be
 320         * stalls if we need to run get_block().  We could test
 321         * PagePrivate for that.
 322         *
 323         * If this process is currently in generic_file_write() against
 324         * this page's queue, we can perform writeback even if that
 325         * will block.
 326         *
 327         * If the page is swapcache, write it back even if that would
 328         * block, for some throttling. This happens by accident, because
 329         * swap_backing_dev_info is bust: it doesn't reflect the
 330         * congestion state of the swapdevs.  Easy to fix, if needed.
 331         * See swapfile.c:page_queue_congested().
 332         */
 333        if (!is_page_cache_freeable(page))
 334                return PAGE_KEEP;
 335        if (!mapping) {
 336                /*
 337                 * Some data journaling orphaned pages can have
 338                 * page->mapping == NULL while being dirty with clean buffers.
 339                 */
 340                if (PagePrivate(page)) {
 341                        if (try_to_free_buffers(page)) {
 342                                ClearPageDirty(page);
 343                                printk("%s: orphaned page\n", __func__);
 344                                return PAGE_CLEAN;
 345                        }
 346                }
 347                return PAGE_KEEP;
 348        }
 349        if (mapping->a_ops->writepage == NULL)
 350                return PAGE_ACTIVATE;
 351        if (!may_write_to_queue(mapping->backing_dev_info))
 352                return PAGE_KEEP;
 353
 354        if (clear_page_dirty_for_io(page)) {
 355                int res;
 356                struct writeback_control wbc = {
 357                        .sync_mode = WB_SYNC_NONE,
 358                        .nr_to_write = SWAP_CLUSTER_MAX,
 359                        .range_start = 0,
 360                        .range_end = LLONG_MAX,
 361                        .nonblocking = 1,
 362                        .for_reclaim = 1,
 363                };
 364
 365                SetPageReclaim(page);
 366                res = mapping->a_ops->writepage(page, &wbc);
 367                if (res < 0)
 368                        handle_write_error(mapping, page, res);
 369                if (res == AOP_WRITEPAGE_ACTIVATE) {
 370                        ClearPageReclaim(page);
 371                        return PAGE_ACTIVATE;
 372                }
 373
 374                /*
 375                 * Wait on writeback if requested to. This happens when
 376                 * direct reclaiming a large contiguous area and the
 377                 * first attempt to free a range of pages fails.
 378                 */
 379                if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
 380                        wait_on_page_writeback(page);
 381
 382                if (!PageWriteback(page)) {
 383                        /* synchronous write or broken a_ops? */
 384                        ClearPageReclaim(page);
 385                }
 386                inc_zone_page_state(page, NR_VMSCAN_WRITE);
 387                return PAGE_SUCCESS;
 388        }
 389
 390        return PAGE_CLEAN;
 391}
 392
 393/*
 394 * Same as remove_mapping, but if the page is removed from the mapping, it
 395 * gets returned with a refcount of 0.
 396 */
 397static int __remove_mapping(struct address_space *mapping, struct page *page)
 398{
 399        BUG_ON(!PageLocked(page));
 400        BUG_ON(mapping != page_mapping(page));
 401
 402        spin_lock_irq(&mapping->tree_lock);
 403        /*
  404         * The non-racy check for a busy page.
 405         *
 406         * Must be careful with the order of the tests. When someone has
 407         * a ref to the page, it may be possible that they dirty it then
 408         * drop the reference. So if PageDirty is tested before page_count
 409         * here, then the following race may occur:
 410         *
 411         * get_user_pages(&page);
 412         * [user mapping goes away]
 413         * write_to(page);
 414         *                              !PageDirty(page)    [good]
 415         * SetPageDirty(page);
 416         * put_page(page);
 417         *                              !page_count(page)   [good, discard it]
 418         *
 419         * [oops, our write_to data is lost]
 420         *
 421         * Reversing the order of the tests ensures such a situation cannot
 422         * escape unnoticed. The smp_rmb is needed to ensure the page->flags
 423         * load is not satisfied before that of page->_count.
 424         *
 425         * Note that if SetPageDirty is always performed via set_page_dirty,
 426         * and thus under tree_lock, then this ordering is not required.
 427         */
 428        if (!page_freeze_refs(page, 2))
 429                goto cannot_free;
 430        /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
 431        if (unlikely(PageDirty(page))) {
 432                page_unfreeze_refs(page, 2);
 433                goto cannot_free;
 434        }
 435
 436        if (PageSwapCache(page)) {
 437                swp_entry_t swap = { .val = page_private(page) };
 438                __delete_from_swap_cache(page);
 439                spin_unlock_irq(&mapping->tree_lock);
 440                swap_free(swap);
 441        } else {
 442                __remove_from_page_cache(page);
 443                spin_unlock_irq(&mapping->tree_lock);
 444        }
 445
 446        return 1;
 447
 448cannot_free:
 449        spin_unlock_irq(&mapping->tree_lock);
 450        return 0;
 451}
 452
 453/*
 454 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 455 * someone else has a ref on the page, abort and return 0.  If it was
 456 * successfully detached, return 1.  Assumes the caller has a single ref on
 457 * this page.
 458 */
 459int remove_mapping(struct address_space *mapping, struct page *page)
 460{
 461        if (__remove_mapping(mapping, page)) {
 462                /*
 463                 * Unfreezing the refcount with 1 rather than 2 effectively
 464                 * drops the pagecache ref for us without requiring another
 465                 * atomic operation.
 466                 */
 467                page_unfreeze_refs(page, 1);
 468                return 1;
 469        }
 470        return 0;
 471}
 472
 473/*
 474 * shrink_page_list() returns the number of reclaimed pages
 475 */
 476static unsigned long shrink_page_list(struct list_head *page_list,
 477                                        struct scan_control *sc,
 478                                        enum pageout_io sync_writeback)
 479{
 480        LIST_HEAD(ret_pages);
 481        struct pagevec freed_pvec;
 482        int pgactivate = 0;
 483        unsigned long nr_reclaimed = 0;
 484
 485        cond_resched();
 486
 487        pagevec_init(&freed_pvec, 1);
 488        while (!list_empty(page_list)) {
 489                struct address_space *mapping;
 490                struct page *page;
 491                int may_enter_fs;
 492                int referenced;
 493
 494                cond_resched();
 495
 496                page = lru_to_page(page_list);
 497                list_del(&page->lru);
 498
 499                if (!trylock_page(page))
 500                        goto keep;
 501
 502                VM_BUG_ON(PageActive(page));
 503
 504                sc->nr_scanned++;
 505
 506                if (!sc->may_swap && page_mapped(page))
 507                        goto keep_locked;
 508
 509                /* Double the slab pressure for mapped and swapcache pages */
 510                if (page_mapped(page) || PageSwapCache(page))
 511                        sc->nr_scanned++;
 512
 513                may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
 514                        (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 515
 516                if (PageWriteback(page)) {
 517                        /*
 518                         * Synchronous reclaim is performed in two passes,
 519                         * first an asynchronous pass over the list to
 520                         * start parallel writeback, and a second synchronous
 521                         * pass to wait for the IO to complete.  Wait here
 522                         * for any page for which writeback has already
 523                         * started.
 524                         */
 525                        if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
 526                                wait_on_page_writeback(page);
 527                        else
 528                                goto keep_locked;
 529                }
 530
 531                referenced = page_referenced(page, 1, sc->mem_cgroup);
 532                /* In active use or really unfreeable?  Activate it. */
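                /*
                 * For costly high-order (lumpy) reclaim the referenced
                 * check is skipped: freeing a whole contiguous block
                 * matters more than sparing recently used pages.
                 */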
 533                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
 534                                        referenced && page_mapping_inuse(page))
 535                        goto activate_locked;
 536
 537#ifdef CONFIG_SWAP
 538                /*
 539                 * Anonymous process memory has backing store?
 540                 * Try to allocate it some swap space here.
 541                 */
 542                if (PageAnon(page) && !PageSwapCache(page))
 543                        if (!add_to_swap(page, GFP_ATOMIC))
 544                                goto activate_locked;
 545#endif /* CONFIG_SWAP */
 546
 547                mapping = page_mapping(page);
 548
 549                /*
 550                 * The page is mapped into the page tables of one or more
 551                 * processes. Try to unmap it here.
 552                 */
 553                if (page_mapped(page) && mapping) {
 554                        switch (try_to_unmap(page, 0)) {
 555                        case SWAP_FAIL:
 556                                goto activate_locked;
 557                        case SWAP_AGAIN:
 558                                goto keep_locked;
 559                        case SWAP_SUCCESS:
 560                                ; /* try to free the page below */
 561                        }
 562                }
 563
 564                if (PageDirty(page)) {
 565                        if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
 566                                goto keep_locked;
 567                        if (!may_enter_fs)
 568                                goto keep_locked;
 569                        if (!sc->may_writepage)
 570                                goto keep_locked;
 571
 572                        /* Page is dirty, try to write it out here */
 573                        switch (pageout(page, mapping, sync_writeback)) {
 574                        case PAGE_KEEP:
 575                                goto keep_locked;
 576                        case PAGE_ACTIVATE:
 577                                goto activate_locked;
 578                        case PAGE_SUCCESS:
 579                                if (PageWriteback(page) || PageDirty(page))
 580                                        goto keep;
 581                                /*
 582                                 * A synchronous write - probably a ramdisk.  Go
 583                                 * ahead and try to reclaim the page.
 584                                 */
 585                                if (!trylock_page(page))
 586                                        goto keep;
 587                                if (PageDirty(page) || PageWriteback(page))
 588                                        goto keep_locked;
 589                                mapping = page_mapping(page);
 590                        case PAGE_CLEAN:
 591                                ; /* try to free the page below */
 592                        }
 593                }
 594
 595                /*
 596                 * If the page has buffers, try to free the buffer mappings
 597                 * associated with this page. If we succeed we try to free
 598                 * the page as well.
 599                 *
 600                 * We do this even if the page is PageDirty().
 601                 * try_to_release_page() does not perform I/O, but it is
  602                 * possible for a page to have PageDirty set while actually being
 603                 * clean (all its buffers are clean).  This happens if the
 604                 * buffers were written out directly, with submit_bh(). ext3
 605                 * will do this, as well as the blockdev mapping. 
 606                 * try_to_release_page() will discover that cleanness and will
 607                 * drop the buffers and mark the page clean - it can be freed.
 608                 *
 609                 * Rarely, pages can have buffers and no ->mapping.  These are
 610                 * the pages which were not successfully invalidated in
 611                 * truncate_complete_page().  We try to drop those buffers here
 612                 * and if that worked, and the page is no longer mapped into
 613                 * process address space (page_count == 1) it can be freed.
 614                 * Otherwise, leave the page on the LRU so it is swappable.
 615                 */
 616                if (PagePrivate(page)) {
 617                        if (!try_to_release_page(page, sc->gfp_mask))
 618                                goto activate_locked;
 619                        if (!mapping && page_count(page) == 1) {
 620                                unlock_page(page);
 621                                if (put_page_testzero(page))
 622                                        goto free_it;
 623                                else {
 624                                        /*
  625                                         * Rare race with a speculative reference:
  626                                         * the speculative reference will free
 627                                         * this page shortly, so we may
 628                                         * increment nr_reclaimed here (and
 629                                         * leave it off the LRU).
 630                                         */
 631                                        nr_reclaimed++;
 632                                        continue;
 633                                }
 634                        }
 635                }
 636
 637                if (!mapping || !__remove_mapping(mapping, page))
 638                        goto keep_locked;
 639
 640                unlock_page(page);
 641free_it:
 642                nr_reclaimed++;
 643                if (!pagevec_add(&freed_pvec, page)) {
 644                        __pagevec_free(&freed_pvec);
 645                        pagevec_reinit(&freed_pvec);
 646                }
 647                continue;
 648
 649activate_locked:
 650                SetPageActive(page);
 651                pgactivate++;
 652keep_locked:
 653                unlock_page(page);
 654keep:
 655                list_add(&page->lru, &ret_pages);
 656                VM_BUG_ON(PageLRU(page));
 657        }
 658        list_splice(&ret_pages, page_list);
 659        if (pagevec_count(&freed_pvec))
 660                __pagevec_free(&freed_pvec);
 661        count_vm_events(PGACTIVATE, pgactivate);
 662        return nr_reclaimed;
 663}
 664
 665/* LRU Isolation modes. */
 666#define ISOLATE_INACTIVE 0      /* Isolate inactive pages. */
 667#define ISOLATE_ACTIVE 1        /* Isolate active pages. */
 668#define ISOLATE_BOTH 2          /* Isolate both active and inactive pages. */
 669
 670/*
 671 * Attempt to remove the specified page from its LRU.  Only take this page
 672 * if it is of the appropriate PageActive status.  Pages which are being
 673 * freed elsewhere are also ignored.
 674 *
 675 * page:        page to consider
 676 * mode:        one of the LRU isolation modes defined above
 677 *
 678 * returns 0 on success, -ve errno on failure.
 679 */
 680int __isolate_lru_page(struct page *page, int mode)
 681{
 682        int ret = -EINVAL;
 683
 684        /* Only take pages on the LRU. */
 685        if (!PageLRU(page))
 686                return ret;
 687
 688        /*
 689         * When checking the active state, we need to be sure we are
  690         * dealing with comparable boolean values.  Take the logical not
 691         * of each.
 692         */
 693        if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
 694                return ret;
 695
 696        ret = -EBUSY;
 697        if (likely(get_page_unless_zero(page))) {
 698                /*
 699                 * Be careful not to clear PageLRU until after we're
 700                 * sure the page is not being freed elsewhere -- the
 701                 * page release code relies on it.
 702                 */
 703                ClearPageLRU(page);
 704                ret = 0;
 705        }
 706
 707        return ret;
 708}
 709
 710/*
 711 * zone->lru_lock is heavily contended.  Some of the functions that
 712 * shrink the lists perform better by taking out a batch of pages
 713 * and working on them outside the LRU lock.
 714 *
 715 * For pagecache intensive workloads, this function is the hottest
 716 * spot in the kernel (apart from copy_*_user functions).
 717 *
 718 * Appropriate locks must be held before calling this function.
 719 *
 720 * @nr_to_scan: The number of pages to look through on the list.
 721 * @src:        The LRU list to pull pages off.
 722 * @dst:        The temp list to put pages on to.
 723 * @scanned:    The number of pages that were scanned.
 724 * @order:      The caller's attempted allocation order
 725 * @mode:       One of the LRU isolation modes
 726 *
 727 * returns how many pages were moved onto *@dst.
 728 */
 729static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 730                struct list_head *src, struct list_head *dst,
 731                unsigned long *scanned, int order, int mode)
 732{
 733        unsigned long nr_taken = 0;
 734        unsigned long scan;
 735
 736        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
 737                struct page *page;
 738                unsigned long pfn;
 739                unsigned long end_pfn;
 740                unsigned long page_pfn;
 741                int zone_id;
 742
 743                page = lru_to_page(src);
 744                prefetchw_prev_lru_page(page, src, flags);
 745
 746                VM_BUG_ON(!PageLRU(page));
 747
 748                switch (__isolate_lru_page(page, mode)) {
 749                case 0:
 750                        list_move(&page->lru, dst);
 751                        nr_taken++;
 752                        break;
 753
 754                case -EBUSY:
 755                        /* else it is being freed elsewhere */
 756                        list_move(&page->lru, src);
 757                        continue;
 758
 759                default:
 760                        BUG();
 761                }
 762
 763                if (!order)
 764                        continue;
 765
 766                /*
 767                 * Attempt to take all pages in the order aligned region
 768                 * surrounding the tag page.  Only take those pages of
 769                 * the same active state as that tag page.  We may safely
 770                 * round the target page pfn down to the requested order
  771         * as the mem_map is guaranteed valid out to MAX_ORDER;
  772         * if a page is in a different zone we will detect
 773                 * it from its zone id and abort this block scan.
 774                 */
 775                zone_id = page_zone_id(page);
 776                page_pfn = page_to_pfn(page);
 777                pfn = page_pfn & ~((1 << order) - 1);
 778                end_pfn = pfn + (1 << order);
 779                for (; pfn < end_pfn; pfn++) {
 780                        struct page *cursor_page;
 781
 782                        /* The target page is in the block, ignore it. */
 783                        if (unlikely(pfn == page_pfn))
 784                                continue;
 785
 786                        /* Avoid holes within the zone. */
 787                        if (unlikely(!pfn_valid_within(pfn)))
 788                                break;
 789
 790                        cursor_page = pfn_to_page(pfn);
 791                        /* Check that we have not crossed a zone boundary. */
 792                        if (unlikely(page_zone_id(cursor_page) != zone_id))
 793                                continue;
 794                        switch (__isolate_lru_page(cursor_page, mode)) {
 795                        case 0:
 796                                list_move(&cursor_page->lru, dst);
 797                                nr_taken++;
 798                                scan++;
 799                                break;
 800
 801                        case -EBUSY:
 802                                /* else it is being freed elsewhere */
 803                                list_move(&cursor_page->lru, src);
 804                        default:
 805                                break;
 806                        }
 807                }
 808        }
 809
 810        *scanned = scan;
 811        return nr_taken;
 812}
 813
 814static unsigned long isolate_pages_global(unsigned long nr,
 815                                        struct list_head *dst,
 816                                        unsigned long *scanned, int order,
 817                                        int mode, struct zone *z,
 818                                        struct mem_cgroup *mem_cont,
 819                                        int active)
 820{
 821        if (active)
 822                return isolate_lru_pages(nr, &z->active_list, dst,
 823                                                scanned, order, mode);
 824        else
 825                return isolate_lru_pages(nr, &z->inactive_list, dst,
 826                                                scanned, order, mode);
 827}
 828
 829/*
  830 * clear_active_flags() is a helper for shrink_inactive_list(), clearing
 831 * any active bits from the pages in the list.
 832 */
 833static unsigned long clear_active_flags(struct list_head *page_list)
 834{
 835        int nr_active = 0;
 836        struct page *page;
 837
 838        list_for_each_entry(page, page_list, lru)
 839                if (PageActive(page)) {
 840                        ClearPageActive(page);
 841                        nr_active++;
 842                }
 843
 844        return nr_active;
 845}
 846
 847/*
 848 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 849 * of reclaimed pages
 850 */
 851static unsigned long shrink_inactive_list(unsigned long max_scan,
 852                                struct zone *zone, struct scan_control *sc)
 853{
 854        LIST_HEAD(page_list);
 855        struct pagevec pvec;
 856        unsigned long nr_scanned = 0;
 857        unsigned long nr_reclaimed = 0;
 858
 859        pagevec_init(&pvec, 1);
 860
 861        lru_add_drain();
 862        spin_lock_irq(&zone->lru_lock);
 863        do {
 864                struct page *page;
 865                unsigned long nr_taken;
 866                unsigned long nr_scan;
 867                unsigned long nr_freed;
 868                unsigned long nr_active;
 869
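
                /*
                 * When reclaiming for a costly high-order allocation,
                 * take active as well as inactive pages off the LRU
                 * (ISOLATE_BOTH) so that whole contiguous blocks can be
                 * freed; otherwise only inactive pages are isolated.
                 */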
 870                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
 871                             &page_list, &nr_scan, sc->order,
 872                             (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
 873                                             ISOLATE_BOTH : ISOLATE_INACTIVE,
 874                                zone, sc->mem_cgroup, 0);
 875                nr_active = clear_active_flags(&page_list);
 876                __count_vm_events(PGDEACTIVATE, nr_active);
 877
 878                __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
 879                __mod_zone_page_state(zone, NR_INACTIVE,
 880                                                -(nr_taken - nr_active));
 881                if (scan_global_lru(sc))
 882                        zone->pages_scanned += nr_scan;
 883                spin_unlock_irq(&zone->lru_lock);
 884
 885                nr_scanned += nr_scan;
 886                nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
 887
 888                /*
 889                 * If we are direct reclaiming for contiguous pages and we do
 890                 * not reclaim everything in the list, try again and wait
 891                 * for IO to complete. This will stall high-order allocations
 892                 * but that should be acceptable to the caller
 893                 */
 894                if (nr_freed < nr_taken && !current_is_kswapd() &&
 895                                        sc->order > PAGE_ALLOC_COSTLY_ORDER) {
 896                        congestion_wait(WRITE, HZ/10);
 897
 898                        /*
 899                         * The attempt at page out may have made some
 900                         * of the pages active, mark them inactive again.
 901                         */
 902                        nr_active = clear_active_flags(&page_list);
 903                        count_vm_events(PGDEACTIVATE, nr_active);
 904
 905                        nr_freed += shrink_page_list(&page_list, sc,
 906                                                        PAGEOUT_IO_SYNC);
 907                }
 908
 909                nr_reclaimed += nr_freed;
 910                local_irq_disable();
 911                if (current_is_kswapd()) {
 912                        __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
 913                        __count_vm_events(KSWAPD_STEAL, nr_freed);
 914                } else if (scan_global_lru(sc))
 915                        __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
 916
 917                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 918
 919                if (nr_taken == 0)
 920                        goto done;
 921
 922                spin_lock(&zone->lru_lock);
 923                /*
 924                 * Put back any unfreeable pages.
 925                 */
 926                while (!list_empty(&page_list)) {
 927                        page = lru_to_page(&page_list);
 928                        VM_BUG_ON(PageLRU(page));
 929                        SetPageLRU(page);
 930                        list_del(&page->lru);
 931                        if (PageActive(page))
 932                                add_page_to_active_list(zone, page);
 933                        else
 934                                add_page_to_inactive_list(zone, page);
 935                        if (!pagevec_add(&pvec, page)) {
 936                                spin_unlock_irq(&zone->lru_lock);
 937                                __pagevec_release(&pvec);
 938                                spin_lock_irq(&zone->lru_lock);
 939                        }
 940                }
 941        } while (nr_scanned < max_scan);
 942        spin_unlock(&zone->lru_lock);
 943done:
 944        local_irq_enable();
 945        pagevec_release(&pvec);
 946        return nr_reclaimed;
 947}
 948
 949/*
 950 * We are about to scan this zone at a certain priority level.  If that priority
 951 * level is smaller (ie: more urgent) than the previous priority, then note
 952 * that priority level within the zone.  This is done so that when the next
 953 * process comes in to scan this zone, it will immediately start out at this
 954 * priority level rather than having to build up its own scanning priority.
 955 * Here, this priority affects only the reclaim-mapped threshold.
 956 */
 957static inline void note_zone_scanning_priority(struct zone *zone, int priority)
 958{
 959        if (priority < zone->prev_priority)
 960                zone->prev_priority = priority;
 961}
 962
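/*
 * A zone is treated as being near OOM once its pages_scanned counter
 * reaches three times its total of active plus inactive pages.
 */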
 963static inline int zone_is_near_oom(struct zone *zone)
 964{
 965        return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
 966                                + zone_page_state(zone, NR_INACTIVE))*3;
 967}
 968
 969/*
  970 * Determine whether we should try to reclaim mapped pages.
 971 * This is called only when sc->mem_cgroup is NULL.
 972 */
 973static int calc_reclaim_mapped(struct scan_control *sc, struct zone *zone,
 974                                int priority)
 975{
 976        long mapped_ratio;
 977        long distress;
 978        long swap_tendency;
 979        long imbalance;
 980        int reclaim_mapped = 0;
 981        int prev_priority;
 982
 983        if (scan_global_lru(sc) && zone_is_near_oom(zone))
 984                return 1;
 985        /*
 986         * `distress' is a measure of how much trouble we're having
 987         * reclaiming pages.  0 -> no problems.  100 -> great trouble.
 988         */
 989        if (scan_global_lru(sc))
 990                prev_priority = zone->prev_priority;
 991        else
 992                prev_priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);
 993
 994        distress = 100 >> min(prev_priority, priority);
 995
 996        /*
 997         * The point of this algorithm is to decide when to start
 998         * reclaiming mapped memory instead of just pagecache.  Work out
  999         * how much memory is mapped.
1001         */
1002        if (scan_global_lru(sc))
1003                mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
1004                                global_page_state(NR_ANON_PAGES)) * 100) /
1005                                        vm_total_pages;
1006        else
1007                mapped_ratio = mem_cgroup_calc_mapped_ratio(sc->mem_cgroup);
1008
1009        /*
 1010         * Now decide how much we really want to unmap pages.  The
1011         * mapped ratio is downgraded - just because there's a lot of
1012         * mapped memory doesn't necessarily mean that page reclaim
1013         * isn't succeeding.
1014         *
1015         * The distress ratio is important - we don't want to start
1016         * going oom.
1017         *
1018         * A 100% value of vm_swappiness overrides this algorithm
1019         * altogether.
1020         */
1021        swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
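        /*
         * Illustrative arithmetic with made-up values: with half of
         * RAM mapped (mapped_ratio = 50), no reclaim trouble yet
         * (distress = 0) and the default swappiness of 60, the base
         * tendency is 50/2 + 0 + 60 = 85; only the imbalance feedback
         * added below can still push it over the threshold of 100.
         */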
1022
1023        /*
 1024         * If there's a huge imbalance between active and inactive
 1025         * (think active 100 times larger than inactive) we should
 1026         * become more permissive, or the system will take too much
 1027         * cpu before it starts swapping during memory pressure.
1028         * Distress is about avoiding early-oom, this is about
1029         * making swappiness graceful despite setting it to low
1030         * values.
1031         *
1032         * Avoid div by zero with nr_inactive+1, and max resulting
1033         * value is vm_total_pages.
1034         */
1035        if (scan_global_lru(sc)) {
1036                imbalance  = zone_page_state(zone, NR_ACTIVE);
1037                imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
1038        } else
1039                imbalance = mem_cgroup_reclaim_imbalance(sc->mem_cgroup);
1040
1041        /*
 1042         * Reduce the effect of imbalance if swappiness is low:
 1043         * this means that for a very low swappiness, the imbalance
 1044         * must be much higher than 100 for this logic to make
 1045         * a difference.
1046         *
1047         * Max temporary value is vm_total_pages*100.
1048         */
1049        imbalance *= (vm_swappiness + 1);
1050        imbalance /= 100;
1051
1052        /*
 1053         * If not much of the RAM is mapped, make the imbalance
 1054         * less relevant: refilling the inactive list with mapped
 1055         * pages is a high priority only in the presence of a high
 1056         * ratio of mapped pages.
1057         *
1058         * Max temporary value is vm_total_pages*100.
1059         */
1060        imbalance *= mapped_ratio;
1061        imbalance /= 100;
1062
1063        /* apply imbalance feedback to swap_tendency */
1064        swap_tendency += imbalance;
1065
1066        /*
1067         * Now use this metric to decide whether to start moving mapped
1068         * memory onto the inactive list.
1069         */
1070        if (swap_tendency >= 100)
1071                reclaim_mapped = 1;
1072
1073        return reclaim_mapped;
1074}
1075
1076/*
1077 * This moves pages from the active list to the inactive list.
1078 *
1079 * We move them the other way if the page is referenced by one or more
1080 * processes, from rmap.
1081 *
1082 * If the pages are mostly unmapped, the processing is fast and it is
1083 * appropriate to hold zone->lru_lock across the whole operation.  But if
1084 * the pages are mapped, the processing is slow (page_referenced()) so we
1085 * should drop zone->lru_lock around each page.  It's impossible to balance
1086 * this, so instead we remove the pages from the LRU while processing them.
1087 * It is safe to rely on PG_active against the non-LRU pages in here because
1088 * nobody will play with that bit on a non-LRU page.
1089 *
1090 * The downside is that we have to touch page->_count against each page.
1091 * But we had to alter page->flags anyway.
1092 */
1093
1094
1095static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1096                                struct scan_control *sc, int priority)
1097{
1098        unsigned long pgmoved;
1099        int pgdeactivate = 0;
1100        unsigned long pgscanned;
1101        LIST_HEAD(l_hold);      /* The pages which were snipped off */
1102        LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
1103        LIST_HEAD(l_active);    /* Pages to go onto the active_list */
1104        struct page *page;
1105        struct pagevec pvec;
1106        int reclaim_mapped = 0;
1107
1108        if (sc->may_swap)
1109                reclaim_mapped = calc_reclaim_mapped(sc, zone, priority);
1110
1111        lru_add_drain();
1112        spin_lock_irq(&zone->lru_lock);
1113        pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
1114                                        ISOLATE_ACTIVE, zone,
1115                                        sc->mem_cgroup, 1);
1116        /*
 1117         * zone->pages_scanned is used to detect a zone nearing OOM;
 1118         * mem_cgroup remembers nr_scan by itself.
1119         */
1120        if (scan_global_lru(sc))
1121                zone->pages_scanned += pgscanned;
1122
1123        __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
1124        spin_unlock_irq(&zone->lru_lock);
1125
1126        while (!list_empty(&l_hold)) {
1127                cond_resched();
1128                page = lru_to_page(&l_hold);
1129                list_del(&page->lru);
1130                if (page_mapped(page)) {
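                        /*
                         * Keep a mapped page active if we decided not
                         * to reclaim mapped memory, if it is anonymous
                         * but there is no swap to put it in, or if it
                         * has been referenced again through rmap.
                         */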
1131                        if (!reclaim_mapped ||
1132                            (total_swap_pages == 0 && PageAnon(page)) ||
1133                            page_referenced(page, 0, sc->mem_cgroup)) {
1134                                list_add(&page->lru, &l_active);
1135                                continue;
1136                        }
1137                }
1138                list_add(&page->lru, &l_inactive);
1139        }
1140
1141        pagevec_init(&pvec, 1);
1142        pgmoved = 0;
1143        spin_lock_irq(&zone->lru_lock);
1144        while (!list_empty(&l_inactive)) {
1145                page = lru_to_page(&l_inactive);
1146                prefetchw_prev_lru_page(page, &l_inactive, flags);
1147                VM_BUG_ON(PageLRU(page));
1148                SetPageLRU(page);
1149                VM_BUG_ON(!PageActive(page));
1150                ClearPageActive(page);
1151
1152                list_move(&page->lru, &zone->inactive_list);
1153                mem_cgroup_move_lists(page, false);
1154                pgmoved++;
1155                if (!pagevec_add(&pvec, page)) {
1156                        __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
1157                        spin_unlock_irq(&zone->lru_lock);
1158                        pgdeactivate += pgmoved;
1159                        pgmoved = 0;
1160                        if (buffer_heads_over_limit)
1161                                pagevec_strip(&pvec);
1162                        __pagevec_release(&pvec);
1163                        spin_lock_irq(&zone->lru_lock);
1164                }
1165        }
1166        __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
1167        pgdeactivate += pgmoved;
1168        if (buffer_heads_over_limit) {
1169                spin_unlock_irq(&zone->lru_lock);
1170                pagevec_strip(&pvec);
1171                spin_lock_irq(&zone->lru_lock);
1172        }
1173
1174        pgmoved = 0;
1175        while (!list_empty(&l_active)) {
1176                page = lru_to_page(&l_active);
1177                prefetchw_prev_lru_page(page, &l_active, flags);
1178                VM_BUG_ON(PageLRU(page));
1179                SetPageLRU(page);
1180                VM_BUG_ON(!PageActive(page));
1181
1182                list_move(&page->lru, &zone->active_list);
1183                mem_cgroup_move_lists(page, true);
1184                pgmoved++;
1185                if (!pagevec_add(&pvec, page)) {
1186                        __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1187                        pgmoved = 0;
1188                        spin_unlock_irq(&zone->lru_lock);
1189                        __pagevec_release(&pvec);
1190                        spin_lock_irq(&zone->lru_lock);
1191                }
1192        }
1193        __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
1194
1195        __count_zone_vm_events(PGREFILL, zone, pgscanned);
1196        __count_vm_events(PGDEACTIVATE, pgdeactivate);
1197        spin_unlock_irq(&zone->lru_lock);
1198
1199        pagevec_release(&pvec);
1200}
1201
1202/*
1203 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
1204 */
1205static unsigned long shrink_zone(int priority, struct zone *zone,
1206                                struct scan_control *sc)
1207{
1208        unsigned long nr_active;
1209        unsigned long nr_inactive;
1210        unsigned long nr_to_scan;
1211        unsigned long nr_reclaimed = 0;
1212
1213        if (scan_global_lru(sc)) {
1214                /*
1215                 * Add one to nr_to_scan just to make sure that the kernel
1216                 * will slowly sift through the active list.
1217                 */
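                /*
                 * The >> priority term scans roughly 1/2^priority of
                 * the list per pass: about 1/4096 of it at
                 * DEF_PRIORITY, up to the whole list at priority 0.
                 */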
1218                zone->nr_scan_active +=
1219                        (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
1220                nr_active = zone->nr_scan_active;
1221                zone->nr_scan_inactive +=
1222                        (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
1223                nr_inactive = zone->nr_scan_inactive;
1224                if (nr_inactive >= sc->swap_cluster_max)
1225                        zone->nr_scan_inactive = 0;
1226                else
1227                        nr_inactive = 0;
1228
1229                if (nr_active >= sc->swap_cluster_max)
1230                        zone->nr_scan_active = 0;
1231                else
1232                        nr_active = 0;
1233        } else {
1234                /*
 1235                 * This reclaim occurs not because of a zone memory shortage
 1236                 * but because the memory controller has hit its limit.
 1237                 * So don't modify zone reclaim related data.
1238                 */
1239                nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
1240                                        zone, priority);
1241
1242                nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
1243                                        zone, priority);
1244        }
1245
1246
1247        while (nr_active || nr_inactive) {
1248                if (nr_active) {
1249                        nr_to_scan = min(nr_active,
1250                                        (unsigned long)sc->swap_cluster_max);
1251                        nr_active -= nr_to_scan;
1252                        shrink_active_list(nr_to_scan, zone, sc, priority);
1253                }
1254
1255                if (nr_inactive) {
1256                        nr_to_scan = min(nr_inactive,
1257                                        (unsigned long)sc->swap_cluster_max);
1258                        nr_inactive -= nr_to_scan;
1259                        nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
1260                                                                sc);
1261                }
1262        }
1263
1264        throttle_vm_writeout(sc->gfp_mask);
1265        return nr_reclaimed;
1266}
1267
1268/*
1269 * This is the direct reclaim path, for page-allocating processes.  We only
1270 * try to reclaim pages from zones which will satisfy the caller's allocation
1271 * request.
1272 *
1273 * We reclaim from a zone even if that zone is over pages_high.  Because:
1274 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
1275 *    allocation or
1276 * b) The zones may be over pages_high but they must go *over* pages_high to
1277 *    satisfy the `incremental min' zone defense algorithm.
1278 *
1279 * Returns the number of reclaimed pages.
1280 *
1281 * If a zone is deemed to be full of pinned pages then just give it a light
1282 * scan then give up on it.
1283 */
1284static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
1285                                        struct scan_control *sc)
1286{
1287        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1288        unsigned long nr_reclaimed = 0;
1289        struct zoneref *z;
1290        struct zone *zone;
1291
1292        sc->all_unreclaimable = 1;
1293        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1294                if (!populated_zone(zone))
1295                        continue;
1296                /*
 1297                 * Take care that memory controller reclaiming has only a small
 1298                 * influence on the global LRU.
1299                 */
1300                if (scan_global_lru(sc)) {
1301                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1302                                continue;
1303                        note_zone_scanning_priority(zone, priority);
1304
1305                        if (zone_is_all_unreclaimable(zone) &&
1306                                                priority != DEF_PRIORITY)
1307                                continue;       /* Let kswapd poll it */
1308                        sc->all_unreclaimable = 0;
1309                } else {
1310                        /*
 1311                         * Ignore the cpuset limitation here. We just want to reduce
 1312                         * the number of pages used by us, regardless of memory shortage.
1313                         */
1314                        sc->all_unreclaimable = 0;
1315                        mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
1316                                                        priority);
1317                }
1318
1319                nr_reclaimed += shrink_zone(priority, zone, sc);
1320        }
1321
1322        return nr_reclaimed;
1323}
1324 
1325/*
1326 * This is the main entry point to direct page reclaim.
1327 *
1328 * If a full scan of the inactive list fails to free enough memory then we
1329 * are "out of memory" and something needs to be killed.
1330 *
1331 * If the caller is !__GFP_FS then the probability of a failure is reasonably
1332 * high - the zone may be full of dirty or under-writeback pages, which this
1333 * caller can't do much about.  We kick pdflush and take explicit naps in the
1334 * hope that some of these pages can be written.  But if the allocating task
1335 * holds filesystem locks which prevent writeout this might not work, and the
1336 * allocation attempt will fail.
1337 *
1338 * returns:     0, if no pages reclaimed
1339 *              else, the number of pages reclaimed
1340 */
1341static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1342                                        struct scan_control *sc)
1343{
1344        int priority;
1345        unsigned long ret = 0;
1346        unsigned long total_scanned = 0;
1347        unsigned long nr_reclaimed = 0;
1348        struct reclaim_state *reclaim_state = current->reclaim_state;
1349        unsigned long lru_pages = 0;
1350        struct zoneref *z;
1351        struct zone *zone;
1352        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
1353
1354        delayacct_freepages_start();
1355
1356        if (scan_global_lru(sc))
1357                count_vm_event(ALLOCSTALL);
1358        /*
1359         * mem_cgroup will not do shrink_slab.
1360         */
1361        if (scan_global_lru(sc)) {
1362                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1363
1364                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1365                                continue;
1366
1367                        lru_pages += zone_page_state(zone, NR_ACTIVE)
1368                                        + zone_page_state(zone, NR_INACTIVE);
1369                }
1370        }
1371
1372        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1373                sc->nr_scanned = 0;
1374                if (!priority)
1375                        disable_swap_token();
1376                nr_reclaimed += shrink_zones(priority, zonelist, sc);
1377                /*
1378                 * Don't shrink slabs when reclaiming memory from
1379                 * over limit cgroups
1380                 */
1381                if (scan_global_lru(sc)) {
1382                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
1383                        if (reclaim_state) {
1384                                nr_reclaimed += reclaim_state->reclaimed_slab;
1385                                reclaim_state->reclaimed_slab = 0;
1386                        }
1387                }
1388                total_scanned += sc->nr_scanned;
1389                if (nr_reclaimed >= sc->swap_cluster_max) {
1390                        ret = nr_reclaimed;
1391                        goto out;
1392                }
1393
1394                /*
1395                 * Try to write back as many pages as we just scanned.  This
1396                 * tends to cause slow streaming writers to write data to the
1397                 * disk smoothly, at the dirtying rate, which is nice.   But
1398                 * that's undesirable in laptop mode, where we *want* lumpy
1399                 * writeout.  So in laptop mode, write out the whole world.
1400                 */
1401                if (total_scanned > sc->swap_cluster_max +
1402                                        sc->swap_cluster_max / 2) {
1403                        wakeup_pdflush(laptop_mode ? 0 : total_scanned);
1404                        sc->may_writepage = 1;
1405                }
1406
1407                /* Take a nap, wait for some writeback to complete */
1408                if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
1409                        congestion_wait(WRITE, HZ/10);
1410        }
1411        /* top priority shrink_zones still had more to do? don't OOM, then */
1412        if (!sc->all_unreclaimable && scan_global_lru(sc))
1413                ret = nr_reclaimed;
1414out:
1415        /*
1416         * Now that we've scanned all the zones at this priority level, note
1417         * that level within the zone so that the next thread which performs
1418         * scanning of this zone will immediately start out at this priority
1419         * level.  This affects only the decision whether or not to bring
1420         * mapped pages onto the inactive list.
1421         */
1422        if (priority < 0)
1423                priority = 0;
1424
1425        if (scan_global_lru(sc)) {
1426                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1427
1428                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1429                                continue;
1430
1431                        zone->prev_priority = priority;
1432                }
1433        } else
1434                mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
1435
1436        delayacct_freepages_end();
1437
1438        return ret;
1439}
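
/*
 * Worked example (approximate, not from the original source): shrink_zone()
 * scans roughly (lru_pages >> priority) pages per zone per pass, so with
 * DEF_PRIORITY = 12 the first pass above touches about 1/4096th of each
 * zone's LRU, the next about 1/2048th, and so on, doubling the pressure
 * each pass until either swap_cluster_max pages have been reclaimed or
 * priority reaches 0 and whole lists become eligible for scanning.
 */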
1440
1441unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1442                                                                gfp_t gfp_mask)
1443{
1444        struct scan_control sc = {
1445                .gfp_mask = gfp_mask,
1446                .may_writepage = !laptop_mode,
1447                .swap_cluster_max = SWAP_CLUSTER_MAX,
1448                .may_swap = 1,
1449                .swappiness = vm_swappiness,
1450                .order = order,
1451                .mem_cgroup = NULL,
1452                .isolate_pages = isolate_pages_global,
1453        };
1454
1455        return do_try_to_free_pages(zonelist, &sc);
1456}
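
/*
 * Illustrative sketch (assumption, not part of this file): direct reclaim
 * is normally entered from the page allocator's slow path in
 * mm/page_alloc.c, roughly:
 *
 *	did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 *	if (did_some_progress)
 *		retry the allocation;
 *
 * As noted above do_try_to_free_pages(), !__GFP_FS callers may still fail
 * here because dirty or under-writeback pages cannot be written out.
 */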
1457
1458#ifdef CONFIG_CGROUP_MEM_RES_CTLR
1459
1460unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1461                                                gfp_t gfp_mask)
1462{
1463        struct scan_control sc = {
1464                .may_writepage = !laptop_mode,
1465                .may_swap = 1,
1466                .swap_cluster_max = SWAP_CLUSTER_MAX,
1467                .swappiness = vm_swappiness,
1468                .order = 0,
1469                .mem_cgroup = mem_cont,
1470                .isolate_pages = mem_cgroup_isolate_pages,
1471        };
1472        struct zonelist *zonelist;
1473
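
        /*
         * The expression below appears intended to keep only the caller's
         * reclaim-behaviour bits (GFP_RECLAIM_MASK) while taking the zone
         * placement bits from GFP_HIGHUSER_MOVABLE, so that cgroup reclaim
         * may target any zone.
         */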
1474        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
1475                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
1476        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
1477        return do_try_to_free_pages(zonelist, &sc);
1478}
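
/*
 * For context (assumption, based on the memory controller design): this
 * entry point is expected to be called from mm/memcontrol.c when a cgroup
 * exceeds its limit, so that pages are reclaimed from that cgroup's own
 * LRU lists rather than from the global LRU.
 */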
1479#endif
1480
1481/*
1482 * For kswapd, balance_pgdat() will work across all this node's zones until
1483 * they are all at pages_high.
1484 *
1485 * Returns the number of pages which were actually freed.
1486 *
1487 * There is special handling here for zones which are full of pinned pages.
1488 * This can happen if the pages are all mlocked, or if they are all used by
1489 * device drivers (say, ZONE_DMA).  Or if they are all in use by hugetlb.
1490 * What we do is to detect the case where all pages in the zone have been
1491 * scanned twice and there has been zero successful reclaim.  Mark the zone as
1492 * dead and from now on, only perform a short scan.  Basically we're polling
1493 * the zone for when the problem goes away.
1494 *
1495 * kswapd scans the zones in the highmem->normal->dma direction.  It skips
1496 * zones which have free_pages > pages_high, but once a zone is found to have
1497 * free_pages <= pages_high, we scan that zone and the lower zones regardless
1498 * of the number of free pages in the lower zones.  This interoperates with
1499 * the page allocator fallback scheme to ensure that aging of pages is balanced
1500 * across the zones.
1501 */
1502static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1503{
1504        int all_zones_ok;
1505        int priority;
1506        int i;
1507        unsigned long total_scanned;
1508        unsigned long nr_reclaimed;
1509        struct reclaim_state *reclaim_state = current->reclaim_state;
1510        struct scan_control sc = {
1511                .gfp_mask = GFP_KERNEL,
1512                .may_swap = 1,
1513                .swap_cluster_max = SWAP_CLUSTER_MAX,
1514                .swappiness = vm_swappiness,
1515                .order = order,
1516                .mem_cgroup = NULL,
1517                .isolate_pages = isolate_pages_global,
1518        };
1519        /*
1520         * temp_priority[] remembers, per zone, the scanning priority at which
1521         * that zone was successfully refilled to free_pages == pages_high.
1522         */
1523        int temp_priority[MAX_NR_ZONES];
1524
1525loop_again:
1526        total_scanned = 0;
1527        nr_reclaimed = 0;
1528        sc.may_writepage = !laptop_mode;
1529        count_vm_event(PAGEOUTRUN);
1530
1531        for (i = 0; i < pgdat->nr_zones; i++)
1532                temp_priority[i] = DEF_PRIORITY;
1533
1534        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
1535                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
1536                unsigned long lru_pages = 0;
1537
1538                /* The swap token gets in the way of swapout... */
1539                if (!priority)
1540                        disable_swap_token();
1541
1542                all_zones_ok = 1;
1543
1544                /*
1545                 * Scan in the highmem->dma direction for the highest
1546                 * zone which needs scanning
1547                 */
1548                for (i = pgdat->nr_zones - 1; i >= 0; i--) {
1549                        struct zone *zone = pgdat->node_zones + i;
1550
1551                        if (!populated_zone(zone))
1552                                continue;
1553
1554                        if (zone_is_all_unreclaimable(zone) &&
1555                            priority != DEF_PRIORITY)
1556                                continue;
1557
1558                        if (!zone_watermark_ok(zone, order, zone->pages_high,
1559                                               0, 0)) {
1560                                end_zone = i;
1561                                break;
1562                        }
1563                }
1564                if (i < 0)
1565                        goto out;
1566
1567                for (i = 0; i <= end_zone; i++) {
1568                        struct zone *zone = pgdat->node_zones + i;
1569
1570                        lru_pages += zone_page_state(zone, NR_ACTIVE)
1571                                        + zone_page_state(zone, NR_INACTIVE);
1572                }
1573
1574                /*
1575                 * Now scan the zone in the dma->highmem direction, stopping
1576                 * at the last zone which needs scanning.
1577                 *
1578                 * We do this because the page allocator works in the opposite
1579                 * direction.  This prevents the page allocator from allocating
1580                 * pages behind kswapd's direction of progress, which would
1581                 * cause too much scanning of the lower zones.
1582                 */
1583                for (i = 0; i <= end_zone; i++) {
1584                        struct zone *zone = pgdat->node_zones + i;
1585                        int nr_slab;
1586
1587                        if (!populated_zone(zone))
1588                                continue;
1589
1590                        if (zone_is_all_unreclaimable(zone) &&
1591                                        priority != DEF_PRIORITY)
1592                                continue;
1593
1594                        if (!zone_watermark_ok(zone, order, zone->pages_high,
1595                                               end_zone, 0))
1596                                all_zones_ok = 0;
1597                        temp_priority[i] = priority;
1598                        sc.nr_scanned = 0;
1599                        note_zone_scanning_priority(zone, priority);
1600                        /*
1601                         * We put equal pressure on every zone, unless one
1602                         * zone has way too many pages free already.
1603                         */
1604                        if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
1605                                                end_zone, 0))
1606                                nr_reclaimed += shrink_zone(priority, zone, &sc);
1607                        reclaim_state->reclaimed_slab = 0;
1608                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
1609                                                lru_pages);
1610                        nr_reclaimed += reclaim_state->reclaimed_slab;
1611                        total_scanned += sc.nr_scanned;
1612                        if (zone_is_all_unreclaimable(zone))
1613                                continue;
1614                        if (nr_slab == 0 && zone->pages_scanned >=
1615                                (zone_page_state(zone, NR_ACTIVE)
1616                                + zone_page_state(zone, NR_INACTIVE)) * 6)
1617                                        zone_set_flag(zone,
1618                                                      ZONE_ALL_UNRECLAIMABLE);
1619                        /*
1620                         * If we've done a decent amount of scanning and
1621                         * the reclaim ratio is low, start doing writepage
1622                         * even in laptop mode
1623                         */
1624                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
1625                            total_scanned > nr_reclaimed + nr_reclaimed / 2)
1626                                sc.may_writepage = 1;
1627                }
1628                if (all_zones_ok)
1629                        break;          /* kswapd: all done */
1630                /*
1631                 * OK, kswapd is getting into trouble.  Take a nap, then take
1632                 * another pass across the zones.
1633                 */
1634                if (total_scanned && priority < DEF_PRIORITY - 2)
1635                        congestion_wait(WRITE, HZ/10);
1636
1637                /*
1638                 * We do this so kswapd doesn't build up large priorities for
1639                 * example when it is freeing in parallel with allocators. It
1640                 * matches the direct reclaim path behaviour in terms of impact
1641                 * on zone->*_priority.
1642                 */
1643                if (nr_reclaimed >= SWAP_CLUSTER_MAX)
1644                        break;
1645        }
1646out:
1647        /*
1648         * Note within each zone the priority level at which this zone was
1649         * brought into a happy state, so that the next thread which scans
1650         * this zone will start out at that priority level.
1651         */
1652        for (i = 0; i < pgdat->nr_zones; i++) {
1653                struct zone *zone = pgdat->node_zones + i;
1654
1655                zone->prev_priority = temp_priority[i];
1656        }
1657        if (!all_zones_ok) {
1658                cond_resched();
1659
1660                try_to_freeze();
1661
1662                goto loop_again;
1663        }
1664
1665        return nr_reclaimed;
1666}
1667
1668/*
1669 * The background pageout daemon, started as a kernel thread
1670 * from the init process. 
1671 *
1672 * This basically trickles out pages so that we have _some_
1673 * free memory available even if there is no other activity
1674 * that frees anything up. This is needed for things like routing
1675 * etc, where we otherwise might have all activity going on in
1676 * asynchronous contexts that cannot page things out.
1677 *
1678 * If there are applications that are active memory-allocators
1679 * (most normal use), this basically shouldn't matter.
1680 */
1681static int kswapd(void *p)
1682{
1683        unsigned long order;
1684        pg_data_t *pgdat = (pg_data_t*)p;
1685        struct task_struct *tsk = current;
1686        DEFINE_WAIT(wait);
1687        struct reclaim_state reclaim_state = {
1688                .reclaimed_slab = 0,
1689        };
1690        node_to_cpumask_ptr(cpumask, pgdat->node_id);
1691
1692        if (!cpus_empty(*cpumask))
1693                set_cpus_allowed_ptr(tsk, cpumask);
1694        current->reclaim_state = &reclaim_state;
1695
1696        /*
1697         * Tell the memory management that we're a "memory allocator",
1698         * and that if we need more memory we should get access to it
1699         * regardless (see "__alloc_pages()"). "kswapd" should
1700         * never get caught in the normal page freeing logic.
1701         *
1702         * (Kswapd normally doesn't need memory anyway, but sometimes
1703         * you need a small amount of memory in order to be able to
1704         * page out something else, and this flag essentially protects
1705         * us from recursively trying to free more memory as we're
1706         * trying to free the first piece of memory in the first place).
1707         */
1708        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
1709        set_freezable();
1710
1711        order = 0;
1712        for ( ; ; ) {
1713                unsigned long new_order;
1714
1715                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
1716                new_order = pgdat->kswapd_max_order;
1717                pgdat->kswapd_max_order = 0;
1718                if (order < new_order) {
1719                        /*
1720                         * Don't sleep if someone wants a larger 'order'
1721                         * allocation
1722                         */
1723                        order = new_order;
1724                } else {
1725                        if (!freezing(current))
1726                                schedule();
1727
1728                        order = pgdat->kswapd_max_order;
1729                }
1730                finish_wait(&pgdat->kswapd_wait, &wait);
1731
1732                if (!try_to_freeze()) {
1733                        /* We can speed up thawing tasks if we don't call
1734                         * balance_pgdat after returning from the refrigerator
1735                         */
1736                        balance_pgdat(pgdat, order);
1737                }
1738        }
1739        return 0;
1740}
1741
1742/*
1743 * A zone is low on free memory, so wake its kswapd task to service it.
1744 */
1745void wakeup_kswapd(struct zone *zone, int order)
1746{
1747        pg_data_t *pgdat;
1748
1749        if (!populated_zone(zone))
1750                return;
1751
1752        pgdat = zone->zone_pgdat;
1753        if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
1754                return;
1755        if (pgdat->kswapd_max_order < order)
1756                pgdat->kswapd_max_order = order;
1757        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
1758                return;
1759        if (!waitqueue_active(&pgdat->kswapd_wait))
1760                return;
1761        wake_up_interruptible(&pgdat->kswapd_wait);
1762}
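
/*
 * Illustrative sketch (assumption, not part of this file): allocators
 * typically wake kswapd for every zone in the zonelist before falling back
 * to direct reclaim, roughly:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
 *		wakeup_kswapd(zone, order);
 *
 * so that background reclaim can bring zones back above pages_high while
 * the allocation proceeds.
 */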
1763
1764#ifdef CONFIG_PM
1765/*
1766 * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
1767 * from LRU lists system-wide, for the given pass and priority, and returns
1768 * the number of reclaimed pages.
1769 *
1770 * For pass > 3 we also try to shrink LRU lists that contain only a few pages.
1771 */
1772static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
1773                                      int pass, struct scan_control *sc)
1774{
1775        struct zone *zone;
1776        unsigned long nr_to_scan, ret = 0;
1777
1778        for_each_zone(zone) {
1779
1780                if (!populated_zone(zone))
1781                        continue;
1782
1783                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
1784                        continue;
1785
1786                /* For pass = 0 we don't shrink the active list */
1787                if (pass > 0) {
1788                        zone->nr_scan_active +=
1789                                (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
1790                        if (zone->nr_scan_active >= nr_pages || pass > 3) {
1791                                zone->nr_scan_active = 0;
1792                                nr_to_scan = min(nr_pages,
1793                                        zone_page_state(zone, NR_ACTIVE));
1794                                shrink_active_list(nr_to_scan, zone, sc, prio);
1795                        }
1796                }
1797
1798                zone->nr_scan_inactive +=
1799                        (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
1800                if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
1801                        zone->nr_scan_inactive = 0;
1802                        nr_to_scan = min(nr_pages,
1803                                zone_page_state(zone, NR_INACTIVE));
1804                        ret += shrink_inactive_list(nr_to_scan, zone, sc);
1805                        if (ret >= nr_pages)
1806                                return ret;
1807                }
1808        }
1809
1810        return ret;
1811}
1812
1813static unsigned long count_lru_pages(void)
1814{
1815        return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
1816}
1817
1818/*
1819 * Try to free `nr_pages' of memory, system-wide, and return the number of
1820 * freed pages.
1821 *
1822 * Rather than trying to age LRUs, the aim is to preserve the overall LRU
1823 * order by reclaiming preferentially:
1824 * inactive > active > active referenced > active mapped
1825 */
1826unsigned long shrink_all_memory(unsigned long nr_pages)
1827{
1828        unsigned long lru_pages, nr_slab;
1829        unsigned long ret = 0;
1830        int pass;
1831        struct reclaim_state reclaim_state;
1832        struct scan_control sc = {
1833                .gfp_mask = GFP_KERNEL,
1834                .may_swap = 0,
1835                .swap_cluster_max = nr_pages,
1836                .may_writepage = 1,
1837                .swappiness = vm_swappiness,
1838                .isolate_pages = isolate_pages_global,
1839        };
1840
1841        current->reclaim_state = &reclaim_state;
1842
1843        lru_pages = count_lru_pages();
1844        nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
1845        /* If slab caches are huge, it's better to hit them first */
1846        while (nr_slab >= lru_pages) {
1847                reclaim_state.reclaimed_slab = 0;
1848                shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
1849                if (!reclaim_state.reclaimed_slab)
1850                        break;
1851
1852                ret += reclaim_state.reclaimed_slab;
1853                if (ret >= nr_pages)
1854                        goto out;
1855
1856                nr_slab -= reclaim_state.reclaimed_slab;
1857        }
1858
1859        /*
1860         * We try to shrink LRUs in 5 passes:
1861         * 0 = Reclaim from inactive_list only
1862         * 1 = Reclaim from active list but don't reclaim mapped
1863         * 2 = 2nd pass of type 1
1864         * 3 = Reclaim mapped (normal reclaim)
1865         * 4 = 2nd pass of type 3
1866         */
1867        for (pass = 0; pass < 5; pass++) {
1868                int prio;
1869
1870                /* Force reclaiming mapped pages in the passes #3 and #4 */
1871                if (pass > 2) {
1872                        sc.may_swap = 1;
1873                        sc.swappiness = 100;
1874                }
1875
1876                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
1877                        unsigned long nr_to_scan = nr_pages - ret;
1878
1879                        sc.nr_scanned = 0;
1880                        ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
1881                        if (ret >= nr_pages)
1882                                goto out;
1883
1884                        reclaim_state.reclaimed_slab = 0;
1885                        shrink_slab(sc.nr_scanned, sc.gfp_mask,
1886                                        count_lru_pages());
1887                        ret += reclaim_state.reclaimed_slab;
1888                        if (ret >= nr_pages)
1889                                goto out;
1890
1891                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
1892                                congestion_wait(WRITE, HZ / 10);
1893                }
1894        }
1895
1896        /*
1897         * If ret == 0, we could not shrink the LRUs, but there may still be
1898         * something to reclaim in the slab caches.
1899         */
1900        if (!ret) {
1901                do {
1902                        reclaim_state.reclaimed_slab = 0;
1903                        shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
1904                        ret += reclaim_state.reclaimed_slab;
1905                } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
1906        }
1907
1908out:
1909        current->reclaim_state = NULL;
1910
1911        return ret;
1912}
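
/*
 * For context (assumption, suggested by the CONFIG_PM guard): the expected
 * caller of shrink_all_memory() is the hibernation code, which frees memory
 * before building the suspend image.
 */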
1913#endif
1914
1915/* It's optimal to keep kswapds on the same CPUs as their memory, but
1916   not required for correctness.  So if the last CPU in a node goes
1917   offline, kswapd is allowed to run anywhere; when the first CPU of that
1918   node comes back online, its CPU binding is restored. */
1919static int __devinit cpu_callback(struct notifier_block *nfb,
1920                                  unsigned long action, void *hcpu)
1921{
1922        int nid;
1923
1924        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
1925                for_each_node_state(nid, N_HIGH_MEMORY) {
1926                        pg_data_t *pgdat = NODE_DATA(nid);
1927                        node_to_cpumask_ptr(mask, pgdat->node_id);
1928
1929                        if (any_online_cpu(*mask) < nr_cpu_ids)
1930                                /* One of our CPUs online: restore mask */
1931                                set_cpus_allowed_ptr(pgdat->kswapd, mask);
1932                }
1933        }
1934        return NOTIFY_OK;
1935}
1936
1937/*
1938 * This kswapd start function will be called by init and node hot-add.
1939 * On node hot-add, kswapd is moved to the proper CPUs as they come online.
1940 */
1941int kswapd_run(int nid)
1942{
1943        pg_data_t *pgdat = NODE_DATA(nid);
1944        int ret = 0;
1945
1946        if (pgdat->kswapd)
1947                return 0;
1948
1949        pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
1950        if (IS_ERR(pgdat->kswapd)) {
1951                /* failure at boot is fatal */
1952                BUG_ON(system_state == SYSTEM_BOOTING);
1953                printk("Failed to start kswapd on node %d\n", nid);
1954                ret = -1;
1955        }
1956        return ret;
1957}
1958
1959static int __init kswapd_init(void)
1960{
1961        int nid;
1962
1963        swap_setup();
1964        for_each_node_state(nid, N_HIGH_MEMORY)
1965                kswapd_run(nid);
1966        hotcpu_notifier(cpu_callback, 0);
1967        return 0;
1968}
1969
1970module_init(kswapd_init)
1971
1972#ifdef CONFIG_NUMA
1973/*
1974 * Zone reclaim mode
1975 *
1976 * If non-zero, call zone_reclaim() when the number of free pages falls
1977 * below the watermarks.
1978 */
1979int zone_reclaim_mode __read_mostly;
1980
1981#define RECLAIM_OFF 0
1982#define RECLAIM_ZONE (1<<0)     /* Run shrink_inactive_list on the zone */
1983#define RECLAIM_WRITE (1<<1)    /* Writeout pages during reclaim */
1984#define RECLAIM_SWAP (1<<2)     /* Swap pages out during reclaim */
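
/*
 * Usage example (not from the original source): zone_reclaim_mode is a
 * bitmask, normally set via /proc/sys/vm/zone_reclaim_mode, e.g.
 *
 *	echo 1 > /proc/sys/vm/zone_reclaim_mode		RECLAIM_ZONE only
 *	echo 3 > /proc/sys/vm/zone_reclaim_mode		RECLAIM_ZONE | RECLAIM_WRITE
 *	echo 7 > /proc/sys/vm/zone_reclaim_mode		also allow RECLAIM_SWAP
 */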
1985
1986/*
1987 * Priority for ZONE_RECLAIM. This determines the fraction of a zone's
1988 * pages considered in each zone_reclaim() pass.  A value of 4 starts by
1989 * scanning 1/16th of the zone.
1990 */
1991#define ZONE_RECLAIM_PRIORITY 4
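
/*
 * Worked example: __zone_reclaim() below starts at this priority and counts
 * down towards 0, so successive passes scan roughly 1/16, 1/8, 1/4 and 1/2
 * of the zone's LRU pages, and finally the whole zone, stopping as soon as
 * nr_pages have been reclaimed.
 */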
1992
1993/*
1994 * Percentage of pages in a zone that must be unmapped for zone_reclaim to
1995 * occur.
1996 */
1997int sysctl_min_unmapped_ratio = 1;
1998
1999/*
2000 * If the number of slab pages in a zone grows beyond this percentage then
2001 * slab reclaim needs to occur.
2002 */
2003int sysctl_min_slab_ratio = 5;
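
/*
 * Assumption for context: both ratios are percentages of a zone's pages and
 * are converted elsewhere (the page allocator's sysctl handlers) into the
 * zone->min_unmapped_pages and zone->min_slab_pages thresholds that the
 * reclaim code below compares against.
 */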
2004
2005/*
2006 * Try to free up some pages from this zone through reclaim.
2007 */
2008static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2009{
2010        /* Minimum pages needed in order to stay on node */
2011        const unsigned long nr_pages = 1 << order;
2012        struct task_struct *p = current;
2013        struct reclaim_state reclaim_state;
2014        int priority;
2015        unsigned long nr_reclaimed = 0;
2016        struct scan_control sc = {
2017                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2018                .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2019                .swap_cluster_max = max_t(unsigned long, nr_pages,
2020                                        SWAP_CLUSTER_MAX),
2021                .gfp_mask = gfp_mask,
2022                .swappiness = vm_swappiness,
2023                .isolate_pages = isolate_pages_global,
2024        };
2025        unsigned long slab_reclaimable;
2026
2027        disable_swap_token();
2028        cond_resched();
2029        /*
2030         * We need to be able to allocate from the reserves for RECLAIM_SWAP
2031         * and we also need to be able to write out pages for RECLAIM_WRITE
2032         * and RECLAIM_SWAP.
2033         */
2034        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
2035        reclaim_state.reclaimed_slab = 0;
2036        p->reclaim_state = &reclaim_state;
2037
2038        if (zone_page_state(zone, NR_FILE_PAGES) -
2039                zone_page_state(zone, NR_FILE_MAPPED) >
2040                zone->min_unmapped_pages) {
2041                /*
2042                 * Free memory by calling shrink zone with increasing
2043                 * priorities until we have enough memory freed.
2044                 */
2045                priority = ZONE_RECLAIM_PRIORITY;
2046                do {
2047                        note_zone_scanning_priority(zone, priority);
2048                        nr_reclaimed += shrink_zone(priority, zone, &sc);
2049                        priority--;
2050                } while (priority >= 0 && nr_reclaimed < nr_pages);
2051        }
2052
2053        slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2054        if (slab_reclaimable > zone->min_slab_pages) {
2055                /*
2056                 * shrink_slab() does not currently allow us to determine how
2057                 * many pages were freed in this zone. So we take the current
2058                 * number of slab pages and shake the slab until it is reduced
2059                 * by the same nr_pages that we used for reclaiming unmapped
2060                 * pages.
2061                 *
2062                 * Note that shrink_slab will free memory on all zones and may
2063                 * take a long time.
2064                 */
2065                while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
2066                        zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
2067                                slab_reclaimable - nr_pages)
2068                        ;
2069
2070                /*
2071                 * Update nr_reclaimed by the number of slab pages we
2072                 * reclaimed from this zone.
2073                 */
2074                nr_reclaimed += slab_reclaimable -
2075                        zone_page_state(zone, NR_SLAB_RECLAIMABLE);
2076        }
2077
2078        p->reclaim_state = NULL;
2079        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
2080        return nr_reclaimed >= nr_pages;
2081}
2082
2083int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2084{
2085        int node_id;
2086        int ret;
2087
2088        /*
2089         * Zone reclaim reclaims unmapped file backed pages and
2090         * slab pages if we are over the defined limits.
2091         *
2092         * A small portion of unmapped file backed pages is needed for
2093         * file I/O; otherwise pages read by file I/O will be immediately
2094         * thrown out if the zone is overallocated. So we do not reclaim
2095         * if less than a specified percentage of the zone is used by
2096         * unmapped file backed pages.
2097         */
2098        if (zone_page_state(zone, NR_FILE_PAGES) -
2099            zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
2100            && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
2101                        <= zone->min_slab_pages)
2102                return 0;
2103
2104        if (zone_is_all_unreclaimable(zone))
2105                return 0;
2106
2107        /*
2108         * Do not scan if the allocation should not be delayed.
2109         */
2110        if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
2111                return 0;
2112
2113        /*
2114         * Only run zone reclaim on the local zone or on zones that do not
2115         * have associated processors. This will favor the local processor
2116         * over remote processors and spread off-node memory allocations
2117         * as widely as possible.
2118         */
2119        node_id = zone_to_nid(zone);
2120        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
2121                return 0;
2122
2123        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
2124                return 0;
2125        ret = __zone_reclaim(zone, gfp_mask, order);
2126        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
2127
2128        return ret;
2129}
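
/*
 * Illustrative sketch (assumption, not part of this file): zone_reclaim()
 * is typically called from the allocator's zone selection path when a zone
 * is below its watermark and zone_reclaim_mode is non-zero, roughly:
 *
 *	if (zone_reclaim_mode && !zone_watermark_ok(zone, order, mark, ...))
 *		if (zone_reclaim(zone, gfp_mask, order))
 *			recheck the watermark before using this zone;
 *
 * which keeps allocations node-local on NUMA systems where possible.
 */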
2130#endif
2131