linux/mm/swap.c
   1/*
   2 *  linux/mm/swap.c
   3 *
   4 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
   5 */
   6
   7/*
   8 * This file contains the default values for the operation of the
   9 * Linux VM subsystem. Fine-tuning documentation can be found in
  10 * Documentation/sysctl/vm.txt.
  11 * Started 18.12.91
  12 * Swap aging added 23.2.95, Stephen Tweedie.
  13 * Buffermem limits added 12.3.98, Rik van Riel.
  14 */
  15
  16#include <linux/mm.h>
  17#include <linux/sched.h>
  18#include <linux/kernel_stat.h>
  19#include <linux/swap.h>
  20#include <linux/mman.h>
  21#include <linux/pagemap.h>
  22#include <linux/pagevec.h>
  23#include <linux/init.h>
  24#include <linux/export.h>
  25#include <linux/mm_inline.h>
  26#include <linux/percpu_counter.h>
  27#include <linux/percpu.h>
  28#include <linux/cpu.h>
  29#include <linux/notifier.h>
  30#include <linux/backing-dev.h>
  31#include <linux/memcontrol.h>
  32#include <linux/gfp.h>
  33#include <linux/uio.h>
  34
  35#include "internal.h"
  36
  37/* How many pages do we try to swap or page in/out together? */
  38int page_cluster;
  39
  40static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
  41static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
  42static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
  43
  44/*
  45 * This path almost never happens for VM activity - pages are normally
  46 * freed via pagevecs.  But it gets used by networking.
  47 */
  48static void __page_cache_release(struct page *page)
  49{
  50        if (PageLRU(page)) {
  51                struct zone *zone = page_zone(page);
  52                struct lruvec *lruvec;
  53                unsigned long flags;
  54
  55                spin_lock_irqsave(&zone->lru_lock, flags);
  56                lruvec = mem_cgroup_page_lruvec(page, zone);
  57                VM_BUG_ON(!PageLRU(page));
  58                __ClearPageLRU(page);
  59                del_page_from_lru_list(page, lruvec, page_off_lru(page));
  60                spin_unlock_irqrestore(&zone->lru_lock, flags);
  61        }
  62}
  63
  64static void __put_single_page(struct page *page)
  65{
  66        __page_cache_release(page);
  67        free_hot_cold_page(page, 0);
  68}
  69
  70static void __put_compound_page(struct page *page)
  71{
  72        compound_page_dtor *dtor;
  73
  74        __page_cache_release(page);
  75        dtor = get_compound_page_dtor(page);
  76        (*dtor)(page);
  77}
  78
  79static void put_compound_page(struct page *page)
  80{
  81        if (unlikely(PageTail(page))) {
  82                /* __split_huge_page_refcount can run under us */
  83                struct page *page_head = compound_trans_head(page);
  84
  85                if (likely(page != page_head &&
  86                           get_page_unless_zero(page_head))) {
  87                        unsigned long flags;
  88
  89                        /*
  90                         * THP can not break up slab pages so avoid taking
  91                         * compound_lock().  Slab performs non-atomic bit ops
  92                         * on page->flags for better performance.  In particular
  93                         * slab_unlock() in slub used to be a hot path.  It is
  94                         * still hot on arches that do not support
  95                         * this_cpu_cmpxchg_double().
  96                         */
  97                        if (PageSlab(page_head)) {
  98                                if (PageTail(page)) {
  99                                        if (put_page_testzero(page_head))
 100                                                VM_BUG_ON(1);
 101
 102                                        atomic_dec(&page->_mapcount);
 103                                        goto skip_lock_tail;
 104                                } else
 105                                        goto skip_lock;
 106                        }
 107                        /*
 108                         * page_head wasn't a dangling pointer but it
 109                         * may not be a head page anymore by the time
 110                         * we obtain the lock. That is ok as long as it
 111                         * can't be freed from under us.
 112                         */
 113                        flags = compound_lock_irqsave(page_head);
 114                        if (unlikely(!PageTail(page))) {
 115                                /* __split_huge_page_refcount run before us */
 116                                compound_unlock_irqrestore(page_head, flags);
 117skip_lock:
 118                                if (put_page_testzero(page_head))
 119                                        __put_single_page(page_head);
 120out_put_single:
 121                                if (put_page_testzero(page))
 122                                        __put_single_page(page);
 123                                return;
 124                        }
 125                        VM_BUG_ON(page_head != page->first_page);
 126                        /*
 127                         * We can release the refcount taken by
 128                         * get_page_unless_zero() now that
 129                         * __split_huge_page_refcount() is blocked on
 130                         * the compound_lock.
 131                         */
 132                        if (put_page_testzero(page_head))
 133                                VM_BUG_ON(1);
 134                        /* __split_huge_page_refcount will wait now */
 135                        VM_BUG_ON(page_mapcount(page) <= 0);
 136                        atomic_dec(&page->_mapcount);
 137                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
 138                        VM_BUG_ON(atomic_read(&page->_count) != 0);
 139                        compound_unlock_irqrestore(page_head, flags);
 140
 141skip_lock_tail:
 142                        if (put_page_testzero(page_head)) {
 143                                if (PageHead(page_head))
 144                                        __put_compound_page(page_head);
 145                                else
 146                                        __put_single_page(page_head);
 147                        }
 148                } else {
 149                        /* page_head is a dangling pointer */
 150                        VM_BUG_ON(PageTail(page));
 151                        goto out_put_single;
 152                }
 153        } else if (put_page_testzero(page)) {
 154                if (PageHead(page))
 155                        __put_compound_page(page);
 156                else
 157                        __put_single_page(page);
 158        }
 159}
 160
 161void put_page(struct page *page)
 162{
 163        if (unlikely(PageCompound(page)))
 164                put_compound_page(page);
 165        else if (put_page_testzero(page))
 166                __put_single_page(page);
 167}
 168EXPORT_SYMBOL(put_page);
 169
 170/*
 171 * This function is exported but must not be called by anything other
 172 * than get_page(). It implements the slow path of get_page().
 173 */
 174bool __get_page_tail(struct page *page)
 175{
 176        /*
 177         * This takes care of get_page() if run on a tail page
 178         * returned by one of the get_user_pages/follow_page variants.
 179         * get_user_pages/follow_page itself doesn't need the compound
 180         * lock because it runs __get_page_tail_foll() under the
 181         * proper PT lock that already serializes against
 182         * split_huge_page().
 183         */
 184        unsigned long flags;
 185        bool got = false;
 186        struct page *page_head = compound_trans_head(page);
 187
 188        if (likely(page != page_head && get_page_unless_zero(page_head))) {
 189
  190                /* See the comment in put_compound_page(). */
 191                if (PageSlab(page_head)) {
 192                        if (likely(PageTail(page))) {
 193                                __get_page_tail_foll(page, false);
 194                                return true;
 195                        } else {
 196                                put_page(page_head);
 197                                return false;
 198                        }
 199                }
 200
 201                /*
 202                 * page_head wasn't a dangling pointer but it
 203                 * may not be a head page anymore by the time
 204                 * we obtain the lock. That is ok as long as it
 205                 * can't be freed from under us.
 206                 */
 207                flags = compound_lock_irqsave(page_head);
 208                /* here __split_huge_page_refcount won't run anymore */
 209                if (likely(PageTail(page))) {
 210                        __get_page_tail_foll(page, false);
 211                        got = true;
 212                }
 213                compound_unlock_irqrestore(page_head, flags);
 214                if (unlikely(!got))
 215                        put_page(page_head);
 216        }
 217        return got;
 218}
 219EXPORT_SYMBOL(__get_page_tail);
 220
 221/**
 222 * put_pages_list() - release a list of pages
 223 * @pages: list of pages threaded on page->lru
 224 *
 225 * Release a list of pages which are strung together on page.lru.  Currently
 226 * used by read_cache_pages() and related error recovery code.
 227 */
 228void put_pages_list(struct list_head *pages)
 229{
 230        while (!list_empty(pages)) {
 231                struct page *victim;
 232
 233                victim = list_entry(pages->prev, struct page, lru);
 234                list_del(&victim->lru);
 235                page_cache_release(victim);
 236        }
 237}
 238EXPORT_SYMBOL(put_pages_list);
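/*
 * Illustrative usage sketch (editorial addition; no caller in this file
 * does exactly this): collect pages on a local list threaded through
 * page->lru, then release them all in one call:
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);	(repeat for each page to drop)
 *	put_pages_list(&pages);		(list is left empty)
 */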
 239
  240/**
 241 * get_kernel_pages() - pin kernel pages in memory
 242 * @kiov:       An array of struct kvec structures
 243 * @nr_segs:    number of segments to pin
 244 * @write:      pinning for read/write, currently ignored
 245 * @pages:      array that receives pointers to the pages pinned.
 246 *              Should be at least nr_segs long.
 247 *
  248 * Returns number of pages pinned. This may be fewer than the number
  249 * requested (the walk stops at the first segment whose length is not
  250 * exactly PAGE_SIZE). Each page returned must be released with a
  251 * put_page() call when it is finished with.
 252 */
 253int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
 254                struct page **pages)
 255{
 256        int seg;
 257
 258        for (seg = 0; seg < nr_segs; seg++) {
 259                if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
 260                        return seg;
 261
 262                pages[seg] = kmap_to_page(kiov[seg].iov_base);
 263                page_cache_get(pages[seg]);
 264        }
 265
 266        return seg;
 267}
 268EXPORT_SYMBOL_GPL(get_kernel_pages);
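/*
 * Illustrative usage sketch (editorial addition; buf0 and buf1 are made-up
 * page-aligned kernel buffers): pin two kernel-mapped pages described by a
 * kvec array.  Each segment must be exactly one page long.
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
 *		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int nr = get_kernel_pages(kiov, 2, 0, pages);
 *
 * Each of the nr pages returned must later be dropped with put_page().
 */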
 269
  270/**
 271 * get_kernel_page() - pin a kernel page in memory
 272 * @start:      starting kernel address
 273 * @write:      pinning for read/write, currently ignored
 274 * @pages:      array that receives pointer to the page pinned.
 275 *              Must be at least nr_segs long.
 276 *
  278 * Returns the number of pages pinned (at most 1). The page returned
  279 * must be released with a put_page() call
 279 * when it is finished with.
 280 */
 281int get_kernel_page(unsigned long start, int write, struct page **pages)
 282{
 283        const struct kvec kiov = {
 284                .iov_base = (void *)start,
 285                .iov_len = PAGE_SIZE
 286        };
 287
 288        return get_kernel_pages(&kiov, 1, write, pages);
 289}
 290EXPORT_SYMBOL_GPL(get_kernel_page);
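/*
 * Illustrative usage sketch (editorial addition; "buf" is a made-up
 * page-aligned kernel buffer): pin the page backing a single kernel
 * address and drop it when done.
 *
 *	struct page *page;
 *
 *	if (get_kernel_page((unsigned long)buf, 0, &page) == 1) {
 *		... use page ...
 *		put_page(page);
 *	}
 */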
 291
 292static void pagevec_lru_move_fn(struct pagevec *pvec,
 293        void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
 294        void *arg)
 295{
 296        int i;
 297        struct zone *zone = NULL;
 298        struct lruvec *lruvec;
 299        unsigned long flags = 0;
 300
 301        for (i = 0; i < pagevec_count(pvec); i++) {
 302                struct page *page = pvec->pages[i];
 303                struct zone *pagezone = page_zone(page);
 304
 305                if (pagezone != zone) {
 306                        if (zone)
 307                                spin_unlock_irqrestore(&zone->lru_lock, flags);
 308                        zone = pagezone;
 309                        spin_lock_irqsave(&zone->lru_lock, flags);
 310                }
 311
 312                lruvec = mem_cgroup_page_lruvec(page, zone);
 313                (*move_fn)(page, lruvec, arg);
 314        }
 315        if (zone)
 316                spin_unlock_irqrestore(&zone->lru_lock, flags);
 317        release_pages(pvec->pages, pvec->nr, pvec->cold);
 318        pagevec_reinit(pvec);
 319}
 320
 321static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
 322                                 void *arg)
 323{
 324        int *pgmoved = arg;
 325
 326        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 327                enum lru_list lru = page_lru_base_type(page);
 328                list_move_tail(&page->lru, &lruvec->lists[lru]);
 329                (*pgmoved)++;
 330        }
 331}
 332
 333/*
 334 * pagevec_move_tail() must be called with IRQ disabled.
 335 * Otherwise this may cause nasty races.
 336 */
 337static void pagevec_move_tail(struct pagevec *pvec)
 338{
 339        int pgmoved = 0;
 340
 341        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
 342        __count_vm_events(PGROTATED, pgmoved);
 343}
 344
 345/*
 346 * Writeback is about to end against a page which has been marked for immediate
 347 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 348 * inactive list.
 349 */
 350void rotate_reclaimable_page(struct page *page)
 351{
 352        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
 353            !PageUnevictable(page) && PageLRU(page)) {
 354                struct pagevec *pvec;
 355                unsigned long flags;
 356
 357                page_cache_get(page);
 358                local_irq_save(flags);
 359                pvec = &__get_cpu_var(lru_rotate_pvecs);
 360                if (!pagevec_add(pvec, page))
 361                        pagevec_move_tail(pvec);
 362                local_irq_restore(flags);
 363        }
 364}
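/*
 * Illustrative note (editorial addition): the typical caller is the
 * writeback completion path, which does roughly
 *
 *	if (TestClearPageReclaim(page))
 *		rotate_reclaimable_page(page);
 *
 * so only pages that reclaim explicitly tagged with PG_reclaim are
 * rotated here.
 */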
 365
 366static void update_page_reclaim_stat(struct lruvec *lruvec,
 367                                     int file, int rotated)
 368{
 369        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 370
 371        reclaim_stat->recent_scanned[file]++;
 372        if (rotated)
 373                reclaim_stat->recent_rotated[file]++;
 374}
 375
 376static void __activate_page(struct page *page, struct lruvec *lruvec,
 377                            void *arg)
 378{
 379        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 380                int file = page_is_file_cache(page);
 381                int lru = page_lru_base_type(page);
 382
 383                del_page_from_lru_list(page, lruvec, lru);
 384                SetPageActive(page);
 385                lru += LRU_ACTIVE;
 386                add_page_to_lru_list(page, lruvec, lru);
 387
 388                __count_vm_event(PGACTIVATE);
 389                update_page_reclaim_stat(lruvec, file, 1);
 390        }
 391}
 392
 393#ifdef CONFIG_SMP
 394static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
 395
 396static void activate_page_drain(int cpu)
 397{
 398        struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);
 399
 400        if (pagevec_count(pvec))
 401                pagevec_lru_move_fn(pvec, __activate_page, NULL);
 402}
 403
 404void activate_page(struct page *page)
 405{
 406        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
 407                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 408
 409                page_cache_get(page);
 410                if (!pagevec_add(pvec, page))
 411                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
 412                put_cpu_var(activate_page_pvecs);
 413        }
 414}
 415
 416#else
 417static inline void activate_page_drain(int cpu)
 418{
 419}
 420
 421void activate_page(struct page *page)
 422{
 423        struct zone *zone = page_zone(page);
 424
 425        spin_lock_irq(&zone->lru_lock);
 426        __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
 427        spin_unlock_irq(&zone->lru_lock);
 428}
 429#endif
 430
 431/*
 432 * Mark a page as having seen activity.
 433 *
 434 * inactive,unreferenced        ->      inactive,referenced
 435 * inactive,referenced          ->      active,unreferenced
 436 * active,unreferenced          ->      active,referenced
 437 */
 438void mark_page_accessed(struct page *page)
 439{
 440        if (!PageActive(page) && !PageUnevictable(page) &&
 441                        PageReferenced(page) && PageLRU(page)) {
 442                activate_page(page);
 443                ClearPageReferenced(page);
 444        } else if (!PageReferenced(page)) {
 445                SetPageReferenced(page);
 446        }
 447}
 448EXPORT_SYMBOL(mark_page_accessed);
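/*
 * Illustrative usage sketch (editorial addition): the generic file read
 * path does roughly the following for each page it copies out, which is
 * what drives the inactive -> active promotion above.
 *
 *	page = find_get_page(mapping, index);
 *	if (page) {
 *		... copy data to user space ...
 *		mark_page_accessed(page);
 *		page_cache_release(page);
 *	}
 */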
 449
 450/*
  451 * Order of operations is important: drain the pagevec before adding a
  452 * new page when it is already full, rather than right after the add, so
  453 * that the page just passed in never goes onto the LRU directly.  Since
  454 * mark_page_accessed() (called after this when writing) only activates
  455 * pages that are already on the LRU, draining after the add would get
  456 * every PAGEVEC_SIZEth page of a linear sub-page write activated.
 457 */
 458void __lru_cache_add(struct page *page, enum lru_list lru)
 459{
 460        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
 461
 462        page_cache_get(page);
 463        if (!pagevec_space(pvec))
 464                __pagevec_lru_add(pvec, lru);
 465        pagevec_add(pvec, page);
 466        put_cpu_var(lru_add_pvecs);
 467}
 468EXPORT_SYMBOL(__lru_cache_add);
 469
 470/**
 471 * lru_cache_add_lru - add a page to a page list
 472 * @page: the page to be added to the LRU.
 473 * @lru: the LRU list to which the page is added.
 474 */
 475void lru_cache_add_lru(struct page *page, enum lru_list lru)
 476{
 477        if (PageActive(page)) {
 478                VM_BUG_ON(PageUnevictable(page));
 479                ClearPageActive(page);
 480        } else if (PageUnevictable(page)) {
 481                VM_BUG_ON(PageActive(page));
 482                ClearPageUnevictable(page);
 483        }
 484
 485        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
 486        __lru_cache_add(page, lru);
 487}
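/*
 * Illustrative usage sketch (editorial addition): most callers pick the
 * list from the page type, e.g. when adding a new page-cache or anon page:
 *
 *	lru_cache_add_lru(page, page_is_file_cache(page) ?
 *				LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
 */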
 488
 489/**
 490 * add_page_to_unevictable_list - add a page to the unevictable list
 491 * @page:  the page to be added to the unevictable list
 492 *
 493 * Add page directly to its zone's unevictable list.  To avoid races with
 494 * tasks that might be making the page evictable, through eg. munlock,
 495 * munmap or exit, while it's not on the lru, we want to add the page
 496 * while it's locked or otherwise "invisible" to other tasks.  This is
 497 * difficult to do when using the pagevec cache, so bypass that.
 498 */
 499void add_page_to_unevictable_list(struct page *page)
 500{
 501        struct zone *zone = page_zone(page);
 502        struct lruvec *lruvec;
 503
 504        spin_lock_irq(&zone->lru_lock);
 505        lruvec = mem_cgroup_page_lruvec(page, zone);
 506        SetPageUnevictable(page);
 507        SetPageLRU(page);
 508        add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
 509        spin_unlock_irq(&zone->lru_lock);
 510}
 511
 512/*
 513 * If the page can not be invalidated, it is moved to the
 514 * inactive list to speed up its reclaim.  It is moved to the
 515 * head of the list, rather than the tail, to give the flusher
 516 * threads some time to write it out, as this is much more
 517 * effective than the single-page writeout from reclaim.
 518 *
  519 * If the page is not mapped but is dirty or under writeback, it can
  520 * be reclaimed as soon as possible by setting PG_reclaim.
 521 *
 522 * 1. active, mapped page -> none
 523 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 524 * 3. inactive, mapped page -> none
 525 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 526 * 5. inactive, clean -> inactive, tail
 527 * 6. Others -> none
 528 *
  529 * In case 4 the page is moved to the head of the inactive list because
  530 * the VM expects flusher threads to write it out, which is much more
  531 * effective than the single-page writeout from reclaim.
 532 */
 533static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 534                              void *arg)
 535{
 536        int lru, file;
 537        bool active;
 538
 539        if (!PageLRU(page))
 540                return;
 541
 542        if (PageUnevictable(page))
 543                return;
 544
 545        /* Some processes are using the page */
 546        if (page_mapped(page))
 547                return;
 548
 549        active = PageActive(page);
 550        file = page_is_file_cache(page);
 551        lru = page_lru_base_type(page);
 552
 553        del_page_from_lru_list(page, lruvec, lru + active);
 554        ClearPageActive(page);
 555        ClearPageReferenced(page);
 556        add_page_to_lru_list(page, lruvec, lru);
 557
 558        if (PageWriteback(page) || PageDirty(page)) {
 559                /*
  560                 * Setting PG_reclaim can race with end_page_writeback().
  561                 * That can confuse readahead, but the race window is
  562                 * _really_ small and it is a non-critical problem.
 563                 */
 564                SetPageReclaim(page);
 565        } else {
 566                /*
  567                 * The page's writeback ended while it sat in the pagevec;
  568                 * move the page to the tail of the inactive list.
 569                 */
 570                list_move_tail(&page->lru, &lruvec->lists[lru]);
 571                __count_vm_event(PGROTATED);
 572        }
 573
 574        if (active)
 575                __count_vm_event(PGDEACTIVATE);
 576        update_page_reclaim_stat(lruvec, file, 0);
 577}
 578
 579/*
 580 * Drain pages out of the cpu's pagevecs.
 581 * Either "cpu" is the current CPU, and preemption has already been
 582 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 583 */
 584void lru_add_drain_cpu(int cpu)
 585{
 586        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
 587        struct pagevec *pvec;
 588        int lru;
 589
 590        for_each_lru(lru) {
 591                pvec = &pvecs[lru - LRU_BASE];
 592                if (pagevec_count(pvec))
 593                        __pagevec_lru_add(pvec, lru);
 594        }
 595
 596        pvec = &per_cpu(lru_rotate_pvecs, cpu);
 597        if (pagevec_count(pvec)) {
 598                unsigned long flags;
 599
 600                /* No harm done if a racing interrupt already did this */
 601                local_irq_save(flags);
 602                pagevec_move_tail(pvec);
 603                local_irq_restore(flags);
 604        }
 605
 606        pvec = &per_cpu(lru_deactivate_pvecs, cpu);
 607        if (pagevec_count(pvec))
 608                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 609
 610        activate_page_drain(cpu);
 611}
 612
 613/**
 614 * deactivate_page - forcefully deactivate a page
 615 * @page: page to deactivate
 616 *
 617 * This function hints the VM that @page is a good reclaim candidate,
 618 * for example if its invalidation fails due to the page being dirty
 619 * or under writeback.
 620 */
 621void deactivate_page(struct page *page)
 622{
 623        /*
  624         * In a workload with many unevictable pages (such as one using mprotect),
  625         * deactivating unevictable pages to accelerate reclaim is pointless.
 626         */
 627        if (PageUnevictable(page))
 628                return;
 629
 630        if (likely(get_page_unless_zero(page))) {
 631                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 632
 633                if (!pagevec_add(pvec, page))
 634                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
 635                put_cpu_var(lru_deactivate_pvecs);
 636        }
 637}
 638
 639void lru_add_drain(void)
 640{
 641        lru_add_drain_cpu(get_cpu());
 642        put_cpu();
 643}
 644
 645static void lru_add_drain_per_cpu(struct work_struct *dummy)
 646{
 647        lru_add_drain();
 648}
 649
 650/*
 651 * Returns 0 for success
 652 */
 653int lru_add_drain_all(void)
 654{
 655        return schedule_on_each_cpu(lru_add_drain_per_cpu);
 656}
 657
 658/*
 659 * Batched page_cache_release().  Decrement the reference count on all the
 660 * passed pages.  If it fell to zero then remove the page from the LRU and
 661 * free it.
 662 *
 663 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 664 * for the remainder of the operation.
 665 *
 666 * The locking in this function is against shrink_inactive_list(): we recheck
 667 * the page count inside the lock to see whether shrink_inactive_list()
 668 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 669 * will free it.
 670 */
 671void release_pages(struct page **pages, int nr, int cold)
 672{
 673        int i;
 674        LIST_HEAD(pages_to_free);
 675        struct zone *zone = NULL;
 676        struct lruvec *lruvec;
 677        unsigned long uninitialized_var(flags);
 678
 679        for (i = 0; i < nr; i++) {
 680                struct page *page = pages[i];
 681
 682                if (unlikely(PageCompound(page))) {
 683                        if (zone) {
 684                                spin_unlock_irqrestore(&zone->lru_lock, flags);
 685                                zone = NULL;
 686                        }
 687                        put_compound_page(page);
 688                        continue;
 689                }
 690
 691                if (!put_page_testzero(page))
 692                        continue;
 693
 694                if (PageLRU(page)) {
 695                        struct zone *pagezone = page_zone(page);
 696
 697                        if (pagezone != zone) {
 698                                if (zone)
 699                                        spin_unlock_irqrestore(&zone->lru_lock,
 700                                                                        flags);
 701                                zone = pagezone;
 702                                spin_lock_irqsave(&zone->lru_lock, flags);
 703                        }
 704
 705                        lruvec = mem_cgroup_page_lruvec(page, zone);
 706                        VM_BUG_ON(!PageLRU(page));
 707                        __ClearPageLRU(page);
 708                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
 709                }
 710
 711                list_add(&page->lru, &pages_to_free);
 712        }
 713        if (zone)
 714                spin_unlock_irqrestore(&zone->lru_lock, flags);
 715
 716        free_hot_cold_page_list(&pages_to_free, cold);
 717}
 718EXPORT_SYMBOL(release_pages);
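/*
 * Illustrative usage sketch (editorial addition): a caller holding an
 * array of page pointers, e.g. one filled by get_user_pages(), can drop
 * all of its references in one batch:
 *
 *	release_pages(pages, nr_pages, 0);
 *
 * where the final argument says the pages are not cache-cold.
 */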
 719
 720/*
 721 * The pages which we're about to release may be in the deferred lru-addition
 722 * queues.  That would prevent them from really being freed right now.  That's
 723 * OK from a correctness point of view but is inefficient - those pages may be
 724 * cache-warm and we want to give them back to the page allocator ASAP.
 725 *
 726 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 727 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 728 * mutual recursion.
 729 */
 730void __pagevec_release(struct pagevec *pvec)
 731{
 732        lru_add_drain();
 733        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
 734        pagevec_reinit(pvec);
 735}
 736EXPORT_SYMBOL(__pagevec_release);
 737
 738#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 739/* used by __split_huge_page_refcount() */
 740void lru_add_page_tail(struct page *page, struct page *page_tail,
 741                       struct lruvec *lruvec, struct list_head *list)
 742{
 743        int uninitialized_var(active);
 744        enum lru_list lru;
 745        const int file = 0;
 746
 747        VM_BUG_ON(!PageHead(page));
 748        VM_BUG_ON(PageCompound(page_tail));
 749        VM_BUG_ON(PageLRU(page_tail));
 750        VM_BUG_ON(NR_CPUS != 1 &&
 751                  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
 752
 753        if (!list)
 754                SetPageLRU(page_tail);
 755
 756        if (page_evictable(page_tail)) {
 757                if (PageActive(page)) {
 758                        SetPageActive(page_tail);
 759                        active = 1;
 760                        lru = LRU_ACTIVE_ANON;
 761                } else {
 762                        active = 0;
 763                        lru = LRU_INACTIVE_ANON;
 764                }
 765        } else {
 766                SetPageUnevictable(page_tail);
 767                lru = LRU_UNEVICTABLE;
 768        }
 769
 770        if (likely(PageLRU(page)))
 771                list_add_tail(&page_tail->lru, &page->lru);
 772        else if (list) {
 773                /* page reclaim is reclaiming a huge page */
 774                get_page(page_tail);
 775                list_add_tail(&page_tail->lru, list);
 776        } else {
 777                struct list_head *list_head;
 778                /*
 779                 * Head page has not yet been counted, as an hpage,
 780                 * so we must account for each subpage individually.
 781                 *
 782                 * Use the standard add function to put page_tail on the list,
 783                 * but then correct its position so they all end up in order.
 784                 */
 785                add_page_to_lru_list(page_tail, lruvec, lru);
 786                list_head = page_tail->lru.prev;
 787                list_move_tail(&page_tail->lru, list_head);
 788        }
 789
 790        if (!PageUnevictable(page))
 791                update_page_reclaim_stat(lruvec, file, active);
 792}
 793#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 794
 795static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 796                                 void *arg)
 797{
 798        enum lru_list lru = (enum lru_list)arg;
 799        int file = is_file_lru(lru);
 800        int active = is_active_lru(lru);
 801
 802        VM_BUG_ON(PageActive(page));
 803        VM_BUG_ON(PageUnevictable(page));
 804        VM_BUG_ON(PageLRU(page));
 805
 806        SetPageLRU(page);
 807        if (active)
 808                SetPageActive(page);
 809        add_page_to_lru_list(page, lruvec, lru);
 810        update_page_reclaim_stat(lruvec, file, active);
 811}
 812
 813/*
 814 * Add the passed pages to the LRU, then drop the caller's refcount
 815 * on them.  Reinitialises the caller's pagevec.
 816 */
 817void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 818{
 819        VM_BUG_ON(is_unevictable_lru(lru));
 820
 821        pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
 822}
 823EXPORT_SYMBOL(__pagevec_lru_add);
 824
 825/**
 826 * pagevec_lookup - gang pagecache lookup
 827 * @pvec:       Where the resulting pages are placed
 828 * @mapping:    The address_space to search
 829 * @start:      The starting page index
 830 * @nr_pages:   The maximum number of pages
 831 *
 832 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 833 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 834 * reference against the pages in @pvec.
 835 *
 836 * The search returns a group of mapping-contiguous pages with ascending
 837 * indexes.  There may be holes in the indices due to not-present pages.
 838 *
 839 * pagevec_lookup() returns the number of pages which were found.
 840 */
 841unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
 842                pgoff_t start, unsigned nr_pages)
 843{
 844        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
 845        return pagevec_count(pvec);
 846}
 847EXPORT_SYMBOL(pagevec_lookup);
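/*
 * Illustrative usage sketch (editorial addition): walk an address_space
 * in PAGEVEC_SIZE batches, releasing each batch before looking up the
 * next one.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		index = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
 *		... process pvec.pages[0 .. pagevec_count(&pvec) - 1] ...
 *		pagevec_release(&pvec);
 *	}
 */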
 848
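/*
 * pagevec_lookup_tag() behaves like pagevec_lookup(), but returns only
 * pages that have the given radix-tree @tag set, and advances *@index
 * past the last page returned so the caller can simply call it again to
 * continue the walk.  (Comment added editorially; see find_get_pages_tag()
 * for the underlying behaviour.)
 */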
 849unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
 850                pgoff_t *index, int tag, unsigned nr_pages)
 851{
 852        pvec->nr = find_get_pages_tag(mapping, index, tag,
 853                                        nr_pages, pvec->pages);
 854        return pagevec_count(pvec);
 855}
 856EXPORT_SYMBOL(pagevec_lookup_tag);
 857
 858/*
 859 * Perform any setup for the swap system
 860 */
 861void __init swap_setup(void)
 862{
 863        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
 864#ifdef CONFIG_SWAP
 865        int i;
 866
 867        bdi_init(swapper_spaces[0].backing_dev_info);
 868        for (i = 0; i < MAX_SWAPFILES; i++) {
 869                spin_lock_init(&swapper_spaces[i].tree_lock);
 870                INIT_LIST_HEAD(&swapper_spaces[i].i_mmap_nonlinear);
 871        }
 872#endif
 873
 874        /* Use a smaller cluster for small-memory machines */
 875        if (megs < 16)
 876                page_cluster = 2;
 877        else
 878                page_cluster = 3;
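        /*
         * Editorial note: page_cluster is a power-of-two exponent, so the
         * values above give a swap readahead cluster of 1 << 2 = 4 pages
         * on small machines and 1 << 3 = 8 pages otherwise (tunable via
         * /proc/sys/vm/page-cluster).
         */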
 879        /*
  880         * Right now other parts of the system mean that we
 881         * _really_ don't want to cluster much more
 882         */
 883}
 884