linux/mm/swap.c
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;
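/*
 * page_cluster is a shift count, not a page count: swap readahead
 * operates on up to 1 << page_cluster pages at a time.  It is set in
 * swap_setup() below and can be tuned via /proc/sys/vm/page-cluster.
 */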

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
                struct lruvec *lruvec;
                unsigned long flags;

                spin_lock_irqsave(&zone->lru_lock, flags);
                lruvec = mem_cgroup_page_lruvec(page, zone);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
        compound_page_dtor *dtor;

        __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
}

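/*
 * A sketch of the compound-page refcounting scheme implemented below
 * (the comments inside the function are the authoritative details):
 * the pins on a transparent huge page live in the head page's _count,
 * and a pin taken through a tail page is additionally recorded in that
 * tail page's _mapcount.  __split_huge_page_refcount() redistributes
 * these counts when the huge page is split, and the compound_lock on
 * the head page serializes tail-page get/put against such a split.
 */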
static void put_compound_page(struct page *page)
{
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
                struct page *page_head = compound_trans_head(page);

                if (likely(page != page_head &&
                           get_page_unless_zero(page_head))) {
                        unsigned long flags;

                        /*
                         * THP cannot break up slab pages, so avoid taking
                         * compound_lock().  Slab performs non-atomic bit ops
                         * on page->flags for better performance.  In particular
                         * slab_unlock() in slub used to be a hot path.  It is
                         * still hot on arches that do not support
                         * this_cpu_cmpxchg_double().
                         */
                        if (PageSlab(page_head)) {
                                if (PageTail(page)) {
                                        if (put_page_testzero(page_head))
                                                VM_BUG_ON(1);

                                        atomic_dec(&page->_mapcount);
                                        goto skip_lock_tail;
                                } else
                                        goto skip_lock;
                        }
                        /*
                         * page_head wasn't a dangling pointer but it
                         * may not be a head page anymore by the time
                         * we obtain the lock. That is ok as long as it
                         * can't be freed from under us.
                         */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount ran before us */
                                compound_unlock_irqrestore(page_head, flags);
skip_lock:
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
out_put_single:
                                if (put_page_testzero(page))
                                        __put_single_page(page);
                                return;
                        }
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
                         * get_page_unless_zero() now that
                         * __split_huge_page_refcount() is blocked on
                         * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
                        VM_BUG_ON(page_mapcount(page) <= 0);
                        atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
                        VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);

skip_lock_tail:
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
                                        __put_compound_page(page_head);
                                else
                                        __put_single_page(page_head);
                        }
                } else {
                        /* page_head is a dangling pointer */
                        VM_BUG_ON(PageTail(page));
                        goto out_put_single;
                }
        } else if (put_page_testzero(page)) {
                if (PageHead(page))
                        __put_compound_page(page);
                else
                        __put_single_page(page);
        }
}

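/*
 * Drop a reference to @page.  When the last reference is dropped, the
 * page is taken off the LRU (if it was on one) and handed back to the
 * page allocator; compound pages go through the slow path above.
 */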
void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __put_single_page(page);
}
EXPORT_SYMBOL(put_page);

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
        /*
         * This takes care of get_page() if run on a tail page
         * returned by one of the get_user_pages/follow_page variants.
         * get_user_pages/follow_page itself doesn't need the compound
         * lock because it runs __get_page_tail_foll() under the
         * proper PT lock that already serializes against
         * split_huge_page().
         */
        unsigned long flags;
        bool got = false;
        struct page *page_head = compound_trans_head(page);

        if (likely(page != page_head && get_page_unless_zero(page_head))) {

                /* See the PageSlab() comment in put_compound_page(). */
                if (PageSlab(page_head)) {
                        if (likely(PageTail(page))) {
                                __get_page_tail_foll(page, false);
                                return true;
                        } else {
                                put_page(page_head);
                                return false;
                        }
                }

                /*
                 * page_head wasn't a dangling pointer but it
                 * may not be a head page anymore by the time
                 * we obtain the lock. That is ok as long as it
                 * can't be freed from under us.
                 */
                flags = compound_lock_irqsave(page_head);
                /* here __split_huge_page_refcount won't run anymore */
                if (likely(PageTail(page))) {
                        __get_page_tail_foll(page, false);
                        got = true;
                }
                compound_unlock_irqrestore(page_head, flags);
                if (unlikely(!got))
                        put_page(page_head);
        }
        return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                page_cache_release(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);

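/*
 * Run @move_fn on each page in @pvec under the owning zone's lru_lock,
 * re-taking the lock only when consecutive pages belong to different
 * zones; IRQs are disabled while the lock is held.  Afterwards the
 * pagevec's page references are dropped and the pagevec is reinitialized.
 */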
static void pagevec_lru_move_fn(struct pagevec *pvec,
        void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
        void *arg)
{
        int i;
        struct zone *zone = NULL;
        struct lruvec *lruvec;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = pagezone;
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }

                lruvec = mem_cgroup_page_lruvec(page, zone);
                (*move_fn)(page, lruvec, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        int *pgmoved = arg;

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                page_cache_get(page);
                local_irq_save(flags);
                pvec = &__get_cpu_var(lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
                            void *arg)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);

                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(page, lruvec, lru);

                __count_vm_event(PGACTIVATE);
                update_page_reclaim_stat(lruvec, file, 1);
        }
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

                page_cache_get(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
        spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}
EXPORT_SYMBOL(mark_page_accessed);
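/*
 * For example, two accesses to a cold, inactive page-cache page walk it
 * up the ladder documented above:
 *
 *      mark_page_accessed(page);       inactive,unreferenced
 *                                              -> inactive,referenced
 *      mark_page_accessed(page);       inactive,referenced
 *                                              -> active,unreferenced
 */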

void __lru_cache_add(struct page *page, enum lru_list lru)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                __pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);
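/*
 * Most callers are expected to reach this through the
 * lru_cache_add_anon()/lru_cache_add_file() wrappers in <linux/swap.h>
 * or through lru_cache_add_lru() below, rather than calling it directly.
 */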

/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
        if (PageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
        } else if (PageUnevictable(page)) {
                VM_BUG_ON(PageActive(page));
                ClearPageUnevictable(page);
        }

        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct zone *zone = page_zone(page);
        struct lruvec *lruvec;

        spin_lock_irq(&zone->lru_lock);
        lruvec = mem_cgroup_page_lruvec(page, zone);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped but is dirty or under writeback, setting
 * PG_reclaim lets it be reclaimed as soon as writeback completes.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list because
 * the VM expects the flusher threads to write it out, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
                              void *arg)
{
        int lru, file;
        bool active;

        if (!PageLRU(page))
                return;

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);
        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);

        del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
        add_page_to_lru_list(page, lruvec, lru);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * Setting PG_reclaim can race with end_page_writeback(),
                 * which can confuse readahead since PG_reclaim shares its
                 * bit with PG_readahead.  But the race window is _really_
                 * small, and it is a non-critical problem.
                 */
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback ended while it sat in the pagevec,
                 * so move the page to the tail of the inactive list.
                 */
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }

        if (active)
                __count_vm_event(PGDEACTIVATE);
        update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
        struct pagevec *pvec;
        int lru;

        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
                        __pagevec_lru_add(pvec, lru);
        }

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }

        pvec = &per_cpu(lru_deactivate_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

        activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (e.g. under mprotect),
         * deactivating unevictable pages to accelerate reclaim is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
        }
}

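/* Drain the current CPU's pagevecs; get_cpu() disables preemption. */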
void lru_add_drain(void)
{
        lru_add_drain_cpu(get_cpu());
        put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        struct lruvec *lruvec;
        unsigned long uninitialized_var(flags);

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
                                                                        flags);
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }

                        lruvec = mem_cgroup_page_lruvec(page, zone);
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }

                list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * calls release_pages() directly to avoid mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
                       struct lruvec *lruvec)
{
        int uninitialized_var(active);
        enum lru_list lru;
        const int file = 0;

        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
        VM_BUG_ON(NR_CPUS != 1 &&
                  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

        SetPageLRU(page_tail);

        if (page_evictable(page_tail, NULL)) {
                if (PageActive(page)) {
                        SetPageActive(page_tail);
                        active = 1;
                        lru = LRU_ACTIVE_ANON;
                } else {
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
        } else {
                SetPageUnevictable(page_tail);
                lru = LRU_UNEVICTABLE;
        }

        if (likely(PageLRU(page)))
                list_add_tail(&page_tail->lru, &page->lru);
        else {
                struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
                 * Use the standard add function to put page_tail on the list,
                 * but then correct its position so they all end up in order.
                 */
                add_page_to_lru_list(page_tail, lruvec, lru);
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }

        if (!PageUnevictable(page))
                update_page_reclaim_stat(lruvec, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        enum lru_list lru = (enum lru_list)arg;
        int file = is_file_lru(lru);
        int active = is_active_lru(lru);

        VM_BUG_ON(PageActive(page));
        VM_BUG_ON(PageUnevictable(page));
        VM_BUG_ON(PageLRU(page));

        SetPageLRU(page);
        if (active)
                SetPageActive(page);
        add_page_to_lru_list(page, lruvec, lru);
        update_page_reclaim_stat(lruvec, file, active);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
        VM_BUG_ON(is_unevictable_lru(lru));

        pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:       Where the resulting pages are placed
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
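/*
 * A typical caller (a sketch, not taken from this file) walks a mapping
 * one pagevec at a time, dropping the references the lookup took:
 *
 *      struct pagevec pvec;
 *      pgoff_t index = 0;
 *
 *      pagevec_init(&pvec, 0);
 *      while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *              int i;
 *
 *              for (i = 0; i < pagevec_count(&pvec); i++) {
 *                      struct page *page = pvec.pages[i];
 *
 *                      index = page->index + 1;
 *                      ... inspect page ...
 *              }
 *              pagevec_release(&pvec);
 *      }
 */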

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
#endif

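        /*
         * page_cluster is a shift count: with 4KB pages the settings
         * below give swap clusters of 1 << 2 = 4 pages (16KB) on
         * machines under 16MB, and 1 << 3 = 8 pages (32KB) otherwise.
         */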
        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}