/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>  /* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                unsigned long flags;
                struct zone *zone = page_zone(page);

                spin_lock_irqsave(&zone->lru_lock, flags);
                VM_BUG_ON(!PageLRU(page));
                __ClearPageLRU(page);
                del_page_from_lru(zone, page);
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        free_hot_cold_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
        compound_page_dtor *dtor;

        __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
}

static void put_compound_page(struct page *page)
{
        if (unlikely(PageTail(page))) {
                /* __split_huge_page_refcount can run under us */
                struct page *page_head = compound_trans_head(page);

                if (likely(page != page_head &&
                           get_page_unless_zero(page_head))) {
                        unsigned long flags;
                        /*
                         * page_head wasn't a dangling pointer but it
                         * may not be a head page anymore by the time
                         * we obtain the lock. That is ok as long as it
                         * can't be freed from under us.
                         */
                        flags = compound_lock_irqsave(page_head);
                        if (unlikely(!PageTail(page))) {
                                /* __split_huge_page_refcount ran before us */
                                compound_unlock_irqrestore(page_head, flags);
                                VM_BUG_ON(PageHead(page_head));
                                if (put_page_testzero(page_head))
                                        __put_single_page(page_head);
                        out_put_single:
                                if (put_page_testzero(page))
                                        __put_single_page(page);
                                return;
                        }
                        VM_BUG_ON(page_head != page->first_page);
                        /*
                         * We can release the refcount taken by
                         * get_page_unless_zero() now that
                         * __split_huge_page_refcount() is blocked on
                         * the compound_lock.
                         */
                        if (put_page_testzero(page_head))
                                VM_BUG_ON(1);
                        /* __split_huge_page_refcount will wait now */
                        VM_BUG_ON(page_mapcount(page) <= 0);
                        atomic_dec(&page->_mapcount);
                        VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
                        VM_BUG_ON(atomic_read(&page->_count) != 0);
                        compound_unlock_irqrestore(page_head, flags);
                        if (put_page_testzero(page_head)) {
                                if (PageHead(page_head))
                                        __put_compound_page(page_head);
                                else
                                        __put_single_page(page_head);
                        }
                } else {
                        /* page_head is a dangling pointer */
                        VM_BUG_ON(PageTail(page));
                        goto out_put_single;
                }
        } else if (put_page_testzero(page)) {
                if (PageHead(page))
                        __put_compound_page(page);
                else
                        __put_single_page(page);
        }
}

void put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                put_compound_page(page);
        else if (put_page_testzero(page))
                __put_single_page(page);
}
EXPORT_SYMBOL(put_page);
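
/*
 * Example: a minimal sketch of the usual pairing with an earlier
 * reference (hypothetical caller, not code from this file):
 *
 *      struct page *page = alloc_page(GFP_KERNEL);
 *
 *      if (page) {
 *              void *addr = kmap(page);
 *              ... use addr ...
 *              kunmap(page);
 *              put_page(page);
 *      }
 *
 * alloc_page() returns the page with _count == 1, so the put_page()
 * here drops the last reference and frees the page.
 */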

/*
 * This function is exported but must not be called by anything other
 * than get_page(). It implements the slow path of get_page().
 */
bool __get_page_tail(struct page *page)
{
        /*
         * This takes care of get_page() if run on a tail page
         * returned by one of the get_user_pages/follow_page variants.
         * get_user_pages/follow_page itself doesn't need the compound
         * lock because it runs __get_page_tail_foll() under the
         * proper PT lock that already serializes against
         * split_huge_page().
         */
        unsigned long flags;
        bool got = false;
        struct page *page_head = compound_trans_head(page);

        if (likely(page != page_head && get_page_unless_zero(page_head))) {
                /*
                 * page_head wasn't a dangling pointer but it
                 * may not be a head page anymore by the time
                 * we obtain the lock. That is ok as long as it
                 * can't be freed from under us.
                 */
                flags = compound_lock_irqsave(page_head);
                /* here __split_huge_page_refcount won't run anymore */
                if (likely(PageTail(page))) {
                        __get_page_tail_foll(page, false);
                        got = true;
                }
                compound_unlock_irqrestore(page_head, flags);
                if (unlikely(!got))
                        put_page(page_head);
        }
        return got;
}
EXPORT_SYMBOL(__get_page_tail);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                page_cache_release(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);
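
/*
 * Example: a minimal sketch of a put_pages_list() caller (hypothetical,
 * not code from this file).  Pages are threaded through page->lru onto
 * a local list, then released in one call:
 *
 *      LIST_HEAD(pages);
 *
 *      list_add(&page->lru, &pages);
 *      ...
 *      put_pages_list(&pages);
 */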

static void pagevec_lru_move_fn(struct pagevec *pvec,
                                void (*move_fn)(struct page *page, void *arg),
                                void *arg)
{
        int i;
        struct zone *zone = NULL;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = pagezone;
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }

                (*move_fn)(page, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, void *arg)
{
        int *pgmoved = arg;
        struct zone *zone = page_zone(page);

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
                list_move_tail(&page->lru, &zone->lru[lru].list);
                mem_cgroup_rotate_reclaimable_page(page);
                (*pgmoved)++;
        }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                page_cache_get(page);
                local_irq_save(flags);
                pvec = &__get_cpu_var(lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}
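
/*
 * Example: a sketch of the expected call site.  end_page_writeback() in
 * mm/filemap.c is the intended caller, along these lines:
 *
 *      if (TestClearPageReclaim(page))
 *              rotate_reclaimable_page(page);
 */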

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
        struct zone_reclaim_stat *memcg_reclaim_stat;

        memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;

        if (!memcg_reclaim_stat)
                return;

        memcg_reclaim_stat->recent_scanned[file]++;
        if (rotated)
                memcg_reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, void *arg)
{
        struct zone *zone = page_zone(page);

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);
                del_page_from_lru_list(zone, page, lru);

                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(zone, page, lru);
                __count_vm_event(PGACTIVATE);

                update_page_reclaim_stat(zone, page, file, 1);
        }
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

void activate_page(struct page *page)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

                page_cache_get(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        __activate_page(page, NULL);
        spin_unlock_irq(&zone->lru_lock);
}
#endif

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced        ->      inactive,referenced
 * inactive,referenced          ->      active,unreferenced
 * active,unreferenced          ->      active,referenced
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page) && PageLRU(page)) {
                activate_page(page);
                ClearPageReferenced(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
}

EXPORT_SYMBOL(mark_page_accessed);
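
/*
 * Example: a minimal sketch of a typical caller (hypothetical, not code
 * from this file).  File read paths call this for each page-cache page
 * they touch:
 *
 *      page = find_get_page(mapping, index);
 *      if (page) {
 *              ... copy data out ...
 *              mark_page_accessed(page);
 *              page_cache_release(page);
 *      }
 */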

void __lru_cache_add(struct page *page, enum lru_list lru)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

        page_cache_get(page);
        if (!pagevec_add(pvec, page))
                ____pagevec_lru_add(pvec, lru);
        put_cpu_var(lru_add_pvecs);
}
EXPORT_SYMBOL(__lru_cache_add);
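
/*
 * Example: a sketch of how the convenience wrappers in
 * include/linux/swap.h are expected to reduce to this call:
 *
 *      static inline void lru_cache_add_file(struct page *page)
 *      {
 *              __lru_cache_add(page, LRU_INACTIVE_FILE);
 *      }
 */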

/**
 * lru_cache_add_lru - add a page to an LRU list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
        if (PageActive(page)) {
                VM_BUG_ON(PageUnevictable(page));
                ClearPageActive(page);
        } else if (PageUnevictable(page)) {
                VM_BUG_ON(PageActive(page));
                ClearPageUnevictable(page);
        }

        VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
        __lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through e.g. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty or under writeback, it
 * can be reclaimed ASAP using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4 the page is moved to the head of the inactive list, rather
 * than the tail, because the VM expects the flusher threads to write it
 * out, which is much more effective than the single-page writeout from
 * reclaim.
 */
static void lru_deactivate_fn(struct page *page, void *arg)
{
        int lru, file;
        bool active;
        struct zone *zone = page_zone(page);

        if (!PageLRU(page))
                return;

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);

        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);
        del_page_from_lru_list(zone, page, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
        add_page_to_lru_list(zone, page, lru);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * Setting PG_reclaim can race with end_page_writeback,
                 * which can confuse readahead.  But the race window is
                 * _really_ small, and it's a non-critical problem.
                 */
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback ended while it sat in the
                 * pagevec, so move the page to the tail of the
                 * inactive list.
                 */
                list_move_tail(&page->lru, &zone->lru[lru].list);
                mem_cgroup_rotate_reclaimable_page(page);
                __count_vm_event(PGROTATED);
        }

        if (active)
                __count_vm_event(PGDEACTIVATE);
        update_page_reclaim_stat(zone, page, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
        struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
        struct pagevec *pvec;
        int lru;

        for_each_lru(lru) {
                pvec = &pvecs[lru - LRU_BASE];
                if (pagevec_count(pvec))
                        ____pagevec_lru_add(pvec, lru);
        }

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }

        pvec = &per_cpu(lru_deactivate_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

        activate_page_drain(cpu);
}

/**
 * deactivate_page - forcefully deactivate a page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (such as one that
         * mlocks heavily), deactivating unevictable pages to accelerate
         * reclaim is pointless, so skip them.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
        }
}
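
/*
 * Example: a sketch of the intended use.  invalidate_mapping_pages() in
 * mm/truncate.c falls back to deactivation when a page cannot be
 * invalidated, roughly:
 *
 *      ret = invalidate_inode_page(page);
 *      if (!ret)
 *              deactivate_page(page);
 */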

void lru_add_drain(void)
{
        drain_cpu_pagevecs(get_cpu());
        put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
        return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
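
/*
 * Example: a sketch of why callers drain first (hypothetical, not code
 * from this file).  Anything that must observe every page on the real
 * LRU lists, such as mlock or page migration, flushes the per-cpu
 * pagevecs beforehand:
 *
 *      lru_add_drain_all();
 *      ... isolate or scan pages on zone->lru ...
 *
 * Note that lru_add_drain_all() may sleep: it schedules work on every
 * CPU and waits for it to complete.
 */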

/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
        int i;
        struct pagevec pages_to_free;
        struct zone *zone = NULL;
        unsigned long uninitialized_var(flags);

        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                if (unlikely(PageCompound(page))) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        put_compound_page(page);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
                                                                        flags);
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }
                        VM_BUG_ON(!PageLRU(page));
                        __ClearPageLRU(page);
                        del_page_from_lru(zone, page);
                }

                if (!pagevec_add(&pages_to_free, page)) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        __pagevec_free(&pages_to_free);
                        pagevec_reinit(&pages_to_free);
                }
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        pagevec_free(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
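
/*
 * Example: a minimal sketch of a release_pages() caller (hypothetical,
 * not code from this file).  Drop a batch of references taken with
 * find_get_pages():
 *
 *      struct page *pages[PAGEVEC_SIZE];
 *      unsigned nr;
 *
 *      nr = find_get_pages(mapping, start, PAGEVEC_SIZE, pages);
 *      ... use pages[0..nr) ...
 *      release_pages(pages, nr, 0);
 *
 * The final argument says the pages are not cache-cold.
 */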

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  pagevec_lru_move_fn()
 * calls release_pages() directly to avoid mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);

/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct zone* zone,
                       struct page *page, struct page *page_tail)
{
        int active;
        enum lru_list lru;
        const int file = 0;
        struct list_head *head;

        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
        VM_BUG_ON(!spin_is_locked(&zone->lru_lock));

        SetPageLRU(page_tail);

        if (page_evictable(page_tail, NULL)) {
                if (PageActive(page)) {
                        SetPageActive(page_tail);
                        active = 1;
                        lru = LRU_ACTIVE_ANON;
                } else {
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
                update_page_reclaim_stat(zone, page_tail, file, active);
                if (likely(PageLRU(page)))
                        head = page->lru.prev;
                else
                        head = &zone->lru[lru].list;
                __add_page_to_lru_list(zone, page_tail, lru, head);
        } else {
                SetPageUnevictable(page_tail);
                add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE);
        }
}

static void ____pagevec_lru_add_fn(struct page *page, void *arg)
{
        enum lru_list lru = (enum lru_list)arg;
        struct zone *zone = page_zone(page);
        int file = is_file_lru(lru);
        int active = is_active_lru(lru);

        VM_BUG_ON(PageActive(page));
        VM_BUG_ON(PageUnevictable(page));
        VM_BUG_ON(PageLRU(page));

        SetPageLRU(page);
        if (active)
                SetPageActive(page);
        update_page_reclaim_stat(zone, page, file, active);
        add_page_to_lru_list(zone, page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
        VM_BUG_ON(is_unevictable_lru(lru));

        pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru);
}

EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                if (page_has_private(page) && trylock_page(page)) {
                        /* re-check now that we hold the page lock */
                        if (page_has_private(page))
                                try_to_release_page(page, 0);
                        unlock_page(page);
                }
        }
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:       Where the resulting pages are placed
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @nr_pages:   The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indices.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);
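
/*
 * Example: a minimal sketch of the usual lookup cycle (hypothetical,
 * not code from this file):
 *
 *      struct pagevec pvec;
 *      pgoff_t next = 0;
 *      int i;
 *
 *      pagevec_init(&pvec, 0);
 *      while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
 *              for (i = 0; i < pagevec_count(&pvec); i++) {
 *                      struct page *page = pvec.pages[i];
 *
 *                      next = page->index + 1;
 *                      ... process page ...
 *              }
 *              pagevec_release(&pvec);
 *      }
 */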

/*
 * Like pagevec_lookup(), but only returns pages tagged with @tag (e.g.
 * PAGECACHE_TAG_DIRTY); on return, *@index points just past the last
 * page found.
 */
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                        nr_pages, pvec->pages);
        return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
        bdi_init(swapper_space.backing_dev_info);
#endif

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}
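
/*
 * Example: what page_cluster means in practice, assuming the swap
 * readahead behaviour in mm/swap_state.c: swapin reads clusters of
 * 1 << page_cluster pages, so the defaults above give 8 pages (32KB
 * with 4KB pages), or 4 pages on machines with less than 16MB of RAM.
 * The value is tunable at runtime via /proc/sys/vm/page-cluster.
 */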