linux/mm/migrate.c
   1/*
   2 * Memory Migration functionality - linux/mm/migrate.c
   3 *
   4 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
   5 *
   6 * Page migration was first developed in the context of the memory hotplug
   7 * project. The main authors of the migration code are:
   8 *
   9 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  10 * Hirokazu Takahashi <taka@valinux.co.jp>
  11 * Dave Hansen <haveblue@us.ibm.com>
  12 * Christoph Lameter
  13 */
  14
  15#include <linux/migrate.h>
  16#include <linux/export.h>
  17#include <linux/swap.h>
  18#include <linux/swapops.h>
  19#include <linux/pagemap.h>
  20#include <linux/buffer_head.h>
  21#include <linux/mm_inline.h>
  22#include <linux/nsproxy.h>
  23#include <linux/pagevec.h>
  24#include <linux/ksm.h>
  25#include <linux/rmap.h>
  26#include <linux/topology.h>
  27#include <linux/cpu.h>
  28#include <linux/cpuset.h>
  29#include <linux/writeback.h>
  30#include <linux/mempolicy.h>
  31#include <linux/vmalloc.h>
  32#include <linux/security.h>
  33#include <linux/memcontrol.h>
  34#include <linux/syscalls.h>
  35#include <linux/hugetlb.h>
  36#include <linux/hugetlb_cgroup.h>
  37#include <linux/gfp.h>
  38
  39#include <asm/tlbflush.h>
  40
  41#include "internal.h"
  42
  43/*
  44 * migrate_prep() needs to be called before we start compiling a list of pages
  45 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
  46 * undesirable, use migrate_prep_local()
  47 */
  48int migrate_prep(void)
  49{
  50        /*
  51         * Clear the LRU lists so pages can be isolated.
  52         * Note that pages may be moved off the LRU after we have
  53         * drained them. Those pages will fail to migrate like other
  54         * pages that may be busy.
  55         */
  56        lru_add_drain_all();
  57
  58        return 0;
  59}
  60
  61/* Do the necessary work of migrate_prep but not if it involves other CPUs */
  62int migrate_prep_local(void)
  63{
  64        lru_add_drain();
  65
  66        return 0;
  67}
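
/*
 * Typical calling sequence (illustrative sketch only, mirroring what
 * do_move_page_to_node_array() below does): drain the LRU caches, isolate
 * the pages of interest onto a private list and hand that list to
 * migrate_pages(), putting the pages back on failure:
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	err = migrate_pages(&pagelist, alloc_target_page, private,
 *				false, MIGRATE_SYNC);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 *
 * alloc_target_page stands in for any new_page_t allocation callback.
 */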
  68
  69/*
  70 * Add isolated pages on the list back to the LRU under page lock
   71 * to avoid leaking evictable pages back onto the unevictable list.
  72 */
  73void putback_lru_pages(struct list_head *l)
  74{
  75        struct page *page;
  76        struct page *page2;
  77
  78        list_for_each_entry_safe(page, page2, l, lru) {
  79                list_del(&page->lru);
  80                dec_zone_page_state(page, NR_ISOLATED_ANON +
  81                                page_is_file_cache(page));
  82                putback_lru_page(page);
  83        }
  84}
  85
  86/*
  87 * Restore a potential migration pte to a working pte entry
  88 */
  89static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
  90                                 unsigned long addr, void *old)
  91{
  92        struct mm_struct *mm = vma->vm_mm;
  93        swp_entry_t entry;
  94        pgd_t *pgd;
  95        pud_t *pud;
  96        pmd_t *pmd;
  97        pte_t *ptep, pte;
  98        spinlock_t *ptl;
  99
 100        if (unlikely(PageHuge(new))) {
 101                ptep = huge_pte_offset(mm, addr);
 102                if (!ptep)
 103                        goto out;
 104                ptl = &mm->page_table_lock;
 105        } else {
 106                pgd = pgd_offset(mm, addr);
 107                if (!pgd_present(*pgd))
 108                        goto out;
 109
 110                pud = pud_offset(pgd, addr);
 111                if (!pud_present(*pud))
 112                        goto out;
 113
 114                pmd = pmd_offset(pud, addr);
 115                if (pmd_trans_huge(*pmd))
 116                        goto out;
 117                if (!pmd_present(*pmd))
 118                        goto out;
 119
 120                ptep = pte_offset_map(pmd, addr);
 121
 122                /*
 123                 * Peek to check is_swap_pte() before taking ptlock?  No, we
 124                 * can race mremap's move_ptes(), which skips anon_vma lock.
 125                 */
 126
 127                ptl = pte_lockptr(mm, pmd);
 128        }
 129
 130        spin_lock(ptl);
 131        pte = *ptep;
 132        if (!is_swap_pte(pte))
 133                goto unlock;
 134
 135        entry = pte_to_swp_entry(pte);
 136
 137        if (!is_migration_entry(entry) ||
 138            migration_entry_to_page(entry) != old)
 139                goto unlock;
 140
 141        get_page(new);
 142        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 143        if (is_write_migration_entry(entry))
 144                pte = pte_mkwrite(pte);
 145#ifdef CONFIG_HUGETLB_PAGE
 146        if (PageHuge(new)) {
 147                pte = pte_mkhuge(pte);
 148                pte = arch_make_huge_pte(pte, vma, new, 0);
 149        }
 150#endif
 151        flush_cache_page(vma, addr, pte_pfn(pte));
 152        set_pte_at(mm, addr, ptep, pte);
 153
 154        if (PageHuge(new)) {
 155                if (PageAnon(new))
 156                        hugepage_add_anon_rmap(new, vma, addr);
 157                else
 158                        page_dup_rmap(new);
 159        } else if (PageAnon(new))
 160                page_add_anon_rmap(new, vma, addr);
 161        else
 162                page_add_file_rmap(new);
 163
 164        /* No need to invalidate - it was non-present before */
 165        update_mmu_cache(vma, addr, ptep);
 166unlock:
 167        pte_unmap_unlock(ptep, ptl);
 168out:
 169        return SWAP_AGAIN;
 170}
 171
 172/*
 173 * Get rid of all migration entries and replace them by
 174 * references to the indicated page.
 175 */
 176static void remove_migration_ptes(struct page *old, struct page *new)
 177{
 178        rmap_walk(new, remove_migration_pte, old);
 179}
 180
 181/*
 182 * Something used the pte of a page under migration. We need to
 183 * get to the page and wait until migration is finished.
 184 * When we return from this function the fault will be retried.
 185 */
 186void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 187                                unsigned long address)
 188{
 189        pte_t *ptep, pte;
 190        spinlock_t *ptl;
 191        swp_entry_t entry;
 192        struct page *page;
 193
 194        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 195        pte = *ptep;
 196        if (!is_swap_pte(pte))
 197                goto out;
 198
 199        entry = pte_to_swp_entry(pte);
 200        if (!is_migration_entry(entry))
 201                goto out;
 202
 203        page = migration_entry_to_page(entry);
 204
  205        /*
  206         * Once the radix-tree replacement step of page migration has
  207         * started, the page_count *must* be zero. And we don't want to call
  208         * wait_on_page_locked() against a page without holding a reference,
  209         * so we use get_page_unless_zero() here. Even if it fails, the
  210         * page fault will simply occur again.
  211         */
 212        if (!get_page_unless_zero(page))
 213                goto out;
 214        pte_unmap_unlock(ptep, ptl);
 215        wait_on_page_locked(page);
 216        put_page(page);
 217        return;
 218out:
 219        pte_unmap_unlock(ptep, ptl);
 220}
 221
 222#ifdef CONFIG_BLOCK
 223/* Returns true if all buffers are successfully locked */
 224static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 225                                                        enum migrate_mode mode)
 226{
 227        struct buffer_head *bh = head;
 228
 229        /* Simple case, sync compaction */
 230        if (mode != MIGRATE_ASYNC) {
 231                do {
 232                        get_bh(bh);
 233                        lock_buffer(bh);
 234                        bh = bh->b_this_page;
 235
 236                } while (bh != head);
 237
 238                return true;
 239        }
 240
 241        /* async case, we cannot block on lock_buffer so use trylock_buffer */
 242        do {
 243                get_bh(bh);
 244                if (!trylock_buffer(bh)) {
 245                        /*
 246                         * We failed to lock the buffer and cannot stall in
 247                         * async migration. Release the taken locks
 248                         */
 249                        struct buffer_head *failed_bh = bh;
 250                        put_bh(failed_bh);
 251                        bh = head;
 252                        while (bh != failed_bh) {
 253                                unlock_buffer(bh);
 254                                put_bh(bh);
 255                                bh = bh->b_this_page;
 256                        }
 257                        return false;
 258                }
 259
 260                bh = bh->b_this_page;
 261        } while (bh != head);
 262        return true;
 263}
 264#else
 265static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
 266                                                        enum migrate_mode mode)
 267{
 268        return true;
 269}
 270#endif /* CONFIG_BLOCK */
 271
 272/*
 273 * Replace the page in the mapping.
 274 *
 275 * The number of remaining references must be:
 276 * 1 for anonymous pages without a mapping
 277 * 2 for pages with a mapping
 278 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 279 */
 280static int migrate_page_move_mapping(struct address_space *mapping,
 281                struct page *newpage, struct page *page,
 282                struct buffer_head *head, enum migrate_mode mode)
 283{
 284        int expected_count;
 285        void **pslot;
 286
 287        if (!mapping) {
 288                /* Anonymous page without mapping */
 289                if (page_count(page) != 1)
 290                        return -EAGAIN;
 291                return 0;
 292        }
 293
 294        spin_lock_irq(&mapping->tree_lock);
 295
 296        pslot = radix_tree_lookup_slot(&mapping->page_tree,
 297                                        page_index(page));
 298
 299        expected_count = 2 + page_has_private(page);
 300        if (page_count(page) != expected_count ||
 301                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 302                spin_unlock_irq(&mapping->tree_lock);
 303                return -EAGAIN;
 304        }
 305
 306        if (!page_freeze_refs(page, expected_count)) {
 307                spin_unlock_irq(&mapping->tree_lock);
 308                return -EAGAIN;
 309        }
 310
  311        /*
  312         * In the async migration case of moving a page with buffers, lock the
  313         * buffers using trylock before the mapping is moved. Otherwise, if the
  314         * mapping were moved first and we then failed to lock the buffers, we
  315         * could not move the mapping back because of the elevated page count
  316         * and would have to block waiting for other references to be dropped.
  317         */
 318        if (mode == MIGRATE_ASYNC && head &&
 319                        !buffer_migrate_lock_buffers(head, mode)) {
 320                page_unfreeze_refs(page, expected_count);
 321                spin_unlock_irq(&mapping->tree_lock);
 322                return -EAGAIN;
 323        }
 324
 325        /*
 326         * Now we know that no one else is looking at the page.
 327         */
 328        get_page(newpage);      /* add cache reference */
 329        if (PageSwapCache(page)) {
 330                SetPageSwapCache(newpage);
 331                set_page_private(newpage, page_private(page));
 332        }
 333
 334        radix_tree_replace_slot(pslot, newpage);
 335
 336        /*
 337         * Drop cache reference from old page by unfreezing
 338         * to one less reference.
 339         * We know this isn't the last reference.
 340         */
 341        page_unfreeze_refs(page, expected_count - 1);
 342
 343        /*
 344         * If moved to a different zone then also account
 345         * the page for that zone. Other VM counters will be
 346         * taken care of when we establish references to the
 347         * new page and drop references to the old page.
 348         *
 349         * Note that anonymous pages are accounted for
 350         * via NR_FILE_PAGES and NR_ANON_PAGES if they
 351         * are mapped to swap space.
 352         */
 353        __dec_zone_page_state(page, NR_FILE_PAGES);
 354        __inc_zone_page_state(newpage, NR_FILE_PAGES);
 355        if (!PageSwapCache(page) && PageSwapBacked(page)) {
 356                __dec_zone_page_state(page, NR_SHMEM);
 357                __inc_zone_page_state(newpage, NR_SHMEM);
 358        }
 359        spin_unlock_irq(&mapping->tree_lock);
 360
 361        return 0;
 362}
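
/*
 * Worked example of the expected_count check above: a page-cache page
 * with buffer heads is referenced by the isolation done by the caller,
 * by the page cache radix tree, and by its private buffers, so
 * expected_count = 2 + page_has_private(page) = 3.  An anonymous page
 * that is not in the swap cache has no mapping and is handled by the
 * page_count() == 1 check at the top of the function.
 */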
 363
 364/*
 365 * The expected number of remaining references is the same as that
 366 * of migrate_page_move_mapping().
 367 */
 368int migrate_huge_page_move_mapping(struct address_space *mapping,
 369                                   struct page *newpage, struct page *page)
 370{
 371        int expected_count;
 372        void **pslot;
 373
 374        if (!mapping) {
 375                if (page_count(page) != 1)
 376                        return -EAGAIN;
 377                return 0;
 378        }
 379
 380        spin_lock_irq(&mapping->tree_lock);
 381
 382        pslot = radix_tree_lookup_slot(&mapping->page_tree,
 383                                        page_index(page));
 384
 385        expected_count = 2 + page_has_private(page);
 386        if (page_count(page) != expected_count ||
 387                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 388                spin_unlock_irq(&mapping->tree_lock);
 389                return -EAGAIN;
 390        }
 391
 392        if (!page_freeze_refs(page, expected_count)) {
 393                spin_unlock_irq(&mapping->tree_lock);
 394                return -EAGAIN;
 395        }
 396
 397        get_page(newpage);
 398
 399        radix_tree_replace_slot(pslot, newpage);
 400
 401        page_unfreeze_refs(page, expected_count - 1);
 402
 403        spin_unlock_irq(&mapping->tree_lock);
 404        return 0;
 405}
 406
 407/*
 408 * Copy the page to its new location
 409 */
 410void migrate_page_copy(struct page *newpage, struct page *page)
 411{
 412        if (PageHuge(page))
 413                copy_huge_page(newpage, page);
 414        else
 415                copy_highpage(newpage, page);
 416
 417        if (PageError(page))
 418                SetPageError(newpage);
 419        if (PageReferenced(page))
 420                SetPageReferenced(newpage);
 421        if (PageUptodate(page))
 422                SetPageUptodate(newpage);
 423        if (TestClearPageActive(page)) {
 424                VM_BUG_ON(PageUnevictable(page));
 425                SetPageActive(newpage);
 426        } else if (TestClearPageUnevictable(page))
 427                SetPageUnevictable(newpage);
 428        if (PageChecked(page))
 429                SetPageChecked(newpage);
 430        if (PageMappedToDisk(page))
 431                SetPageMappedToDisk(newpage);
 432
 433        if (PageDirty(page)) {
 434                clear_page_dirty_for_io(page);
  435                /*
  436                 * We want to mark the page and the radix tree as dirty, and
  437                 * redo the accounting that clear_page_dirty_for_io undid,
  438                 * but we can't use set_page_dirty because that function
  439                 * signals that the whole of the page has become dirty,
  440                 * whereas only part of our page may be dirty.
  441                 */
 442                if (PageSwapBacked(page))
 443                        SetPageDirty(newpage);
 444                else
 445                        __set_page_dirty_nobuffers(newpage);
 446        }
 447
 448        mlock_migrate_page(newpage, page);
 449        ksm_migrate_page(newpage, page);
 450
 451        ClearPageSwapCache(page);
 452        ClearPagePrivate(page);
 453        set_page_private(page, 0);
 454
 455        /*
 456         * If any waiters have accumulated on the new page then
 457         * wake them up.
 458         */
 459        if (PageWriteback(newpage))
 460                end_page_writeback(newpage);
 461}
 462
 463/************************************************************
 464 *                    Migration functions
 465 ***********************************************************/
 466
 467/* Always fail migration. Used for mappings that are not movable */
 468int fail_migrate_page(struct address_space *mapping,
 469                        struct page *newpage, struct page *page)
 470{
 471        return -EIO;
 472}
 473EXPORT_SYMBOL(fail_migrate_page);
 474
 475/*
 476 * Common logic to directly migrate a single page suitable for
 477 * pages that do not use PagePrivate/PagePrivate2.
 478 *
 479 * Pages are locked upon entry and exit.
 480 */
 481int migrate_page(struct address_space *mapping,
 482                struct page *newpage, struct page *page,
 483                enum migrate_mode mode)
 484{
 485        int rc;
 486
 487        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 488
 489        rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
 490
 491        if (rc)
 492                return rc;
 493
 494        migrate_page_copy(newpage, page);
 495        return 0;
 496}
 497EXPORT_SYMBOL(migrate_page);
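
/*
 * Filesystems whose pages carry no private data can use migrate_page()
 * directly as their address_space_operations callback; a minimal sketch
 * (foo_aops is a placeholder name):
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */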
 498
 499#ifdef CONFIG_BLOCK
 500/*
 501 * Migration function for pages with buffers. This function can only be used
 502 * if the underlying filesystem guarantees that no other references to "page"
 503 * exist.
 504 */
 505int buffer_migrate_page(struct address_space *mapping,
 506                struct page *newpage, struct page *page, enum migrate_mode mode)
 507{
 508        struct buffer_head *bh, *head;
 509        int rc;
 510
 511        if (!page_has_buffers(page))
 512                return migrate_page(mapping, newpage, page, mode);
 513
 514        head = page_buffers(page);
 515
 516        rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
 517
 518        if (rc)
 519                return rc;
 520
 521        /*
 522         * In the async case, migrate_page_move_mapping locked the buffers
 523         * with an IRQ-safe spinlock held. In the sync case, the buffers
 524         * need to be locked now
 525         */
 526        if (mode != MIGRATE_ASYNC)
 527                BUG_ON(!buffer_migrate_lock_buffers(head, mode));
 528
 529        ClearPagePrivate(page);
 530        set_page_private(newpage, page_private(page));
 531        set_page_private(page, 0);
 532        put_page(page);
 533        get_page(newpage);
 534
 535        bh = head;
 536        do {
 537                set_bh_page(bh, newpage, bh_offset(bh));
 538                bh = bh->b_this_page;
 539
 540        } while (bh != head);
 541
 542        SetPagePrivate(newpage);
 543
 544        migrate_page_copy(newpage, page);
 545
 546        bh = head;
 547        do {
 548                unlock_buffer(bh);
 549                put_bh(bh);
 550                bh = bh->b_this_page;
 551
 552        } while (bh != head);
 553
 554        return 0;
 555}
 556EXPORT_SYMBOL(buffer_migrate_page);
 557#endif
 558
 559/*
 560 * Writeback a page to clean the dirty state
 561 */
 562static int writeout(struct address_space *mapping, struct page *page)
 563{
 564        struct writeback_control wbc = {
 565                .sync_mode = WB_SYNC_NONE,
 566                .nr_to_write = 1,
 567                .range_start = 0,
 568                .range_end = LLONG_MAX,
 569                .for_reclaim = 1
 570        };
 571        int rc;
 572
 573        if (!mapping->a_ops->writepage)
 574                /* No write method for the address space */
 575                return -EINVAL;
 576
 577        if (!clear_page_dirty_for_io(page))
 578                /* Someone else already triggered a write */
 579                return -EAGAIN;
 580
 581        /*
 582         * A dirty page may imply that the underlying filesystem has
 583         * the page on some queue. So the page must be clean for
  584         * migration. Writeout may mean we lose the lock and the
 585         * page state is no longer what we checked for earlier.
 586         * At this point we know that the migration attempt cannot
 587         * be successful.
 588         */
 589        remove_migration_ptes(page, page);
 590
 591        rc = mapping->a_ops->writepage(page, &wbc);
 592
 593        if (rc != AOP_WRITEPAGE_ACTIVATE)
 594                /* unlocked. Relock */
 595                lock_page(page);
 596
 597        return (rc < 0) ? -EIO : -EAGAIN;
 598}
 599
 600/*
 601 * Default handling if a filesystem does not provide a migration function.
 602 */
 603static int fallback_migrate_page(struct address_space *mapping,
 604        struct page *newpage, struct page *page, enum migrate_mode mode)
 605{
 606        if (PageDirty(page)) {
 607                /* Only writeback pages in full synchronous migration */
 608                if (mode != MIGRATE_SYNC)
 609                        return -EBUSY;
 610                return writeout(mapping, page);
 611        }
 612
 613        /*
 614         * Buffers may be managed in a filesystem specific way.
 615         * We must have no buffers or drop them.
 616         */
 617        if (page_has_private(page) &&
 618            !try_to_release_page(page, GFP_KERNEL))
 619                return -EAGAIN;
 620
 621        return migrate_page(mapping, newpage, page, mode);
 622}
 623
 624/*
 625 * Move a page to a newly allocated page
 626 * The page is locked and all ptes have been successfully removed.
 627 *
 628 * The new page will have replaced the old page if this function
 629 * is successful.
 630 *
 631 * Return value:
 632 *   < 0 - error code
 633 *  == 0 - success
 634 */
 635static int move_to_new_page(struct page *newpage, struct page *page,
 636                                int remap_swapcache, enum migrate_mode mode)
 637{
 638        struct address_space *mapping;
 639        int rc;
 640
 641        /*
 642         * Block others from accessing the page when we get around to
 643         * establishing additional references. We are the only one
 644         * holding a reference to the new page at this point.
 645         */
 646        if (!trylock_page(newpage))
 647                BUG();
 648
 649        /* Prepare mapping for the new page.*/
 650        newpage->index = page->index;
 651        newpage->mapping = page->mapping;
 652        if (PageSwapBacked(page))
 653                SetPageSwapBacked(newpage);
 654
 655        mapping = page_mapping(page);
 656        if (!mapping)
 657                rc = migrate_page(mapping, newpage, page, mode);
 658        else if (mapping->a_ops->migratepage)
 659                /*
 660                 * Most pages have a mapping and most filesystems provide a
 661                 * migratepage callback. Anonymous pages are part of swap
 662                 * space which also has its own migratepage callback. This
 663                 * is the most common path for page migration.
 664                 */
 665                rc = mapping->a_ops->migratepage(mapping,
 666                                                newpage, page, mode);
 667        else
 668                rc = fallback_migrate_page(mapping, newpage, page, mode);
 669
 670        if (rc) {
 671                newpage->mapping = NULL;
 672        } else {
 673                if (remap_swapcache)
 674                        remove_migration_ptes(page, newpage);
 675                page->mapping = NULL;
 676        }
 677
 678        unlock_page(newpage);
 679
 680        return rc;
 681}
 682
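/*
 * Grab the page lock, charge newpage to the memory cgroup, wait for (or
 * skip) writeback according to the migrate mode, pin the anon_vma of an
 * anonymous page, replace the page's ptes with migration entries via
 * try_to_unmap() and finally move the contents with move_to_new_page().
 * If the migration fails, the migration ptes are removed again.
 */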
 683static int __unmap_and_move(struct page *page, struct page *newpage,
 684                        int force, bool offlining, enum migrate_mode mode)
 685{
 686        int rc = -EAGAIN;
 687        int remap_swapcache = 1;
 688        struct mem_cgroup *mem;
 689        struct anon_vma *anon_vma = NULL;
 690
 691        if (!trylock_page(page)) {
 692                if (!force || mode == MIGRATE_ASYNC)
 693                        goto out;
 694
 695                /*
 696                 * It's not safe for direct compaction to call lock_page.
 697                 * For example, during page readahead pages are added locked
 698                 * to the LRU. Later, when the IO completes the pages are
 699                 * marked uptodate and unlocked. However, the queueing
 700                 * could be merging multiple pages for one bio (e.g.
 701                 * mpage_readpages). If an allocation happens for the
 702                 * second or third page, the process can end up locking
 703                 * the same page twice and deadlocking. Rather than
 704                 * trying to be clever about what pages can be locked,
 705                 * avoid the use of lock_page for direct compaction
 706                 * altogether.
 707                 */
 708                if (current->flags & PF_MEMALLOC)
 709                        goto out;
 710
 711                lock_page(page);
 712        }
 713
 714        /*
 715         * Only memory hotplug's offline_pages() caller has locked out KSM,
 716         * and can safely migrate a KSM page.  The other cases have skipped
 717         * PageKsm along with PageReserved - but it is only now when we have
 718         * the page lock that we can be certain it will not go KSM beneath us
 719         * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
 720         * its pagecount raised, but only here do we take the page lock which
 721         * serializes that).
 722         */
 723        if (PageKsm(page) && !offlining) {
 724                rc = -EBUSY;
 725                goto unlock;
 726        }
 727
 728        /* charge against new page */
 729        mem_cgroup_prepare_migration(page, newpage, &mem);
 730
 731        if (PageWriteback(page)) {
 732                /*
  733                 * Only in the case of a full synchronous migration is it
 734                 * necessary to wait for PageWriteback. In the async case,
 735                 * the retry loop is too short and in the sync-light case,
 736                 * the overhead of stalling is too much
 737                 */
 738                if (mode != MIGRATE_SYNC) {
 739                        rc = -EBUSY;
 740                        goto uncharge;
 741                }
 742                if (!force)
 743                        goto uncharge;
 744                wait_on_page_writeback(page);
 745        }
  746        /*
  747         * By the time try_to_unmap() runs, page->mapcount goes down to 0,
  748         * and then we cannot notice that the anon_vma is freed while we
  749         * migrate the page. Taking a reference with page_get_anon_vma()
  750         * delays freeing the anon_vma until the end of migration. File
  751         * cache pages are no problem because they are serialized by the
  752         * page lock during migration, so only anonymous pages need this care.
  753         */
 754        if (PageAnon(page)) {
 755                /*
 756                 * Only page_lock_anon_vma() understands the subtleties of
 757                 * getting a hold on an anon_vma from outside one of its mms.
 758                 */
 759                anon_vma = page_get_anon_vma(page);
 760                if (anon_vma) {
 761                        /*
 762                         * Anon page
 763                         */
 764                } else if (PageSwapCache(page)) {
 765                        /*
 766                         * We cannot be sure that the anon_vma of an unmapped
 767                         * swapcache page is safe to use because we don't
 768                         * know in advance if the VMA that this page belonged
 769                         * to still exists. If the VMA and others sharing the
 770                         * data have been freed, then the anon_vma could
 771                         * already be invalid.
 772                         *
 773                         * To avoid this possibility, swapcache pages get
 774                         * migrated but are not remapped when migration
 775                         * completes
 776                         */
 777                        remap_swapcache = 0;
 778                } else {
 779                        goto uncharge;
 780                }
 781        }
 782
 783        /*
 784         * Corner case handling:
  785         * 1. When a new swap-cache page is read in, it is added to the LRU
 786         * and treated as swapcache but it has no rmap yet.
 787         * Calling try_to_unmap() against a page->mapping==NULL page will
 788         * trigger a BUG.  So handle it here.
 789         * 2. An orphaned page (see truncate_complete_page) might have
 790         * fs-private metadata. The page can be picked up due to memory
 791         * offlining.  Everywhere else except page reclaim, the page is
 792         * invisible to the vm, so the page can not be migrated.  So try to
 793         * free the metadata, so the page can be freed.
 794         */
 795        if (!page->mapping) {
 796                VM_BUG_ON(PageAnon(page));
 797                if (page_has_private(page)) {
 798                        try_to_free_buffers(page);
 799                        goto uncharge;
 800                }
 801                goto skip_unmap;
 802        }
 803
 804        /* Establish migration ptes or remove ptes */
 805        try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 806
 807skip_unmap:
 808        if (!page_mapped(page))
 809                rc = move_to_new_page(newpage, page, remap_swapcache, mode);
 810
 811        if (rc && remap_swapcache)
 812                remove_migration_ptes(page, page);
 813
 814        /* Drop an anon_vma reference if we took one */
 815        if (anon_vma)
 816                put_anon_vma(anon_vma);
 817
 818uncharge:
 819        mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 820unlock:
 821        unlock_page(page);
 822out:
 823        return rc;
 824}
 825
 826/*
 827 * Obtain the lock on page, remove all ptes and migrate the page
 828 * to the newly allocated page in newpage.
 829 */
 830static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 831                        struct page *page, int force, bool offlining,
 832                        enum migrate_mode mode)
 833{
 834        int rc = 0;
 835        int *result = NULL;
 836        struct page *newpage = get_new_page(page, private, &result);
 837
 838        if (!newpage)
 839                return -ENOMEM;
 840
 841        if (page_count(page) == 1) {
 842                /* page was freed from under us. So we are done. */
 843                goto out;
 844        }
 845
 846        if (unlikely(PageTransHuge(page)))
 847                if (unlikely(split_huge_page(page)))
 848                        goto out;
 849
 850        rc = __unmap_and_move(page, newpage, force, offlining, mode);
 851out:
 852        if (rc != -EAGAIN) {
 853                /*
 854                 * A page that has been migrated has all references
 855                 * removed and will be freed. A page that has not been
  856                 * migrated will have kept its references and be
 857                 * restored.
 858                 */
 859                list_del(&page->lru);
 860                dec_zone_page_state(page, NR_ISOLATED_ANON +
 861                                page_is_file_cache(page));
 862                putback_lru_page(page);
 863        }
 864        /*
 865         * Move the new page to the LRU. If migration was not successful
 866         * then this will free the page.
 867         */
 868        putback_lru_page(newpage);
 869        if (result) {
 870                if (rc)
 871                        *result = rc;
 872                else
 873                        *result = page_to_nid(newpage);
 874        }
 875        return rc;
 876}
 877
  878/*
  879 * Counterpart of unmap_and_move() for hugepage migration.
  880 *
  881 * This function doesn't wait for the completion of hugepage I/O
  882 * because there is no race between I/O and migration for hugepages.
  883 * Note that currently hugepage I/O occurs only in direct I/O
  884 * where no lock is held and PG_writeback is irrelevant,
  885 * and the writeback status of all subpages is counted in the reference
  886 * count of the head page (i.e. if all subpages of a 2MB hugepage are
  887 * under direct I/O, the reference count of the head page is 512 and a bit more.)
  888 * This means that when we try to migrate a hugepage whose subpages are
  889 * doing direct I/O, some references remain after try_to_unmap() and
  890 * hugepage migration fails without data corruption.
  891 *
  892 * There is also no race when direct I/O is issued on a page under migration,
  893 * because then the pte is replaced with a migration swap entry and the
  894 * direct I/O code will wait in the page fault for migration to complete.
  895 */
 896static int unmap_and_move_huge_page(new_page_t get_new_page,
 897                                unsigned long private, struct page *hpage,
 898                                int force, bool offlining,
 899                                enum migrate_mode mode)
 900{
 901        int rc = 0;
 902        int *result = NULL;
 903        struct page *new_hpage = get_new_page(hpage, private, &result);
 904        struct anon_vma *anon_vma = NULL;
 905
 906        if (!new_hpage)
 907                return -ENOMEM;
 908
 909        rc = -EAGAIN;
 910
 911        if (!trylock_page(hpage)) {
 912                if (!force || mode != MIGRATE_SYNC)
 913                        goto out;
 914                lock_page(hpage);
 915        }
 916
 917        if (PageAnon(hpage))
 918                anon_vma = page_get_anon_vma(hpage);
 919
 920        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 921
 922        if (!page_mapped(hpage))
 923                rc = move_to_new_page(new_hpage, hpage, 1, mode);
 924
 925        if (rc)
 926                remove_migration_ptes(hpage, hpage);
 927
 928        if (anon_vma)
 929                put_anon_vma(anon_vma);
 930
 931        if (!rc)
 932                hugetlb_cgroup_migrate(hpage, new_hpage);
 933
 934        unlock_page(hpage);
 935out:
 936        put_page(new_hpage);
 937        if (result) {
 938                if (rc)
 939                        *result = rc;
 940                else
 941                        *result = page_to_nid(new_hpage);
 942        }
 943        return rc;
 944}
 945
  946/*
  947 * migrate_pages
  948 *
  949 * The function takes one list of pages to migrate and a callback that,
  950 * given the page to be migrated and the private data, determines the
  951 * target of the move and allocates the new page.
  952 *
  953 * The function returns after 10 attempts or once no pages are movable
  954 * anymore, either because the list has become empty or because no
  955 * retryable pages exist anymore.
  956 * The caller should call putback_lru_pages() to return the pages to the
  957 * LRU or the free list only if the return value is != 0.
  958 *
  959 * Return: Number of pages not migrated, or an error code.
  960 */
 961int migrate_pages(struct list_head *from,
 962                new_page_t get_new_page, unsigned long private, bool offlining,
 963                enum migrate_mode mode)
 964{
 965        int retry = 1;
 966        int nr_failed = 0;
 967        int pass = 0;
 968        struct page *page;
 969        struct page *page2;
 970        int swapwrite = current->flags & PF_SWAPWRITE;
 971        int rc;
 972
 973        if (!swapwrite)
 974                current->flags |= PF_SWAPWRITE;
 975
 976        for(pass = 0; pass < 10 && retry; pass++) {
 977                retry = 0;
 978
 979                list_for_each_entry_safe(page, page2, from, lru) {
 980                        cond_resched();
 981
 982                        rc = unmap_and_move(get_new_page, private,
 983                                                page, pass > 2, offlining,
 984                                                mode);
 985
 986                        switch(rc) {
 987                        case -ENOMEM:
 988                                goto out;
 989                        case -EAGAIN:
 990                                retry++;
 991                                break;
 992                        case 0:
 993                                break;
 994                        default:
 995                                /* Permanent failure */
 996                                nr_failed++;
 997                                break;
 998                        }
 999                }
1000        }
1001        rc = 0;
1002out:
1003        if (!swapwrite)
1004                current->flags &= ~PF_SWAPWRITE;
1005
1006        if (rc)
1007                return rc;
1008
1009        return nr_failed + retry;
1010}
1011
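/*
 * Counterpart of migrate_pages() for a single hugepage: retry the
 * migration for up to 10 passes, giving up immediately on -ENOMEM and
 * turning any other permanent failure into -EIO.
 */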
1012int migrate_huge_page(struct page *hpage, new_page_t get_new_page,
1013                      unsigned long private, bool offlining,
1014                      enum migrate_mode mode)
1015{
1016        int pass, rc;
1017
1018        for (pass = 0; pass < 10; pass++) {
1019                rc = unmap_and_move_huge_page(get_new_page,
1020                                              private, hpage, pass > 2, offlining,
1021                                              mode);
1022                switch (rc) {
1023                case -ENOMEM:
1024                        goto out;
1025                case -EAGAIN:
1026                        /* try again */
1027                        cond_resched();
1028                        break;
1029                case 0:
1030                        goto out;
1031                default:
1032                        rc = -EIO;
1033                        goto out;
1034                }
1035        }
1036out:
1037        return rc;
1038}
1039
1040#ifdef CONFIG_NUMA
1041/*
1042 * Move a list of individual pages
1043 */
1044struct page_to_node {
1045        unsigned long addr;
1046        struct page *page;
1047        int node;
1048        int status;
1049};
1050
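/*
 * new_page_t allocation callback for do_move_page_to_node_array(): look
 * up the target node recorded for page @p in the page_to_node array
 * passed via @private and allocate the replacement page on that node.
 */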
1051static struct page *new_page_node(struct page *p, unsigned long private,
1052                int **result)
1053{
1054        struct page_to_node *pm = (struct page_to_node *)private;
1055
1056        while (pm->node != MAX_NUMNODES && pm->page != p)
1057                pm++;
1058
1059        if (pm->node == MAX_NUMNODES)
1060                return NULL;
1061
1062        *result = &pm->status;
1063
1064        return alloc_pages_exact_node(pm->node,
1065                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
1066}
1067
1068/*
1069 * Move a set of pages as indicated in the pm array. The addr
1070 * field must be set to the virtual address of the page to be moved
1071 * and the node number must contain a valid target node.
1072 * The pm array ends with node = MAX_NUMNODES.
1073 */
1074static int do_move_page_to_node_array(struct mm_struct *mm,
1075                                      struct page_to_node *pm,
1076                                      int migrate_all)
1077{
1078        int err;
1079        struct page_to_node *pp;
1080        LIST_HEAD(pagelist);
1081
1082        down_read(&mm->mmap_sem);
1083
1084        /*
1085         * Build a list of pages to migrate
1086         */
1087        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1088                struct vm_area_struct *vma;
1089                struct page *page;
1090
1091                err = -EFAULT;
1092                vma = find_vma(mm, pp->addr);
1093                if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1094                        goto set_status;
1095
1096                page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
1097
1098                err = PTR_ERR(page);
1099                if (IS_ERR(page))
1100                        goto set_status;
1101
1102                err = -ENOENT;
1103                if (!page)
1104                        goto set_status;
1105
1106                /* Use PageReserved to check for zero page */
1107                if (PageReserved(page) || PageKsm(page))
1108                        goto put_and_set;
1109
1110                pp->page = page;
1111                err = page_to_nid(page);
1112
1113                if (err == pp->node)
1114                        /*
1115                         * Node already in the right place
1116                         */
1117                        goto put_and_set;
1118
1119                err = -EACCES;
1120                if (page_mapcount(page) > 1 &&
1121                                !migrate_all)
1122                        goto put_and_set;
1123
1124                err = isolate_lru_page(page);
1125                if (!err) {
1126                        list_add_tail(&page->lru, &pagelist);
1127                        inc_zone_page_state(page, NR_ISOLATED_ANON +
1128                                            page_is_file_cache(page));
1129                }
1130put_and_set:
 1131                /*
 1132                 * Either drop the duplicate reference taken by
 1133                 * isolate_lru_page() or drop the page reference obtained
 1134                 * by follow_page() if the page was not isolated.
 1135                 */
1136                put_page(page);
1137set_status:
1138                pp->status = err;
1139        }
1140
1141        err = 0;
1142        if (!list_empty(&pagelist)) {
1143                err = migrate_pages(&pagelist, new_page_node,
1144                                (unsigned long)pm, 0, MIGRATE_SYNC);
1145                if (err)
1146                        putback_lru_pages(&pagelist);
1147        }
1148
1149        up_read(&mm->mmap_sem);
1150        return err;
1151}
1152
1153/*
 1154 * Migrate an array of page addresses onto an array of nodes and fill
 1155 * in the corresponding array of status values.
1156 */
1157static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1158                         unsigned long nr_pages,
1159                         const void __user * __user *pages,
1160                         const int __user *nodes,
1161                         int __user *status, int flags)
1162{
1163        struct page_to_node *pm;
1164        unsigned long chunk_nr_pages;
1165        unsigned long chunk_start;
1166        int err;
1167
1168        err = -ENOMEM;
1169        pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1170        if (!pm)
1171                goto out;
1172
1173        migrate_prep();
1174
1175        /*
 1176         * Store a chunk of the page_to_node array in a page,
 1177         * but keep the last entry as an end-of-chunk marker
1178         */
1179        chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
1180
1181        for (chunk_start = 0;
1182             chunk_start < nr_pages;
1183             chunk_start += chunk_nr_pages) {
1184                int j;
1185
1186                if (chunk_start + chunk_nr_pages > nr_pages)
1187                        chunk_nr_pages = nr_pages - chunk_start;
1188
1189                /* fill the chunk pm with addrs and nodes from user-space */
1190                for (j = 0; j < chunk_nr_pages; j++) {
1191                        const void __user *p;
1192                        int node;
1193
1194                        err = -EFAULT;
1195                        if (get_user(p, pages + j + chunk_start))
1196                                goto out_pm;
1197                        pm[j].addr = (unsigned long) p;
1198
1199                        if (get_user(node, nodes + j + chunk_start))
1200                                goto out_pm;
1201
1202                        err = -ENODEV;
1203                        if (node < 0 || node >= MAX_NUMNODES)
1204                                goto out_pm;
1205
1206                        if (!node_state(node, N_HIGH_MEMORY))
1207                                goto out_pm;
1208
1209                        err = -EACCES;
1210                        if (!node_isset(node, task_nodes))
1211                                goto out_pm;
1212
1213                        pm[j].node = node;
1214                }
1215
1216                /* End marker for this chunk */
1217                pm[chunk_nr_pages].node = MAX_NUMNODES;
1218
1219                /* Migrate this chunk */
1220                err = do_move_page_to_node_array(mm, pm,
1221                                                 flags & MPOL_MF_MOVE_ALL);
1222                if (err < 0)
1223                        goto out_pm;
1224
1225                /* Return status information */
1226                for (j = 0; j < chunk_nr_pages; j++)
1227                        if (put_user(pm[j].status, status + j + chunk_start)) {
1228                                err = -EFAULT;
1229                                goto out_pm;
1230                        }
1231        }
1232        err = 0;
1233
1234out_pm:
1235        free_page((unsigned long)pm);
1236out:
1237        return err;
1238}
1239
1240/*
 1241 * Determine the nodes of an array of pages and store them in an array of status values.
1242 */
1243static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1244                                const void __user **pages, int *status)
1245{
1246        unsigned long i;
1247
1248        down_read(&mm->mmap_sem);
1249
1250        for (i = 0; i < nr_pages; i++) {
1251                unsigned long addr = (unsigned long)(*pages);
1252                struct vm_area_struct *vma;
1253                struct page *page;
1254                int err = -EFAULT;
1255
1256                vma = find_vma(mm, addr);
1257                if (!vma || addr < vma->vm_start)
1258                        goto set_status;
1259
1260                page = follow_page(vma, addr, 0);
1261
1262                err = PTR_ERR(page);
1263                if (IS_ERR(page))
1264                        goto set_status;
1265
1266                err = -ENOENT;
1267                /* Use PageReserved to check for zero page */
1268                if (!page || PageReserved(page) || PageKsm(page))
1269                        goto set_status;
1270
1271                err = page_to_nid(page);
1272set_status:
1273                *status = err;
1274
1275                pages++;
1276                status++;
1277        }
1278
1279        up_read(&mm->mmap_sem);
1280}
1281
1282/*
 1283 * Determine the nodes of a user array of pages and store them in
 1284 * a user array of status values.
1285 */
1286static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1287                         const void __user * __user *pages,
1288                         int __user *status)
1289{
1290#define DO_PAGES_STAT_CHUNK_NR 16
1291        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1292        int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1293
1294        while (nr_pages) {
1295                unsigned long chunk_nr;
1296
1297                chunk_nr = nr_pages;
1298                if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1299                        chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1300
1301                if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1302                        break;
1303
1304                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1305
1306                if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1307                        break;
1308
1309                pages += chunk_nr;
1310                status += chunk_nr;
1311                nr_pages -= chunk_nr;
1312        }
1313        return nr_pages ? -EFAULT : 0;
1314}
1315
1316/*
 1317 * Move a list of pages in the address space of the process identified by
 1318 * pid, or of the currently executing process when pid is 0.
1319 */
1320SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1321                const void __user * __user *, pages,
1322                const int __user *, nodes,
1323                int __user *, status, int, flags)
1324{
1325        const struct cred *cred = current_cred(), *tcred;
1326        struct task_struct *task;
1327        struct mm_struct *mm;
1328        int err;
1329        nodemask_t task_nodes;
1330
1331        /* Check flags */
1332        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1333                return -EINVAL;
1334
1335        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1336                return -EPERM;
1337
1338        /* Find the mm_struct */
1339        rcu_read_lock();
1340        task = pid ? find_task_by_vpid(pid) : current;
1341        if (!task) {
1342                rcu_read_unlock();
1343                return -ESRCH;
1344        }
1345        get_task_struct(task);
1346
1347        /*
1348         * Check if this process has the right to modify the specified
1349         * process. The right exists if the process has administrative
1350         * capabilities, superuser privileges or the same
1351         * userid as the target process.
1352         */
1353        tcred = __task_cred(task);
1354        if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1355            !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1356            !capable(CAP_SYS_NICE)) {
1357                rcu_read_unlock();
1358                err = -EPERM;
1359                goto out;
1360        }
1361        rcu_read_unlock();
1362
1363        err = security_task_movememory(task);
1364        if (err)
1365                goto out;
1366
1367        task_nodes = cpuset_mems_allowed(task);
1368        mm = get_task_mm(task);
1369        put_task_struct(task);
1370
1371        if (!mm)
1372                return -EINVAL;
1373
1374        if (nodes)
1375                err = do_pages_move(mm, task_nodes, nr_pages, pages,
1376                                    nodes, status, flags);
1377        else
1378                err = do_pages_stat(mm, nr_pages, pages, status);
1379
1380        mmput(mm);
1381        return err;
1382
1383out:
1384        put_task_struct(task);
1385        return err;
1386}
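
/*
 * Userspace counterpart (sketch): the libnuma wrapper declared in
 * <numaif.h> issues this system call directly, e.g.
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *	long rc = move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE);
 *
 * Passing nodes == NULL only reports the current node of each page in
 * status[], which corresponds to the do_pages_stat() path above.
 */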
1387
1388/*
 1389 * Call the migration functions in the vm_ops that may prepare
 1390 * memory in a vma for migration. Migration functions may perform
 1391 * the migration for vmas that do not have an underlying page struct.
1392 */
1393int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
1394        const nodemask_t *from, unsigned long flags)
1395{
1396        struct vm_area_struct *vma;
1397        int err = 0;
1398
1399        for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
1400                if (vma->vm_ops && vma->vm_ops->migrate) {
1401                        err = vma->vm_ops->migrate(vma, to, from, flags);
1402                        if (err)
1403                                break;
1404                }
1405        }
1406        return err;
1407}
1408#endif
1409