linux/mm/migrate.c
   1/*
   2 * Memory Migration functionality - linux/mm/migrate.c
   3 *
   4 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
   5 *
   6 * Page migration was first developed in the context of the memory hotplug
   7 * project. The main authors of the migration code are:
   8 *
   9 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  10 * Hirokazu Takahashi <taka@valinux.co.jp>
  11 * Dave Hansen <haveblue@us.ibm.com>
  12 * Christoph Lameter
  13 */
  14
  15#include <linux/migrate.h>
  16#include <linux/export.h>
  17#include <linux/swap.h>
  18#include <linux/swapops.h>
  19#include <linux/pagemap.h>
  20#include <linux/buffer_head.h>
  21#include <linux/mm_inline.h>
  22#include <linux/nsproxy.h>
  23#include <linux/pagevec.h>
  24#include <linux/ksm.h>
  25#include <linux/rmap.h>
  26#include <linux/topology.h>
  27#include <linux/cpu.h>
  28#include <linux/cpuset.h>
  29#include <linux/writeback.h>
  30#include <linux/mempolicy.h>
  31#include <linux/vmalloc.h>
  32#include <linux/security.h>
  33#include <linux/memcontrol.h>
  34#include <linux/syscalls.h>
  35#include <linux/hugetlb.h>
  36#include <linux/gfp.h>
  37
  38#include <asm/tlbflush.h>
  39
  40#include "internal.h"
  41
  42/*
  43 * migrate_prep() needs to be called before we start compiling a list of pages
  44 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
  45 * undesirable, use migrate_prep_local()
  46 */
  47int migrate_prep(void)
  48{
  49        /*
  50         * Clear the LRU lists so pages can be isolated.
  51         * Note that pages may be moved off the LRU after we have
  52         * drained them. Those pages will fail to migrate like other
  53         * pages that may be busy.
  54         */
  55        lru_add_drain_all();
  56
  57        return 0;
  58}
  59
  60/* Do the necessary work of migrate_prep but not if it involves other CPUs */
  61int migrate_prep_local(void)
  62{
  63        lru_add_drain();
  64
  65        return 0;
  66}
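
/*
 * Illustrative sketch only (the allocator callback alloc_target_page() and
 * the candidate page here are assumptions for the example, not part of this
 * file): a typical caller combines these helpers roughly as follows.
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page)) {
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *				    page_is_file_cache(page));
 *	}
 *	if (migrate_pages(&pagelist, alloc_target_page, 0, false, MIGRATE_SYNC))
 *		putback_lru_pages(&pagelist);
 */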
  67
  68/*
  69 * Add isolated pages on the list back to the LRU under page lock
   70 * to avoid leaking evictable pages back onto the unevictable list.
  71 */
  72void putback_lru_pages(struct list_head *l)
  73{
  74        struct page *page;
  75        struct page *page2;
  76
  77        list_for_each_entry_safe(page, page2, l, lru) {
  78                list_del(&page->lru);
  79                dec_zone_page_state(page, NR_ISOLATED_ANON +
  80                                page_is_file_cache(page));
  81                putback_lru_page(page);
  82        }
  83}
  84
  85/*
  86 * Restore a potential migration pte to a working pte entry
  87 */
  88static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
  89                                 unsigned long addr, void *old)
  90{
  91        struct mm_struct *mm = vma->vm_mm;
  92        swp_entry_t entry;
  93        pgd_t *pgd;
  94        pud_t *pud;
  95        pmd_t *pmd;
  96        pte_t *ptep, pte;
  97        spinlock_t *ptl;
  98
  99        if (unlikely(PageHuge(new))) {
 100                ptep = huge_pte_offset(mm, addr);
 101                if (!ptep)
 102                        goto out;
 103                ptl = &mm->page_table_lock;
 104        } else {
 105                pgd = pgd_offset(mm, addr);
 106                if (!pgd_present(*pgd))
 107                        goto out;
 108
 109                pud = pud_offset(pgd, addr);
 110                if (!pud_present(*pud))
 111                        goto out;
 112
 113                pmd = pmd_offset(pud, addr);
 114                if (pmd_trans_huge(*pmd))
 115                        goto out;
 116                if (!pmd_present(*pmd))
 117                        goto out;
 118
 119                ptep = pte_offset_map(pmd, addr);
 120
 121                /*
 122                 * Peek to check is_swap_pte() before taking ptlock?  No, we
 123                 * can race mremap's move_ptes(), which skips anon_vma lock.
 124                 */
 125
 126                ptl = pte_lockptr(mm, pmd);
 127        }
 128
 129        spin_lock(ptl);
 130        pte = *ptep;
 131        if (!is_swap_pte(pte))
 132                goto unlock;
 133
 134        entry = pte_to_swp_entry(pte);
 135
 136        if (!is_migration_entry(entry) ||
 137            migration_entry_to_page(entry) != old)
 138                goto unlock;
 139
 140        get_page(new);
 141        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
 142        if (is_write_migration_entry(entry))
 143                pte = pte_mkwrite(pte);
 144#ifdef CONFIG_HUGETLB_PAGE
 145        if (PageHuge(new))
 146                pte = pte_mkhuge(pte);
 147#endif
 148        flush_cache_page(vma, addr, pte_pfn(pte));
 149        set_pte_at(mm, addr, ptep, pte);
 150
 151        if (PageHuge(new)) {
 152                if (PageAnon(new))
 153                        hugepage_add_anon_rmap(new, vma, addr);
 154                else
 155                        page_dup_rmap(new);
 156        } else if (PageAnon(new))
 157                page_add_anon_rmap(new, vma, addr);
 158        else
 159                page_add_file_rmap(new);
 160
 161        /* No need to invalidate - it was non-present before */
 162        update_mmu_cache(vma, addr, ptep);
 163unlock:
 164        pte_unmap_unlock(ptep, ptl);
 165out:
 166        return SWAP_AGAIN;
 167}
 168
 169/*
 170 * Get rid of all migration entries and replace them by
 171 * references to the indicated page.
 172 */
 173static void remove_migration_ptes(struct page *old, struct page *new)
 174{
 175        rmap_walk(new, remove_migration_pte, old);
 176}
 177
 178/*
 179 * Something used the pte of a page under migration. We need to
 180 * get to the page and wait until migration is finished.
 181 * When we return from this function the fault will be retried.
 182 */
 183void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 184                                unsigned long address)
 185{
 186        pte_t *ptep, pte;
 187        spinlock_t *ptl;
 188        swp_entry_t entry;
 189        struct page *page;
 190
 191        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
 192        pte = *ptep;
 193        if (!is_swap_pte(pte))
 194                goto out;
 195
 196        entry = pte_to_swp_entry(pte);
 197        if (!is_migration_entry(entry))
 198                goto out;
 199
 200        page = migration_entry_to_page(entry);
 201
  202        /*
  203         * Once the radix-tree replacement step of page migration has
  204         * started, page_count *must* be zero, and we don't want to call
  205         * wait_on_page_locked() against a page without holding a reference
  206         * via get_page(). So we use get_page_unless_zero() here. Even if
  207         * that fails, the page fault will simply be retried.
  208         */
 209        if (!get_page_unless_zero(page))
 210                goto out;
 211        pte_unmap_unlock(ptep, ptl);
 212        wait_on_page_locked(page);
 213        put_page(page);
 214        return;
 215out:
 216        pte_unmap_unlock(ptep, ptl);
 217}
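
/*
 * For context, a hedged sketch of how a page fault handler is expected to
 * reach migration_entry_wait(); this mirrors the do_swap_page() pattern and
 * elides the surrounding fault handling, which is an assumption of the
 * example rather than part of this file.
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry)) {
 *			migration_entry_wait(mm, pmd, address);
 *			goto out;
 *		}
 *		...
 *	}
 */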
 218
 219#ifdef CONFIG_BLOCK
 220/* Returns true if all buffers are successfully locked */
 221static bool buffer_migrate_lock_buffers(struct buffer_head *head,
 222                                                        enum migrate_mode mode)
 223{
 224        struct buffer_head *bh = head;
 225
 226        /* Simple case, sync compaction */
 227        if (mode != MIGRATE_ASYNC) {
 228                do {
 229                        get_bh(bh);
 230                        lock_buffer(bh);
 231                        bh = bh->b_this_page;
 232
 233                } while (bh != head);
 234
 235                return true;
 236        }
 237
 238        /* async case, we cannot block on lock_buffer so use trylock_buffer */
 239        do {
 240                get_bh(bh);
 241                if (!trylock_buffer(bh)) {
 242                        /*
 243                         * We failed to lock the buffer and cannot stall in
 244                         * async migration. Release the taken locks
 245                         */
 246                        struct buffer_head *failed_bh = bh;
 247                        put_bh(failed_bh);
 248                        bh = head;
 249                        while (bh != failed_bh) {
 250                                unlock_buffer(bh);
 251                                put_bh(bh);
 252                                bh = bh->b_this_page;
 253                        }
 254                        return false;
 255                }
 256
 257                bh = bh->b_this_page;
 258        } while (bh != head);
 259        return true;
 260}
 261#else
 262static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
 263                                                        enum migrate_mode mode)
 264{
 265        return true;
 266}
 267#endif /* CONFIG_BLOCK */
 268
 269/*
 270 * Replace the page in the mapping.
 271 *
 272 * The number of remaining references must be:
 273 * 1 for anonymous pages without a mapping
 274 * 2 for pages with a mapping
 275 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 276 */
 277static int migrate_page_move_mapping(struct address_space *mapping,
 278                struct page *newpage, struct page *page,
 279                struct buffer_head *head, enum migrate_mode mode)
 280{
 281        int expected_count;
 282        void **pslot;
 283
 284        if (!mapping) {
 285                /* Anonymous page without mapping */
 286                if (page_count(page) != 1)
 287                        return -EAGAIN;
 288                return 0;
 289        }
 290
 291        spin_lock_irq(&mapping->tree_lock);
 292
 293        pslot = radix_tree_lookup_slot(&mapping->page_tree,
 294                                        page_index(page));
 295
 296        expected_count = 2 + page_has_private(page);
 297        if (page_count(page) != expected_count ||
 298                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 299                spin_unlock_irq(&mapping->tree_lock);
 300                return -EAGAIN;
 301        }
 302
 303        if (!page_freeze_refs(page, expected_count)) {
 304                spin_unlock_irq(&mapping->tree_lock);
 305                return -EAGAIN;
 306        }
 307
  308        /*
  309         * In the async case of migrating a page with buffers, lock the
  310         * buffers with trylock before the mapping is moved. Otherwise, if
  311         * the mapping were moved first and locking the buffers then failed,
  312         * we could not move the mapping back due to the elevated page count
  313         * and would have to block waiting on other references to be dropped.
  314         */
 315        if (mode == MIGRATE_ASYNC && head &&
 316                        !buffer_migrate_lock_buffers(head, mode)) {
 317                page_unfreeze_refs(page, expected_count);
 318                spin_unlock_irq(&mapping->tree_lock);
 319                return -EAGAIN;
 320        }
 321
 322        /*
 323         * Now we know that no one else is looking at the page.
 324         */
 325        get_page(newpage);      /* add cache reference */
 326        if (PageSwapCache(page)) {
 327                SetPageSwapCache(newpage);
 328                set_page_private(newpage, page_private(page));
 329        }
 330
 331        radix_tree_replace_slot(pslot, newpage);
 332
 333        /*
 334         * Drop cache reference from old page by unfreezing
 335         * to one less reference.
 336         * We know this isn't the last reference.
 337         */
 338        page_unfreeze_refs(page, expected_count - 1);
 339
 340        /*
 341         * If moved to a different zone then also account
 342         * the page for that zone. Other VM counters will be
 343         * taken care of when we establish references to the
 344         * new page and drop references to the old page.
 345         *
 346         * Note that anonymous pages are accounted for
 347         * via NR_FILE_PAGES and NR_ANON_PAGES if they
 348         * are mapped to swap space.
 349         */
 350        __dec_zone_page_state(page, NR_FILE_PAGES);
 351        __inc_zone_page_state(newpage, NR_FILE_PAGES);
 352        if (!PageSwapCache(page) && PageSwapBacked(page)) {
 353                __dec_zone_page_state(page, NR_SHMEM);
 354                __inc_zone_page_state(newpage, NR_SHMEM);
 355        }
 356        spin_unlock_irq(&mapping->tree_lock);
 357
 358        return 0;
 359}
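
/*
 * A short worked example of the reference rule above, assuming a clean
 * page-cache page that carries buffer heads: the radix tree holds one
 * reference, the caller's isolation holds one, and PagePrivate (the
 * buffers) holds one, so expected_count = 2 + page_has_private(page) = 3.
 * Any extra reference (a concurrent lookup, O_DIRECT, get_user_pages(), ...)
 * makes page_freeze_refs() fail and the migration is retried with -EAGAIN.
 */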
 360
 361/*
 362 * The expected number of remaining references is the same as that
 363 * of migrate_page_move_mapping().
 364 */
 365int migrate_huge_page_move_mapping(struct address_space *mapping,
 366                                   struct page *newpage, struct page *page)
 367{
 368        int expected_count;
 369        void **pslot;
 370
 371        if (!mapping) {
 372                if (page_count(page) != 1)
 373                        return -EAGAIN;
 374                return 0;
 375        }
 376
 377        spin_lock_irq(&mapping->tree_lock);
 378
 379        pslot = radix_tree_lookup_slot(&mapping->page_tree,
 380                                        page_index(page));
 381
 382        expected_count = 2 + page_has_private(page);
 383        if (page_count(page) != expected_count ||
 384                radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 385                spin_unlock_irq(&mapping->tree_lock);
 386                return -EAGAIN;
 387        }
 388
 389        if (!page_freeze_refs(page, expected_count)) {
 390                spin_unlock_irq(&mapping->tree_lock);
 391                return -EAGAIN;
 392        }
 393
 394        get_page(newpage);
 395
 396        radix_tree_replace_slot(pslot, newpage);
 397
 398        page_unfreeze_refs(page, expected_count - 1);
 399
 400        spin_unlock_irq(&mapping->tree_lock);
 401        return 0;
 402}
 403
 404/*
 405 * Copy the page to its new location
 406 */
 407void migrate_page_copy(struct page *newpage, struct page *page)
 408{
 409        if (PageHuge(page))
 410                copy_huge_page(newpage, page);
 411        else
 412                copy_highpage(newpage, page);
 413
 414        if (PageError(page))
 415                SetPageError(newpage);
 416        if (PageReferenced(page))
 417                SetPageReferenced(newpage);
 418        if (PageUptodate(page))
 419                SetPageUptodate(newpage);
 420        if (TestClearPageActive(page)) {
 421                VM_BUG_ON(PageUnevictable(page));
 422                SetPageActive(newpage);
 423        } else if (TestClearPageUnevictable(page))
 424                SetPageUnevictable(newpage);
 425        if (PageChecked(page))
 426                SetPageChecked(newpage);
 427        if (PageMappedToDisk(page))
 428                SetPageMappedToDisk(newpage);
 429
 430        if (PageDirty(page)) {
 431                clear_page_dirty_for_io(page);
  432                /*
  433                 * We want to mark both the page and the radix tree as dirty,
  434                 * and redo the accounting that clear_page_dirty_for_io undid,
  435                 * but we can't use set_page_dirty because that function is
  436                 * really a signal that the whole page has become dirty,
  437                 * whereas only part of our page may be dirty.
  438                 */
 439                if (PageSwapBacked(page))
 440                        SetPageDirty(newpage);
 441                else
 442                        __set_page_dirty_nobuffers(newpage);
 443        }
 444
 445        mlock_migrate_page(newpage, page);
 446        ksm_migrate_page(newpage, page);
 447
 448        ClearPageSwapCache(page);
 449        ClearPagePrivate(page);
 450        set_page_private(page, 0);
 451
 452        /*
 453         * If any waiters have accumulated on the new page then
 454         * wake them up.
 455         */
 456        if (PageWriteback(newpage))
 457                end_page_writeback(newpage);
 458}
 459
 460/************************************************************
 461 *                    Migration functions
 462 ***********************************************************/
 463
 464/* Always fail migration. Used for mappings that are not movable */
 465int fail_migrate_page(struct address_space *mapping,
 466                        struct page *newpage, struct page *page)
 467{
 468        return -EIO;
 469}
 470EXPORT_SYMBOL(fail_migrate_page);
 471
 472/*
 473 * Common logic to directly migrate a single page suitable for
 474 * pages that do not use PagePrivate/PagePrivate2.
 475 *
 476 * Pages are locked upon entry and exit.
 477 */
 478int migrate_page(struct address_space *mapping,
 479                struct page *newpage, struct page *page,
 480                enum migrate_mode mode)
 481{
 482        int rc;
 483
 484        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 485
 486        rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
 487
 488        if (rc)
 489                return rc;
 490
 491        migrate_page_copy(newpage, page);
 492        return 0;
 493}
 494EXPORT_SYMBOL(migrate_page);
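
/*
 * A minimal sketch (the filesystem and its other methods are hypothetical)
 * of how an address space without private page data can opt in to page
 * migration by pointing its address_space_operations at this helper:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */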
 495
 496#ifdef CONFIG_BLOCK
 497/*
 498 * Migration function for pages with buffers. This function can only be used
 499 * if the underlying filesystem guarantees that no other references to "page"
 500 * exist.
 501 */
 502int buffer_migrate_page(struct address_space *mapping,
 503                struct page *newpage, struct page *page, enum migrate_mode mode)
 504{
 505        struct buffer_head *bh, *head;
 506        int rc;
 507
 508        if (!page_has_buffers(page))
 509                return migrate_page(mapping, newpage, page, mode);
 510
 511        head = page_buffers(page);
 512
 513        rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
 514
 515        if (rc)
 516                return rc;
 517
 518        /*
 519         * In the async case, migrate_page_move_mapping locked the buffers
 520         * with an IRQ-safe spinlock held. In the sync case, the buffers
 521         * need to be locked now
 522         */
 523        if (mode != MIGRATE_ASYNC)
 524                BUG_ON(!buffer_migrate_lock_buffers(head, mode));
 525
 526        ClearPagePrivate(page);
 527        set_page_private(newpage, page_private(page));
 528        set_page_private(page, 0);
 529        put_page(page);
 530        get_page(newpage);
 531
 532        bh = head;
 533        do {
 534                set_bh_page(bh, newpage, bh_offset(bh));
 535                bh = bh->b_this_page;
 536
 537        } while (bh != head);
 538
 539        SetPagePrivate(newpage);
 540
 541        migrate_page_copy(newpage, page);
 542
 543        bh = head;
 544        do {
 545                unlock_buffer(bh);
 546                put_bh(bh);
 547                bh = bh->b_this_page;
 548
 549        } while (bh != head);
 550
 551        return 0;
 552}
 553EXPORT_SYMBOL(buffer_migrate_page);
 554#endif
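
/*
 * Similarly, a block-backed filesystem whose pages carry buffer_heads would
 * typically wire up buffer_migrate_page instead (sketch only; the aops name
 * and the remaining methods are assumptions):
 *
 *	static const struct address_space_operations example_blkdev_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= buffer_migrate_page,
 *	};
 */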
 555
 556/*
  557 * Write back a page to clear its dirty state
 558 */
 559static int writeout(struct address_space *mapping, struct page *page)
 560{
 561        struct writeback_control wbc = {
 562                .sync_mode = WB_SYNC_NONE,
 563                .nr_to_write = 1,
 564                .range_start = 0,
 565                .range_end = LLONG_MAX,
 566                .for_reclaim = 1
 567        };
 568        int rc;
 569
 570        if (!mapping->a_ops->writepage)
 571                /* No write method for the address space */
 572                return -EINVAL;
 573
 574        if (!clear_page_dirty_for_io(page))
 575                /* Someone else already triggered a write */
 576                return -EAGAIN;
 577
 578        /*
 579         * A dirty page may imply that the underlying filesystem has
 580         * the page on some queue. So the page must be clean for
  581         * migration. Writeout may mean we lose the lock and the
 582         * page state is no longer what we checked for earlier.
 583         * At this point we know that the migration attempt cannot
 584         * be successful.
 585         */
 586        remove_migration_ptes(page, page);
 587
 588        rc = mapping->a_ops->writepage(page, &wbc);
 589
 590        if (rc != AOP_WRITEPAGE_ACTIVATE)
 591                /* unlocked. Relock */
 592                lock_page(page);
 593
 594        return (rc < 0) ? -EIO : -EAGAIN;
 595}
 596
 597/*
 598 * Default handling if a filesystem does not provide a migration function.
 599 */
 600static int fallback_migrate_page(struct address_space *mapping,
 601        struct page *newpage, struct page *page, enum migrate_mode mode)
 602{
 603        if (PageDirty(page)) {
 604                /* Only writeback pages in full synchronous migration */
 605                if (mode != MIGRATE_SYNC)
 606                        return -EBUSY;
 607                return writeout(mapping, page);
 608        }
 609
 610        /*
 611         * Buffers may be managed in a filesystem specific way.
 612         * We must have no buffers or drop them.
 613         */
 614        if (page_has_private(page) &&
 615            !try_to_release_page(page, GFP_KERNEL))
 616                return -EAGAIN;
 617
 618        return migrate_page(mapping, newpage, page, mode);
 619}
 620
 621/*
 622 * Move a page to a newly allocated page
 623 * The page is locked and all ptes have been successfully removed.
 624 *
 625 * The new page will have replaced the old page if this function
 626 * is successful.
 627 *
 628 * Return value:
 629 *   < 0 - error code
 630 *  == 0 - success
 631 */
 632static int move_to_new_page(struct page *newpage, struct page *page,
 633                                int remap_swapcache, enum migrate_mode mode)
 634{
 635        struct address_space *mapping;
 636        int rc;
 637
 638        /*
 639         * Block others from accessing the page when we get around to
 640         * establishing additional references. We are the only one
 641         * holding a reference to the new page at this point.
 642         */
 643        if (!trylock_page(newpage))
 644                BUG();
 645
  646        /* Prepare mapping for the new page. */
 647        newpage->index = page->index;
 648        newpage->mapping = page->mapping;
 649        if (PageSwapBacked(page))
 650                SetPageSwapBacked(newpage);
 651
 652        mapping = page_mapping(page);
 653        if (!mapping)
 654                rc = migrate_page(mapping, newpage, page, mode);
 655        else if (mapping->a_ops->migratepage)
 656                /*
 657                 * Most pages have a mapping and most filesystems provide a
 658                 * migratepage callback. Anonymous pages are part of swap
 659                 * space which also has its own migratepage callback. This
 660                 * is the most common path for page migration.
 661                 */
 662                rc = mapping->a_ops->migratepage(mapping,
 663                                                newpage, page, mode);
 664        else
 665                rc = fallback_migrate_page(mapping, newpage, page, mode);
 666
 667        if (rc) {
 668                newpage->mapping = NULL;
 669        } else {
 670                if (remap_swapcache)
 671                        remove_migration_ptes(page, newpage);
 672                page->mapping = NULL;
 673        }
 674
 675        unlock_page(newpage);
 676
 677        return rc;
 678}
 679
 680static int __unmap_and_move(struct page *page, struct page *newpage,
 681                        int force, bool offlining, enum migrate_mode mode)
 682{
 683        int rc = -EAGAIN;
 684        int remap_swapcache = 1;
 685        int charge = 0;
 686        struct mem_cgroup *mem;
 687        struct anon_vma *anon_vma = NULL;
 688
 689        if (!trylock_page(page)) {
 690                if (!force || mode == MIGRATE_ASYNC)
 691                        goto out;
 692
 693                /*
 694                 * It's not safe for direct compaction to call lock_page.
 695                 * For example, during page readahead pages are added locked
 696                 * to the LRU. Later, when the IO completes the pages are
 697                 * marked uptodate and unlocked. However, the queueing
 698                 * could be merging multiple pages for one bio (e.g.
 699                 * mpage_readpages). If an allocation happens for the
 700                 * second or third page, the process can end up locking
 701                 * the same page twice and deadlocking. Rather than
 702                 * trying to be clever about what pages can be locked,
 703                 * avoid the use of lock_page for direct compaction
 704                 * altogether.
 705                 */
 706                if (current->flags & PF_MEMALLOC)
 707                        goto out;
 708
 709                lock_page(page);
 710        }
 711
 712        /*
 713         * Only memory hotplug's offline_pages() caller has locked out KSM,
 714         * and can safely migrate a KSM page.  The other cases have skipped
 715         * PageKsm along with PageReserved - but it is only now when we have
 716         * the page lock that we can be certain it will not go KSM beneath us
 717         * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
 718         * its pagecount raised, but only here do we take the page lock which
 719         * serializes that).
 720         */
 721        if (PageKsm(page) && !offlining) {
 722                rc = -EBUSY;
 723                goto unlock;
 724        }
 725
 726        /* charge against new page */
 727        charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
 728        if (charge == -ENOMEM) {
 729                rc = -ENOMEM;
 730                goto unlock;
 731        }
 732        BUG_ON(charge);
 733
 734        if (PageWriteback(page)) {
  735                /*
  736                 * Only in the case of a full synchronous migration is it
  737                 * necessary to wait for PageWriteback. In the async case,
  738                 * the retry loop is too short and in the sync-light case,
  739                 * the overhead of stalling is too high.
  740                 */
 741                if (mode != MIGRATE_SYNC) {
 742                        rc = -EBUSY;
 743                        goto uncharge;
 744                }
 745                if (!force)
 746                        goto uncharge;
 747                wait_on_page_writeback(page);
 748        }
  749        /*
  750         * By the time try_to_unmap() runs, page->mapcount has gone down to
  751         * 0, so we could not otherwise notice if the anon_vma were freed
  752         * while we migrate the page. Taking a reference with get_anon_vma()
  753         * delays freeing the anon_vma pointer until the end of migration.
  754         * File cache pages are no problem because migration holds the page
  755         * lock (and may use writepage), so only anon pages need this care.
  756         */
 757        if (PageAnon(page)) {
 758                /*
 759                 * Only page_lock_anon_vma() understands the subtleties of
 760                 * getting a hold on an anon_vma from outside one of its mms.
 761                 */
 762                anon_vma = page_get_anon_vma(page);
 763                if (anon_vma) {
 764                        /*
 765                         * Anon page
 766                         */
 767                } else if (PageSwapCache(page)) {
 768                        /*
 769                         * We cannot be sure that the anon_vma of an unmapped
 770                         * swapcache page is safe to use because we don't
 771                         * know in advance if the VMA that this page belonged
 772                         * to still exists. If the VMA and others sharing the
 773                         * data have been freed, then the anon_vma could
 774                         * already be invalid.
 775                         *
 776                         * To avoid this possibility, swapcache pages get
 777                         * migrated but are not remapped when migration
 778                         * completes
 779                         */
 780                        remap_swapcache = 0;
 781                } else {
 782                        goto uncharge;
 783                }
 784        }
 785
 786        /*
 787         * Corner case handling:
  788         * 1. When a new swap-cache page is first read in, it is added to the LRU
 789         * and treated as swapcache but it has no rmap yet.
 790         * Calling try_to_unmap() against a page->mapping==NULL page will
 791         * trigger a BUG.  So handle it here.
 792         * 2. An orphaned page (see truncate_complete_page) might have
 793         * fs-private metadata. The page can be picked up due to memory
 794         * offlining.  Everywhere else except page reclaim, the page is
 795         * invisible to the vm, so the page can not be migrated.  So try to
 796         * free the metadata, so the page can be freed.
 797         */
 798        if (!page->mapping) {
 799                VM_BUG_ON(PageAnon(page));
 800                if (page_has_private(page)) {
 801                        try_to_free_buffers(page);
 802                        goto uncharge;
 803                }
 804                goto skip_unmap;
 805        }
 806
 807        /* Establish migration ptes or remove ptes */
 808        try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 809
 810skip_unmap:
 811        if (!page_mapped(page))
 812                rc = move_to_new_page(newpage, page, remap_swapcache, mode);
 813
 814        if (rc && remap_swapcache)
 815                remove_migration_ptes(page, page);
 816
 817        /* Drop an anon_vma reference if we took one */
 818        if (anon_vma)
 819                put_anon_vma(anon_vma);
 820
 821uncharge:
 822        if (!charge)
 823                mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 824unlock:
 825        unlock_page(page);
 826out:
 827        return rc;
 828}
 829
 830/*
 831 * Obtain the lock on page, remove all ptes and migrate the page
 832 * to the newly allocated page in newpage.
 833 */
 834static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 835                        struct page *page, int force, bool offlining,
 836                        enum migrate_mode mode)
 837{
 838        int rc = 0;
 839        int *result = NULL;
 840        struct page *newpage = get_new_page(page, private, &result);
 841
 842        if (!newpage)
 843                return -ENOMEM;
 844
 845        if (page_count(page) == 1) {
 846                /* page was freed from under us. So we are done. */
 847                goto out;
 848        }
 849
 850        if (unlikely(PageTransHuge(page)))
 851                if (unlikely(split_huge_page(page)))
 852                        goto out;
 853
 854        rc = __unmap_and_move(page, newpage, force, offlining, mode);
 855out:
 856        if (rc != -EAGAIN) {
 857                /*
 858                 * A page that has been migrated has all references
 859                 * removed and will be freed. A page that has not been
  860                 * migrated will have kept its references and be
 861                 * restored.
 862                 */
 863                list_del(&page->lru);
 864                dec_zone_page_state(page, NR_ISOLATED_ANON +
 865                                page_is_file_cache(page));
 866                putback_lru_page(page);
 867        }
 868        /*
 869         * Move the new page to the LRU. If migration was not successful
 870         * then this will free the page.
 871         */
 872        putback_lru_page(newpage);
 873        if (result) {
 874                if (rc)
 875                        *result = rc;
 876                else
 877                        *result = page_to_nid(newpage);
 878        }
 879        return rc;
 880}
 881
 882/*
  883 * Counterpart of unmap_and_move() for hugepage migration.
  884 *
  885 * This function doesn't wait for the completion of hugepage I/O
  886 * because there is no race between I/O and migration for hugepages.
  887 * Note that currently hugepage I/O occurs only in direct I/O
  888 * where no lock is held and PG_writeback is irrelevant,
  889 * and the writeback status of all subpages is counted in the reference
  890 * count of the head page (i.e. if all subpages of a 2MB hugepage are
  891 * under direct I/O, the reference count of the head page is 512 and a bit more.)
  892 * This means that when we try to migrate a hugepage whose subpages are
  893 * doing direct I/O, some references remain after try_to_unmap() and
  894 * hugepage migration fails without data corruption.
 895 *
 896 * There is also no race when direct I/O is issued on the page under migration,
 897 * because then pte is replaced with migration swap entry and direct I/O code
 898 * will wait in the page fault for migration to complete.
 899 */
 900static int unmap_and_move_huge_page(new_page_t get_new_page,
 901                                unsigned long private, struct page *hpage,
 902                                int force, bool offlining,
 903                                enum migrate_mode mode)
 904{
 905        int rc = 0;
 906        int *result = NULL;
 907        struct page *new_hpage = get_new_page(hpage, private, &result);
 908        struct anon_vma *anon_vma = NULL;
 909
 910        if (!new_hpage)
 911                return -ENOMEM;
 912
 913        rc = -EAGAIN;
 914
 915        if (!trylock_page(hpage)) {
 916                if (!force || mode != MIGRATE_SYNC)
 917                        goto out;
 918                lock_page(hpage);
 919        }
 920
 921        if (PageAnon(hpage))
 922                anon_vma = page_get_anon_vma(hpage);
 923
 924        try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 925
 926        if (!page_mapped(hpage))
 927                rc = move_to_new_page(new_hpage, hpage, 1, mode);
 928
 929        if (rc)
 930                remove_migration_ptes(hpage, hpage);
 931
 932        if (anon_vma)
 933                put_anon_vma(anon_vma);
 934        unlock_page(hpage);
 935
 936out:
 937        if (rc != -EAGAIN) {
 938                list_del(&hpage->lru);
 939                put_page(hpage);
 940        }
 941
 942        put_page(new_hpage);
 943
 944        if (result) {
 945                if (rc)
 946                        *result = rc;
 947                else
 948                        *result = page_to_nid(new_hpage);
 949        }
 950        return rc;
 951}
 952
  953/*
  954 * migrate_pages
  955 *
  956 * The function takes one list of pages to migrate and a function
  957 * that, given a page to be migrated and the private data, determines
  958 * the target of the move and allocates the new page.
  959 *
  960 * The function returns after 10 attempts or if no pages
  961 * are movable anymore because the list has become empty
  962 * or no retryable pages exist anymore.
  963 * The caller should call putback_lru_pages() to return the pages to the
  964 * LRU or free list only if ret != 0.
  965 *
  966 * Return: Number of pages not migrated or error code.
  967 */
 968int migrate_pages(struct list_head *from,
 969                new_page_t get_new_page, unsigned long private, bool offlining,
 970                enum migrate_mode mode)
 971{
 972        int retry = 1;
 973        int nr_failed = 0;
 974        int pass = 0;
 975        struct page *page;
 976        struct page *page2;
 977        int swapwrite = current->flags & PF_SWAPWRITE;
 978        int rc;
 979
 980        if (!swapwrite)
 981                current->flags |= PF_SWAPWRITE;
 982
 983        for(pass = 0; pass < 10 && retry; pass++) {
 984                retry = 0;
 985
 986                list_for_each_entry_safe(page, page2, from, lru) {
 987                        cond_resched();
 988
 989                        rc = unmap_and_move(get_new_page, private,
 990                                                page, pass > 2, offlining,
 991                                                mode);
 992
 993                        switch(rc) {
 994                        case -ENOMEM:
 995                                goto out;
 996                        case -EAGAIN:
 997                                retry++;
 998                                break;
 999                        case 0:
1000                                break;
1001                        default:
1002                                /* Permanent failure */
1003                                nr_failed++;
1004                                break;
1005                        }
1006                }
1007        }
1008        rc = 0;
1009out:
1010        if (!swapwrite)
1011                current->flags &= ~PF_SWAPWRITE;
1012
1013        if (rc)
1014                return rc;
1015
1016        return nr_failed + retry;
1017}
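
/*
 * A hedged sketch of the get_new_page callback that callers supply; the
 * function name and the node choice below are assumptions for illustration
 * (compare new_page_node() further down for the NUMA move_pages() variant):
 *
 *	static struct page *alloc_target_page(struct page *page,
 *					      unsigned long private,
 *					      int **result)
 *	{
 *		int nid = (int)private;
 *
 *		return alloc_pages_exact_node(nid, GFP_HIGHUSER_MOVABLE, 0);
 *	}
 */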
1018
1019int migrate_huge_pages(struct list_head *from,
1020                new_page_t get_new_page, unsigned long private, bool offlining,
1021                enum migrate_mode mode)
1022{
1023        int retry = 1;
1024        int nr_failed = 0;
1025        int pass = 0;
1026        struct page *page;
1027        struct page *page2;
1028        int rc;
1029
1030        for (pass = 0; pass < 10 && retry; pass++) {
1031                retry = 0;
1032
1033                list_for_each_entry_safe(page, page2, from, lru) {
1034                        cond_resched();
1035
1036                        rc = unmap_and_move_huge_page(get_new_page,
1037                                        private, page, pass > 2, offlining,
1038                                        mode);
1039
1040                        switch(rc) {
1041                        case -ENOMEM:
1042                                goto out;
1043                        case -EAGAIN:
1044                                retry++;
1045                                break;
1046                        case 0:
1047                                break;
1048                        default:
1049                                /* Permanent failure */
1050                                nr_failed++;
1051                                break;
1052                        }
1053                }
1054        }
1055        rc = 0;
1056out:
1057        if (rc)
1058                return rc;
1059
1060        return nr_failed + retry;
1061}
1062
1063#ifdef CONFIG_NUMA
1064/*
1065 * Move a list of individual pages
1066 */
1067struct page_to_node {
1068        unsigned long addr;
1069        struct page *page;
1070        int node;
1071        int status;
1072};
1073
1074static struct page *new_page_node(struct page *p, unsigned long private,
1075                int **result)
1076{
1077        struct page_to_node *pm = (struct page_to_node *)private;
1078
1079        while (pm->node != MAX_NUMNODES && pm->page != p)
1080                pm++;
1081
1082        if (pm->node == MAX_NUMNODES)
1083                return NULL;
1084
1085        *result = &pm->status;
1086
1087        return alloc_pages_exact_node(pm->node,
1088                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
1089}
1090
1091/*
1092 * Move a set of pages as indicated in the pm array. The addr
1093 * field must be set to the virtual address of the page to be moved
1094 * and the node number must contain a valid target node.
1095 * The pm array ends with node = MAX_NUMNODES.
1096 */
1097static int do_move_page_to_node_array(struct mm_struct *mm,
1098                                      struct page_to_node *pm,
1099                                      int migrate_all)
1100{
1101        int err;
1102        struct page_to_node *pp;
1103        LIST_HEAD(pagelist);
1104
1105        down_read(&mm->mmap_sem);
1106
1107        /*
1108         * Build a list of pages to migrate
1109         */
1110        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
1111                struct vm_area_struct *vma;
1112                struct page *page;
1113
1114                err = -EFAULT;
1115                vma = find_vma(mm, pp->addr);
1116                if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
1117                        goto set_status;
1118
1119                page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);
1120
1121                err = PTR_ERR(page);
1122                if (IS_ERR(page))
1123                        goto set_status;
1124
1125                err = -ENOENT;
1126                if (!page)
1127                        goto set_status;
1128
1129                /* Use PageReserved to check for zero page */
1130                if (PageReserved(page) || PageKsm(page))
1131                        goto put_and_set;
1132
1133                pp->page = page;
1134                err = page_to_nid(page);
1135
1136                if (err == pp->node)
1137                        /*
1138                         * Node already in the right place
1139                         */
1140                        goto put_and_set;
1141
1142                err = -EACCES;
1143                if (page_mapcount(page) > 1 &&
1144                                !migrate_all)
1145                        goto put_and_set;
1146
1147                err = isolate_lru_page(page);
1148                if (!err) {
1149                        list_add_tail(&page->lru, &pagelist);
1150                        inc_zone_page_state(page, NR_ISOLATED_ANON +
1151                                            page_is_file_cache(page));
1152                }
1153put_and_set:
1154                /*
1155                 * Either remove the duplicate refcount from
1156                 * isolate_lru_page() or drop the page ref if it was
1157                 * not isolated.
1158                 */
1159                put_page(page);
1160set_status:
1161                pp->status = err;
1162        }
1163
1164        err = 0;
1165        if (!list_empty(&pagelist)) {
1166                err = migrate_pages(&pagelist, new_page_node,
1167                                (unsigned long)pm, 0, MIGRATE_SYNC);
1168                if (err)
1169                        putback_lru_pages(&pagelist);
1170        }
1171
1172        up_read(&mm->mmap_sem);
1173        return err;
1174}
1175
1176/*
1177 * Migrate an array of page addresses onto an array of nodes and fill
1178 * in the corresponding array of status.
1179 */
1180static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1181                         unsigned long nr_pages,
1182                         const void __user * __user *pages,
1183                         const int __user *nodes,
1184                         int __user *status, int flags)
1185{
1186        struct page_to_node *pm;
1187        unsigned long chunk_nr_pages;
1188        unsigned long chunk_start;
1189        int err;
1190
1191        err = -ENOMEM;
1192        pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
1193        if (!pm)
1194                goto out;
1195
1196        migrate_prep();
1197
1198        /*
1199         * Store a chunk of page_to_node array in a page,
1200         * but keep the last one as a marker
1201         */
1202        chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
1203
1204        for (chunk_start = 0;
1205             chunk_start < nr_pages;
1206             chunk_start += chunk_nr_pages) {
1207                int j;
1208
1209                if (chunk_start + chunk_nr_pages > nr_pages)
1210                        chunk_nr_pages = nr_pages - chunk_start;
1211
1212                /* fill the chunk pm with addrs and nodes from user-space */
1213                for (j = 0; j < chunk_nr_pages; j++) {
1214                        const void __user *p;
1215                        int node;
1216
1217                        err = -EFAULT;
1218                        if (get_user(p, pages + j + chunk_start))
1219                                goto out_pm;
1220                        pm[j].addr = (unsigned long) p;
1221
1222                        if (get_user(node, nodes + j + chunk_start))
1223                                goto out_pm;
1224
1225                        err = -ENODEV;
1226                        if (node < 0 || node >= MAX_NUMNODES)
1227                                goto out_pm;
1228
1229                        if (!node_state(node, N_HIGH_MEMORY))
1230                                goto out_pm;
1231
1232                        err = -EACCES;
1233                        if (!node_isset(node, task_nodes))
1234                                goto out_pm;
1235
1236                        pm[j].node = node;
1237                }
1238
1239                /* End marker for this chunk */
1240                pm[chunk_nr_pages].node = MAX_NUMNODES;
1241
1242                /* Migrate this chunk */
1243                err = do_move_page_to_node_array(mm, pm,
1244                                                 flags & MPOL_MF_MOVE_ALL);
1245                if (err < 0)
1246                        goto out_pm;
1247
1248                /* Return status information */
1249                for (j = 0; j < chunk_nr_pages; j++)
1250                        if (put_user(pm[j].status, status + j + chunk_start)) {
1251                                err = -EFAULT;
1252                                goto out_pm;
1253                        }
1254        }
1255        err = 0;
1256
1257out_pm:
1258        free_page((unsigned long)pm);
1259out:
1260        return err;
1261}
1262
1263/*
1264 * Determine the nodes of an array of pages and store them in an array of status.
1265 */
1266static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1267                                const void __user **pages, int *status)
1268{
1269        unsigned long i;
1270
1271        down_read(&mm->mmap_sem);
1272
1273        for (i = 0; i < nr_pages; i++) {
1274                unsigned long addr = (unsigned long)(*pages);
1275                struct vm_area_struct *vma;
1276                struct page *page;
1277                int err = -EFAULT;
1278
1279                vma = find_vma(mm, addr);
1280                if (!vma || addr < vma->vm_start)
1281                        goto set_status;
1282
1283                page = follow_page(vma, addr, 0);
1284
1285                err = PTR_ERR(page);
1286                if (IS_ERR(page))
1287                        goto set_status;
1288
1289                err = -ENOENT;
1290                /* Use PageReserved to check for zero page */
1291                if (!page || PageReserved(page) || PageKsm(page))
1292                        goto set_status;
1293
1294                err = page_to_nid(page);
1295set_status:
1296                *status = err;
1297
1298                pages++;
1299                status++;
1300        }
1301
1302        up_read(&mm->mmap_sem);
1303}
1304
1305/*
1306 * Determine the nodes of a user array of pages and store them in
1307 * a user array of status.
1308 */
1309static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1310                         const void __user * __user *pages,
1311                         int __user *status)
1312{
1313#define DO_PAGES_STAT_CHUNK_NR 16
1314        const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1315        int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1316
1317        while (nr_pages) {
1318                unsigned long chunk_nr;
1319
1320                chunk_nr = nr_pages;
1321                if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1322                        chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1323
1324                if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
1325                        break;
1326
1327                do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1328
1329                if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1330                        break;
1331
1332                pages += chunk_nr;
1333                status += chunk_nr;
1334                nr_pages -= chunk_nr;
1335        }
1336        return nr_pages ? -EFAULT : 0;
1337}
1338
1339/*
1340 * Move a list of pages in the address space of the currently executing
1341 * process.
1342 */
1343SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1344                const void __user * __user *, pages,
1345                const int __user *, nodes,
1346                int __user *, status, int, flags)
1347{
1348        const struct cred *cred = current_cred(), *tcred;
1349        struct task_struct *task;
1350        struct mm_struct *mm;
1351        int err;
1352        nodemask_t task_nodes;
1353
1354        /* Check flags */
1355        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
1356                return -EINVAL;
1357
1358        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1359                return -EPERM;
1360
1361        /* Find the mm_struct */
1362        rcu_read_lock();
1363        task = pid ? find_task_by_vpid(pid) : current;
1364        if (!task) {
1365                rcu_read_unlock();
1366                return -ESRCH;
1367        }
1368        get_task_struct(task);
1369
1370        /*
1371         * Check if this process has the right to modify the specified
1372         * process. The right exists if the process has administrative
1373         * capabilities, superuser privileges or the same
1374         * userid as the target process.
1375         */
1376        tcred = __task_cred(task);
1377        if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1378            !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
1379            !capable(CAP_SYS_NICE)) {
1380                rcu_read_unlock();
1381                err = -EPERM;
1382                goto out;
1383        }
1384        rcu_read_unlock();
1385
1386        err = security_task_movememory(task);
1387        if (err)
1388                goto out;
1389
1390        task_nodes = cpuset_mems_allowed(task);
1391        mm = get_task_mm(task);
1392        put_task_struct(task);
1393
1394        if (!mm)
1395                return -EINVAL;
1396
1397        if (nodes)
1398                err = do_pages_move(mm, task_nodes, nr_pages, pages,
1399                                    nodes, status, flags);
1400        else
1401                err = do_pages_stat(mm, nr_pages, pages, status);
1402
1403        mmput(mm);
1404        return err;
1405
1406out:
1407        put_task_struct(task);
1408        return err;
1409}
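
/*
 * From userspace this syscall is normally reached through libnuma's
 * move_pages(3) wrapper. A hedged sketch of asking for one page of the
 * calling process (pid 0) to be moved to node 1, with error handling
 * elided as an assumption of the example:
 *
 *	#include <numaif.h>
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node %d\n", status[0]);
 */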
1410
1411/*
1412 * Call migration functions in the vma_ops that may prepare
1413 * memory in a vma for migration. Migration functions may perform
1414 * the migration for vmas that do not have an underlying page struct.
1415 */
1416int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
1417        const nodemask_t *from, unsigned long flags)
1418{
1419        struct vm_area_struct *vma;
1420        int err = 0;
1421
1422        for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
1423                if (vma->vm_ops && vma->vm_ops->migrate) {
1424                        err = vma->vm_ops->migrate(vma, to, from, flags);
1425                        if (err)
1426                                break;
1427                }
1428        }
1429        return err;
1430}
1431#endif
1432