   1/*
   2 * linux/mm/compaction.c
   3 *
   4 * Memory compaction for the reduction of external fragmentation. Note that
   5 * this heavily depends upon page migration to do all the real heavy
   6 * lifting
   7 *
   8 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
   9 */
  10#include <linux/swap.h>
  11#include <linux/migrate.h>
  12#include <linux/compaction.h>
  13#include <linux/mm_inline.h>
  14#include <linux/backing-dev.h>
  15#include <linux/sysctl.h>
  16#include <linux/sysfs.h>
  17#include "internal.h"
  18
  19#if defined CONFIG_COMPACTION || defined CONFIG_CMA
  20
  21#define CREATE_TRACE_POINTS
  22#include <trace/events/compaction.h>
  23
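/* Return isolated free pages to the buddy allocator, counting them as we go. */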
  24static unsigned long release_freepages(struct list_head *freelist)
  25{
  26        struct page *page, *next;
  27        unsigned long count = 0;
  28
  29        list_for_each_entry_safe(page, next, freelist, lru) {
  30                list_del(&page->lru);
  31                __free_page(page);
  32                count++;
  33        }
  34
  35        return count;
  36}
  37
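/* split_free_page() does not map the pages, so do it here for each one. */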
  38static void map_pages(struct list_head *list)
  39{
  40        struct page *page;
  41
  42        list_for_each_entry(page, list, lru) {
  43                arch_alloc_page(page, 0);
  44                kernel_map_pages(page, 1, 1);
  45        }
  46}
  47
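/* Returns true for pageblock migratetypes (MOVABLE, CMA) that async compaction may scan. */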
  48static inline bool migrate_async_suitable(int migratetype)
  49{
  50        return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
  51}
  52
  53#ifdef CONFIG_COMPACTION
  54/* Returns true if the pageblock should be scanned for pages to isolate. */
  55static inline bool isolation_suitable(struct compact_control *cc,
  56                                        struct page *page)
  57{
  58        if (cc->ignore_skip_hint)
  59                return true;
  60
  61        return !get_pageblock_skip(page);
  62}
  63
  64/*
  65 * This function is called to clear all cached information on pageblocks that
  66 * should be skipped for page isolation when the migrate and free page scanner
  67 * meet.
  68 */
  69static void __reset_isolation_suitable(struct zone *zone)
  70{
  71        unsigned long start_pfn = zone->zone_start_pfn;
  72        unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
  73        unsigned long pfn;
  74
  75        zone->compact_cached_migrate_pfn = start_pfn;
  76        zone->compact_cached_free_pfn = end_pfn;
  77        zone->compact_blockskip_flush = false;
  78
  79        /* Walk the zone and mark every pageblock as suitable for isolation */
  80        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  81                struct page *page;
  82
  83                cond_resched();
  84
  85                if (!pfn_valid(pfn))
  86                        continue;
  87
  88                page = pfn_to_page(pfn);
  89                if (zone != page_zone(page))
  90                        continue;
  91
  92                clear_pageblock_skip(page);
  93        }
  94}
  95
  96void reset_isolation_suitable(pg_data_t *pgdat)
  97{
  98        int zoneid;
  99
 100        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
 101                struct zone *zone = &pgdat->node_zones[zoneid];
 102                if (!populated_zone(zone))
 103                        continue;
 104
 105                /* Only flush if a full compaction finished recently */
 106                if (zone->compact_blockskip_flush)
 107                        __reset_isolation_suitable(zone);
 108        }
 109}
 110
 111/*
 112 * If no pages were isolated then mark this pageblock to be skipped in the
 113 * future. The information is later cleared by __reset_isolation_suitable().
 114 */
 115static void update_pageblock_skip(struct compact_control *cc,
 116                        struct page *page, unsigned long nr_isolated,
 117                        bool migrate_scanner)
 118{
 119        struct zone *zone = cc->zone;
 120        if (!page)
 121                return;
 122
 123        if (!nr_isolated) {
 124                unsigned long pfn = page_to_pfn(page);
 125                set_pageblock_skip(page);
 126
 127                /* Update where compaction should restart */
 128                if (migrate_scanner) {
 129                        if (!cc->finished_update_migrate &&
 130                            pfn > zone->compact_cached_migrate_pfn)
 131                                zone->compact_cached_migrate_pfn = pfn;
 132                } else {
 133                        if (!cc->finished_update_free &&
 134                            pfn < zone->compact_cached_free_pfn)
 135                                zone->compact_cached_free_pfn = pfn;
 136                }
 137        }
 138}
 139#else
 140static inline bool isolation_suitable(struct compact_control *cc,
 141                                        struct page *page)
 142{
 143        return true;
 144}
 145
 146static void update_pageblock_skip(struct compact_control *cc,
 147                        struct page *page, unsigned long nr_isolated,
 148                        bool migrate_scanner)
 149{
 150}
 151#endif /* CONFIG_COMPACTION */
 152
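/* Drop a coarse lock if we need to reschedule or if the lock is contended. */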
 153static inline bool should_release_lock(spinlock_t *lock)
 154{
 155        return need_resched() || spin_is_contended(lock);
 156}
 157
 158/*
 159 * Compaction requires the taking of some coarse locks that are potentially
 160 * very heavily contended. Check if the process needs to be scheduled or
 161 * if the lock is contended. For async compaction, back out if contention
 162 * is severe. For sync compaction, schedule.
 163 *
 164 * Returns true if the lock is held.
 165 * Returns false if the lock is released and compaction should abort
 166 */
 167static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
 168                                      bool locked, struct compact_control *cc)
 169{
 170        if (should_release_lock(lock)) {
 171                if (locked) {
 172                        spin_unlock_irqrestore(lock, *flags);
 173                        locked = false;
 174                }
 175
 176                /* async aborts if taking too long or contended */
 177                if (!cc->sync) {
 178                        cc->contended = true;
 179                        return false;
 180                }
 181
 182                cond_resched();
 183        }
 184
 185        if (!locked)
 186                spin_lock_irqsave(lock, *flags);
 187        return true;
 188}
 189
 190static inline bool compact_trylock_irqsave(spinlock_t *lock,
 191                        unsigned long *flags, struct compact_control *cc)
 192{
 193        return compact_checklock_irqsave(lock, flags, false, cc);
 194}
 195
 196/* Returns true if the page is within a block suitable for migration to */
 197static bool suitable_migration_target(struct page *page)
 198{
 199        int migratetype = get_pageblock_migratetype(page);
 200
 201        /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
 202        if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
 203                return false;
 204
 205        /* If the page is a large free page, then allow migration */
 206        if (PageBuddy(page) && page_order(page) >= pageblock_order)
 207                return true;
 208
 209        /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
 210        if (migrate_async_suitable(migratetype))
 211                return true;
 212
 213        /* Otherwise skip the block */
 214        return false;
 215}
 216
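/*
 * If the direct compactor asked for page capture (cc->page), try to grab a
 * suitable free page of at least cc->order straight from the zone free lists.
 */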
 217static void compact_capture_page(struct compact_control *cc)
 218{
 219        unsigned long flags;
 220        int mtype, mtype_low, mtype_high;
 221
 222        if (!cc->page || *cc->page)
 223                return;
 224
 225        /*
 226         * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
 227         * regardless of the migratetype of the freelist it is captured from.
 228         * This is fine because the order for a high-order MIGRATE_MOVABLE
 229         * allocation is typically at least a pageblock size and overall
 230         * fragmentation is not impaired. Other allocation types must
 231         * capture pages from their own migratelist because otherwise they
 232         * could pollute other pageblocks like MIGRATE_MOVABLE with
 233         * difficult-to-move pages, making fragmentation worse overall.
 234         */
 235        if (cc->migratetype == MIGRATE_MOVABLE) {
 236                mtype_low = 0;
 237                mtype_high = MIGRATE_PCPTYPES;
 238        } else {
 239                mtype_low = cc->migratetype;
 240                mtype_high = cc->migratetype + 1;
 241        }
 242
 243        /* Speculatively examine the free lists without zone lock */
 244        for (mtype = mtype_low; mtype < mtype_high; mtype++) {
 245                int order;
 246                for (order = cc->order; order < MAX_ORDER; order++) {
 247                        struct page *page;
 248                        struct free_area *area;
 249                        area = &(cc->zone->free_area[order]);
 250                        if (list_empty(&area->free_list[mtype]))
 251                                continue;
 252
 253                        /* Take the lock and attempt capture of the page */
 254                        if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
 255                                return;
 256                        if (!list_empty(&area->free_list[mtype])) {
 257                                page = list_entry(area->free_list[mtype].next,
 258                                                        struct page, lru);
 259                                if (capture_free_page(page, cc->order, mtype)) {
 260                                        spin_unlock_irqrestore(&cc->zone->lock,
 261                                                                        flags);
 262                                        *cc->page = page;
 263                                        return;
 264                                }
 265                        }
 266                        spin_unlock_irqrestore(&cc->zone->lock, flags);
 267                }
 268        }
 269}
 270
 271/*
 272 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
 273 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
 274 * pages inside of the pageblock (even though it may still end up isolating
 275 * some pages).
 276 */
 277static unsigned long isolate_freepages_block(struct compact_control *cc,
 278                                unsigned long blockpfn,
 279                                unsigned long end_pfn,
 280                                struct list_head *freelist,
 281                                bool strict)
 282{
 283        int nr_scanned = 0, total_isolated = 0;
 284        struct page *cursor, *valid_page = NULL;
 285        unsigned long nr_strict_required = end_pfn - blockpfn;
 286        unsigned long flags;
 287        bool locked = false;
 288
 289        cursor = pfn_to_page(blockpfn);
 290
 291        /* Isolate free pages. */
 292        for (; blockpfn < end_pfn; blockpfn++, cursor++) {
 293                int isolated, i;
 294                struct page *page = cursor;
 295
 296                nr_scanned++;
 297                if (!pfn_valid_within(blockpfn))
 298                        continue;
 299                if (!valid_page)
 300                        valid_page = page;
 301                if (!PageBuddy(page))
 302                        continue;
 303
 304                /*
 305                 * The zone lock must be held to isolate freepages.
 306                 * Unfortunately this is a very coarse lock and can be
 307                 * heavily contended if there are parallel allocations
 308                 * or parallel compactions. For async compaction, do not
 309                 * spin on the lock; instead, acquire the lock as late as
 310                 * possible.
 311                 */
 312                locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
 313                                                                locked, cc);
 314                if (!locked)
 315                        break;
 316
 317                /* Recheck this is a suitable migration target under lock */
 318                if (!strict && !suitable_migration_target(page))
 319                        break;
 320
 321                /* Recheck this is a buddy page under lock */
 322                if (!PageBuddy(page))
 323                        continue;
 324
 325                /* Found a free page, break it into order-0 pages */
 326                isolated = split_free_page(page);
 327                if (!isolated && strict)
 328                        break;
 329                total_isolated += isolated;
 330                for (i = 0; i < isolated; i++) {
 331                        list_add(&page->lru, freelist);
 332                        page++;
 333                }
 334
 335                /* If a page was split, advance to the end of it */
 336                if (isolated) {
 337                        blockpfn += isolated - 1;
 338                        cursor += isolated - 1;
 339                }
 340        }
 341
 342        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
 343
 344        /*
 345         * If strict isolation is requested by CMA then check that all the
 346         * pages requested were isolated. If there were any failures, 0 is
 347         * returned and CMA will fail.
 348         */
 349        if (strict && nr_strict_required > total_isolated)
 350                total_isolated = 0;
 351
 352        if (locked)
 353                spin_unlock_irqrestore(&cc->zone->lock, flags);
 354
 355        /* Update the pageblock-skip if the whole pageblock was scanned */
 356        if (blockpfn == end_pfn)
 357                update_pageblock_skip(cc, valid_page, total_isolated, false);
 358
 359        return total_isolated;
 360}
 361
 362/**
 363 * isolate_freepages_range() - isolate free pages.
 364 * @start_pfn: The first PFN to start isolating.
 365 * @end_pfn:   The one-past-last PFN.
 366 *
 367 * Non-free pages, invalid PFNs, or zone boundaries within the
 368 * [start_pfn, end_pfn) range are considered errors and cause the function
 369 * to undo its actions and return zero.
 370 *
 371 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 372 * pages (which may be greater than end_pfn if the end fell in the middle
 373 * of a free page).
 374 */
 375unsigned long
 376isolate_freepages_range(struct compact_control *cc,
 377                        unsigned long start_pfn, unsigned long end_pfn)
 378{
 379        unsigned long isolated, pfn, block_end_pfn;
 380        LIST_HEAD(freelist);
 381
 382        for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
 383                if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
 384                        break;
 385
 386                /*
 387                 * On subsequent iterations ALIGN() is actually not needed,
 388                 * but we keep it so as not to complicate the code.
 389                 */
 390                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 391                block_end_pfn = min(block_end_pfn, end_pfn);
 392
 393                isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
 394                                                   &freelist, true);
 395
 396                /*
 397                 * In strict mode, isolate_freepages_block() returns 0 if
 398                 * there are any holes in the block (ie. invalid PFNs or
 399                 * non-free pages).
 400                 */
 401                if (!isolated)
 402                        break;
 403
 404                /*
 405                 * If we managed to isolate pages, it is always (1 << n) *
 406                 * pageblock_nr_pages for some non-negative n.  (Max order
 407                 * page may span two pageblocks).
 408                 */
 409        }
 410
 411        /* split_free_page does not map the pages */
 412        map_pages(&freelist);
 413
 414        if (pfn < end_pfn) {
 415                /* Loop terminated early, cleanup. */
 416                release_freepages(&freelist);
 417                return 0;
 418        }
 419
 420        /* We don't use freelists for anything. */
 421        return pfn;
 422}
 423
 424/* Update the number of anon and file isolated pages in the zone */
 425static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
 426{
 427        struct page *page;
 428        unsigned int count[2] = { 0, };
 429
 430        list_for_each_entry(page, &cc->migratepages, lru)
 431                count[!!page_is_file_cache(page)]++;
 432
 433        /* If locked we can use the interrupt unsafe versions */
 434        if (locked) {
 435                __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
 436                __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
 437        } else {
 438                mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
 439                mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
 440        }
 441}
 442
 443/* Similar to reclaim, but different enough that they don't share logic */
 444static bool too_many_isolated(struct zone *zone)
 445{
 446        unsigned long active, inactive, isolated;
 447
 448        inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
 449                                        zone_page_state(zone, NR_INACTIVE_ANON);
 450        active = zone_page_state(zone, NR_ACTIVE_FILE) +
 451                                        zone_page_state(zone, NR_ACTIVE_ANON);
 452        isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
 453                                        zone_page_state(zone, NR_ISOLATED_ANON);
 454
 455        return isolated > (inactive + active) / 2;
 456}
 457
 458/**
 459 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 460 * @zone:       Zone pages are in.
 461 * @cc:         Compaction control structure.
 462 * @low_pfn:    The first PFN of the range.
 463 * @end_pfn:    The one-past-the-last PFN of the range.
 464 * @unevictable: true if unevictable pages may be isolated
 465 *
 466 * Isolate all pages that can be migrated from the range specified by
 467 * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
 468 * pending, otherwise the PFN of the first page that was not scanned
 469 * (which may be less than, equal to or greater than end_pfn).
 470 *
 471 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 472 * zero.
 473 *
 474 * Apart from cc->migratepages and cc->nr_migratepages this function
 475 * does not modify any of cc's fields, in particular it does not modify
 476 * (or read, for that matter) cc->migrate_pfn.
 477 */
 478unsigned long
 479isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 480                unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
 481{
 482        unsigned long last_pageblock_nr = 0, pageblock_nr;
 483        unsigned long nr_scanned = 0, nr_isolated = 0;
 484        struct list_head *migratelist = &cc->migratepages;
 485        isolate_mode_t mode = 0;
 486        struct lruvec *lruvec;
 487        unsigned long flags;
 488        bool locked = false;
 489        struct page *page = NULL, *valid_page = NULL;
 490
 491        /*
 492         * Ensure that there are not too many pages isolated from the LRU
 493         * list by either parallel reclaimers or compaction. If there are,
 494         * delay for some time until fewer pages are isolated
 495         */
 496        while (unlikely(too_many_isolated(zone))) {
 497                /* async migration should just abort */
 498                if (!cc->sync)
 499                        return 0;
 500
 501                congestion_wait(BLK_RW_ASYNC, HZ/10);
 502
 503                if (fatal_signal_pending(current))
 504                        return 0;
 505        }
 506
 507        /* Time to isolate some pages for migration */
 508        cond_resched();
 509        for (; low_pfn < end_pfn; low_pfn++) {
 510                /* give a chance to irqs before checking need_resched() */
 511                if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
 512                        if (should_release_lock(&zone->lru_lock)) {
 513                                spin_unlock_irqrestore(&zone->lru_lock, flags);
 514                                locked = false;
 515                        }
 516                }
 517
 518                /*
 519                 * migrate_pfn does not necessarily start aligned to a
 520                 * pageblock. Ensure that pfn_valid is called when moving
 521                 * into a new MAX_ORDER_NR_PAGES range in case of large
 522                 * memory holes within the zone
 523                 */
 524                if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
 525                        if (!pfn_valid(low_pfn)) {
 526                                low_pfn += MAX_ORDER_NR_PAGES - 1;
 527                                continue;
 528                        }
 529                }
 530
 531                if (!pfn_valid_within(low_pfn))
 532                        continue;
 533                nr_scanned++;
 534
 535                /*
 536                 * Get the page and ensure the page is within the same zone.
 537                 * See the comment in isolate_freepages about overlapping
 538                 * nodes. It is deliberate that the new zone lock is not taken
 539                 * as memory compaction should not move pages between nodes.
 540                 */
 541                page = pfn_to_page(low_pfn);
 542                if (page_zone(page) != zone)
 543                        continue;
 544
 545                if (!valid_page)
 546                        valid_page = page;
 547
 548                /* If isolation recently failed, do not retry */
 549                pageblock_nr = low_pfn >> pageblock_order;
 550                if (!isolation_suitable(cc, page))
 551                        goto next_pageblock;
 552
 553                /* Skip if free */
 554                if (PageBuddy(page))
 555                        continue;
 556
 557                /*
 558                 * For async migration, also only scan in MOVABLE (and CMA)
 559                 * blocks. Async migration is optimistic: it checks whether the
 560                 * minimum amount of work satisfies the allocation.
 561                 */
 562                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
 563                    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 564                        cc->finished_update_migrate = true;
 565                        goto next_pageblock;
 566                }
 567
 568                /* Check may be lockless but that's ok as we recheck later */
 569                if (!PageLRU(page))
 570                        continue;
 571
 572                /*
 573                 * PageLRU is set. lru_lock normally excludes isolation
 574                 * splitting and collapsing (collapsing has already happened
 575                 * if PageLRU is set) but the lock is not necessarily taken
 576                 * here and it is wasteful to take it just to check transhuge.
 577                 * Check TransHuge without lock and skip the whole pageblock if
 578                 * it's either a transhuge or hugetlbfs page, as calling
 579                 * compound_order() without preventing THP from splitting the
 580                 * page underneath us may return surprising results.
 581                 */
 582                if (PageTransHuge(page)) {
 583                        if (!locked)
 584                                goto next_pageblock;
 585                        low_pfn += (1 << compound_order(page)) - 1;
 586                        continue;
 587                }
 588
 589                /* Check if it is ok to still hold the lock */
 590                locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
 591                                                                locked, cc);
 592                if (!locked || fatal_signal_pending(current))
 593                        break;
 594
 595                /* Recheck PageLRU and PageTransHuge under lock */
 596                if (!PageLRU(page))
 597                        continue;
 598                if (PageTransHuge(page)) {
 599                        low_pfn += (1 << compound_order(page)) - 1;
 600                        continue;
 601                }
 602
 603                if (!cc->sync)
 604                        mode |= ISOLATE_ASYNC_MIGRATE;
 605
 606                if (unevictable)
 607                        mode |= ISOLATE_UNEVICTABLE;
 608
 609                lruvec = mem_cgroup_page_lruvec(page, zone);
 610
 611                /* Try isolate the page */
 612                if (__isolate_lru_page(page, mode) != 0)
 613                        continue;
 614
 615                VM_BUG_ON(PageTransCompound(page));
 616
 617                /* Successfully isolated */
 618                cc->finished_update_migrate = true;
 619                del_page_from_lru_list(page, lruvec, page_lru(page));
 620                list_add(&page->lru, migratelist);
 621                cc->nr_migratepages++;
 622                nr_isolated++;
 623
 624                /* Avoid isolating too much */
 625                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
 626                        ++low_pfn;
 627                        break;
 628                }
 629
 630                continue;
 631
 632next_pageblock:
 633                low_pfn += pageblock_nr_pages;
 634                low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
 635                last_pageblock_nr = pageblock_nr;
 636        }
 637
 638        acct_isolated(zone, locked, cc);
 639
 640        if (locked)
 641                spin_unlock_irqrestore(&zone->lru_lock, flags);
 642
 643        /* Update the pageblock-skip if the whole pageblock was scanned */
 644        if (low_pfn == end_pfn)
 645                update_pageblock_skip(cc, valid_page, nr_isolated, true);
 646
 647        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 648
 649        return low_pfn;
 650}
 651
 652#endif /* CONFIG_COMPACTION || CONFIG_CMA */
 653#ifdef CONFIG_COMPACTION
 654/*
 655 * Based on information in the current compact_control, find blocks
 656 * suitable for isolating free pages from and then isolate them.
 657 */
 658static void isolate_freepages(struct zone *zone,
 659                                struct compact_control *cc)
 660{
 661        struct page *page;
 662        unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
 663        int nr_freepages = cc->nr_freepages;
 664        struct list_head *freelist = &cc->freepages;
 665
 666        /*
 667         * Initialise the free scanner. The starting point is where we last
 668         * scanned from (or the end of the zone if starting). The low point
 669         * is the end of the pageblock the migration scanner is using.
 670         */
 671        pfn = cc->free_pfn;
 672        low_pfn = cc->migrate_pfn + pageblock_nr_pages;
 673
 674        /*
 675         * Take care that if the migration scanner is at the end of the zone
 676         * that the free scanner does not accidentally move to the next zone
 677         * in the next isolation cycle.
 678         */
 679        high_pfn = min(low_pfn, pfn);
 680
 681        zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 682
 683        /*
 684         * Isolate free pages until enough are available to migrate the
 685         * pages on cc->migratepages. We stop searching if the migrate
 686         * and free page scanners meet or enough free pages are isolated.
 687         */
 688        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
 689                                        pfn -= pageblock_nr_pages) {
 690                unsigned long isolated;
 691
 692                if (!pfn_valid(pfn))
 693                        continue;
 694
 695                /*
 696                 * Check for overlapping nodes/zones. It's possible on some
 697                 * configurations to have a setup like
 698                 * node0 node1 node0
 699                 * i.e. it's possible that not all pages within a zone's range
 700                 * of pages belong to a single zone.
 701                 */
 702                page = pfn_to_page(pfn);
 703                if (page_zone(page) != zone)
 704                        continue;
 705
 706                /* Check the block is suitable for migration */
 707                if (!suitable_migration_target(page))
 708                        continue;
 709
 710                /* If isolation recently failed, do not retry */
 711                if (!isolation_suitable(cc, page))
 712                        continue;
 713
 714                /* Found a block suitable for isolating free pages from */
 715                isolated = 0;
 716
 717                /*
 718                 * As pfn may not start aligned, pfn+pageblock_nr_pages
 719                 * may cross a MAX_ORDER_NR_PAGES boundary and miss
 720                 * a pfn_valid check. Ensure isolate_freepages_block()
 721                 * only scans within a pageblock
 722                 */
 723                end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 724                end_pfn = min(end_pfn, zone_end_pfn);
 725                isolated = isolate_freepages_block(cc, pfn, end_pfn,
 726                                                   freelist, false);
 727                nr_freepages += isolated;
 728
 729                /*
 730                 * Record the highest PFN we isolated pages from. When next
 731                 * looking for free pages, the search will restart here as
 732                 * page migration may have returned some pages to the allocator
 733                 */
 734                if (isolated) {
 735                        cc->finished_update_free = true;
 736                        high_pfn = max(high_pfn, pfn);
 737                }
 738        }
 739
 740        /* split_free_page does not map the pages */
 741        map_pages(freelist);
 742
 743        cc->free_pfn = high_pfn;
 744        cc->nr_freepages = nr_freepages;
 745}
 746
 747/*
 748 * This is a migrate-callback that "allocates" freepages by taking pages
 749 * from the isolated freelists in the block we are migrating to.
 750 */
 751static struct page *compaction_alloc(struct page *migratepage,
 752                                        unsigned long data,
 753                                        int **result)
 754{
 755        struct compact_control *cc = (struct compact_control *)data;
 756        struct page *freepage;
 757
 758        /* Isolate free pages if necessary */
 759        if (list_empty(&cc->freepages)) {
 760                isolate_freepages(cc->zone, cc);
 761
 762                if (list_empty(&cc->freepages))
 763                        return NULL;
 764        }
 765
 766        freepage = list_entry(cc->freepages.next, struct page, lru);
 767        list_del(&freepage->lru);
 768        cc->nr_freepages--;
 769
 770        return freepage;
 771}
 772
 773/*
 774 * We cannot control nr_migratepages and nr_freepages fully when migration is
 775 * running as migrate_pages() has no knowledge of compact_control. When
 776 * migration is complete, we count the number of pages on the lists by hand.
 777 */
 778static void update_nr_listpages(struct compact_control *cc)
 779{
 780        int nr_migratepages = 0;
 781        int nr_freepages = 0;
 782        struct page *page;
 783
 784        list_for_each_entry(page, &cc->migratepages, lru)
 785                nr_migratepages++;
 786        list_for_each_entry(page, &cc->freepages, lru)
 787                nr_freepages++;
 788
 789        cc->nr_migratepages = nr_migratepages;
 790        cc->nr_freepages = nr_freepages;
 791}
 792
 793/* possible outcome of isolate_migratepages */
 794typedef enum {
 795        ISOLATE_ABORT,          /* Abort compaction now */
 796        ISOLATE_NONE,           /* No pages isolated, continue scanning */
 797        ISOLATE_SUCCESS,        /* Pages isolated, migrate */
 798} isolate_migrate_t;
 799
 800/*
 801 * Isolate all pages that can be migrated from the block pointed to by
 802 * the migrate scanner within compact_control.
 803 */
 804static isolate_migrate_t isolate_migratepages(struct zone *zone,
 805                                        struct compact_control *cc)
 806{
 807        unsigned long low_pfn, end_pfn;
 808
 809        /* Do not scan outside zone boundaries */
 810        low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
 811
 812        /* Only scan within a pageblock boundary */
 813        end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
 814
 815        /* Do not cross the free scanner or scan within a memory hole */
 816        if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
 817                cc->migrate_pfn = end_pfn;
 818                return ISOLATE_NONE;
 819        }
 820
 821        /* Perform the isolation */
 822        low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
 823        if (!low_pfn || cc->contended)
 824                return ISOLATE_ABORT;
 825
 826        cc->migrate_pfn = low_pfn;
 827
 828        return ISOLATE_SUCCESS;
 829}
 830
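/*
 * Decide whether a compaction run is finished: COMPACT_COMPLETE when the
 * scanners have met, COMPACT_PARTIAL when the allocation can already be
 * satisfied (or a fatal signal is pending), COMPACT_CONTINUE otherwise.
 */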
 831static int compact_finished(struct zone *zone,
 832                            struct compact_control *cc)
 833{
 834        unsigned long watermark;
 835
 836        if (fatal_signal_pending(current))
 837                return COMPACT_PARTIAL;
 838
 839        /* Compaction run completes if the migrate and free scanner meet */
 840        if (cc->free_pfn <= cc->migrate_pfn) {
 841                /*
 842                 * Mark that the PG_migrate_skip information should be cleared
 843                 * by kswapd when it goes to sleep. kswapd does not set the
 844                 * flag itself as the decision to be clear should be directly
 845                 * based on an allocation request.
 846                 */
 847                if (!current_is_kswapd())
 848                        zone->compact_blockskip_flush = true;
 849
 850                return COMPACT_COMPLETE;
 851        }
 852
 853        /*
 854         * order == -1 is expected when compacting via
 855         * /proc/sys/vm/compact_memory
 856         */
 857        if (cc->order == -1)
 858                return COMPACT_CONTINUE;
 859
 860        /* Compaction run is not finished if the watermark is not met */
 861        watermark = low_wmark_pages(zone);
 862        watermark += (1 << cc->order);
 863
 864        if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
 865                return COMPACT_CONTINUE;
 866
 867        /* Direct compactor: Is a suitable page free? */
 868        if (cc->page) {
 869                /* Was a suitable page captured? */
 870                if (*cc->page)
 871                        return COMPACT_PARTIAL;
 872        } else {
 873                unsigned int order;
 874                for (order = cc->order; order < MAX_ORDER; order++) {
 875                        struct free_area *area = &zone->free_area[order];
 876                        /* Job done if page is free of the right migratetype */
 877                        if (!list_empty(&area->free_list[cc->migratetype]))
 878                                return COMPACT_PARTIAL;
 879
 880                        /* Job done if allocation would set block type */
 881                        if (cc->order >= pageblock_order && area->nr_free)
 882                                return COMPACT_PARTIAL;
 883                }
 884        }
 885
 886        return COMPACT_CONTINUE;
 887}
 888
 889/*
 890 * compaction_suitable: Is this suitable to run compaction on this zone now?
 891 * Returns
 892 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 893 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 894 *   COMPACT_CONTINUE - If compaction should run now
 895 */
 896unsigned long compaction_suitable(struct zone *zone, int order)
 897{
 898        int fragindex;
 899        unsigned long watermark;
 900
 901        /*
 902         * order == -1 is expected when compacting via
 903         * /proc/sys/vm/compact_memory
 904         */
 905        if (order == -1)
 906                return COMPACT_CONTINUE;
 907
 908        /*
 909         * Watermarks for order-0 must be met for compaction. Note the 2UL.
 910         * This is because during migration, copies of pages need to be
 911         * allocated and for a short time, the footprint is higher
 912         */
 913        watermark = low_wmark_pages(zone) + (2UL << order);
 914        if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
 915                return COMPACT_SKIPPED;
 916
 917        /*
 918         * fragmentation index determines if allocation failures are due to
 919         * low memory or external fragmentation
 920         *
 921         * index of -1000 implies allocations might succeed depending on
 922         * watermarks
 923         * index towards 0 implies failure is due to lack of memory
 924         * index towards 1000 implies failure is due to fragmentation
 925         *
 926         * Only compact if a failure would be due to fragmentation.
 927         */
 928        fragindex = fragmentation_index(zone, order);
 929        if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
 930                return COMPACT_SKIPPED;
 931
 932        if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
 933            0, 0))
 934                return COMPACT_PARTIAL;
 935
 936        return COMPACT_CONTINUE;
 937}
 938
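/* Compact a single zone, driving the migrate and free scanners towards each other. */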
 939static int compact_zone(struct zone *zone, struct compact_control *cc)
 940{
 941        int ret;
 942        unsigned long start_pfn = zone->zone_start_pfn;
 943        unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 944
 945        ret = compaction_suitable(zone, cc->order);
 946        switch (ret) {
 947        case COMPACT_PARTIAL:
 948        case COMPACT_SKIPPED:
 949                /* Compaction is likely to fail */
 950                return ret;
 951        case COMPACT_CONTINUE:
 952                /* Fall through to compaction */
 953                ;
 954        }
 955
 956        /*
 957         * Set up to move all movable pages to the end of the zone. Use cached
 958         * information on where the scanners should start, but check that it
 959         * is initialised by ensuring the values are within zone boundaries.
 960         */
 961        cc->migrate_pfn = zone->compact_cached_migrate_pfn;
 962        cc->free_pfn = zone->compact_cached_free_pfn;
 963        if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
 964                cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
 965                zone->compact_cached_free_pfn = cc->free_pfn;
 966        }
 967        if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
 968                cc->migrate_pfn = start_pfn;
 969                zone->compact_cached_migrate_pfn = cc->migrate_pfn;
 970        }
 971
 972        /*
 973         * Clear pageblock skip if there were failures recently and compaction
 974         * is about to be retried after being deferred. kswapd does not do
 975         * this reset as it'll reset the cached information when going to sleep.
 976         */
 977        if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
 978                __reset_isolation_suitable(zone);
 979
 980        migrate_prep_local();
 981
 982        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
 983                unsigned long nr_migrate, nr_remaining;
 984                int err;
 985
 986                switch (isolate_migratepages(zone, cc)) {
 987                case ISOLATE_ABORT:
 988                        ret = COMPACT_PARTIAL;
 989                        putback_lru_pages(&cc->migratepages);
 990                        cc->nr_migratepages = 0;
 991                        goto out;
 992                case ISOLATE_NONE:
 993                        continue;
 994                case ISOLATE_SUCCESS:
 995                        ;
 996                }
 997
 998                nr_migrate = cc->nr_migratepages;
 999                err = migrate_pages(&cc->migratepages, compaction_alloc,
1000                                (unsigned long)cc, false,
1001                                cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
1002                update_nr_listpages(cc);
1003                nr_remaining = cc->nr_migratepages;
1004
1005                count_vm_event(COMPACTBLOCKS);
1006                count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
1007                if (nr_remaining)
1008                        count_vm_events(COMPACTPAGEFAILED, nr_remaining);
1009                trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
1010                                                nr_remaining);
1011
1012                /* Release LRU pages not migrated */
1013                if (err) {
1014                        putback_lru_pages(&cc->migratepages);
1015                        cc->nr_migratepages = 0;
1016                        if (err == -ENOMEM) {
1017                                ret = COMPACT_PARTIAL;
1018                                goto out;
1019                        }
1020                }
1021
1022                /* Capture a page now if it is a suitable size */
1023                compact_capture_page(cc);
1024        }
1025
1026out:
1027        /* Release free pages and check accounting */
1028        cc->nr_freepages -= release_freepages(&cc->freepages);
1029        VM_BUG_ON(cc->nr_freepages != 0);
1030
1031        return ret;
1032}
1033
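/* Build a compact_control for one zone and run compact_zone() on it. */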
1034static unsigned long compact_zone_order(struct zone *zone,
1035                                 int order, gfp_t gfp_mask,
1036                                 bool sync, bool *contended,
1037                                 struct page **page)
1038{
1039        unsigned long ret;
1040        struct compact_control cc = {
1041                .nr_freepages = 0,
1042                .nr_migratepages = 0,
1043                .order = order,
1044                .migratetype = allocflags_to_migratetype(gfp_mask),
1045                .zone = zone,
1046                .sync = sync,
1047                .page = page,
1048        };
1049        INIT_LIST_HEAD(&cc.freepages);
1050        INIT_LIST_HEAD(&cc.migratepages);
1051
1052        ret = compact_zone(zone, &cc);
1053
1054        VM_BUG_ON(!list_empty(&cc.freepages));
1055        VM_BUG_ON(!list_empty(&cc.migratepages));
1056
1057        *contended = cc.contended;
1058        return ret;
1059}
1060
1061int sysctl_extfrag_threshold = 500;
1062
1063/**
1064 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
1065 * @zonelist: The zonelist used for the current allocation
1066 * @order: The order of the current allocation
1067 * @gfp_mask: The GFP mask of the current allocation
1068 * @nodemask: The allowed nodes to allocate from
1069 * @sync: Whether migration is synchronous or not
1070 * @contended: Return value that is true if compaction was aborted due to lock contention
1071 * @page: Optionally capture a free page of the requested order during compaction
1072 *
1073 * This is the main entry point for direct page compaction.
1074 */
1075unsigned long try_to_compact_pages(struct zonelist *zonelist,
1076                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
1077                        bool sync, bool *contended, struct page **page)
1078{
1079        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
1080        int may_enter_fs = gfp_mask & __GFP_FS;
1081        int may_perform_io = gfp_mask & __GFP_IO;
1082        struct zoneref *z;
1083        struct zone *zone;
1084        int rc = COMPACT_SKIPPED;
1085        int alloc_flags = 0;
1086
1087        /* Check if the GFP flags allow compaction */
1088        if (!order || !may_enter_fs || !may_perform_io)
1089                return rc;
1090
1091        count_vm_event(COMPACTSTALL);
1092
1093#ifdef CONFIG_CMA
1094        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
1095                alloc_flags |= ALLOC_CMA;
1096#endif
1097        /* Compact each zone in the list */
1098        for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
1099                                                                nodemask) {
1100                int status;
1101
1102                status = compact_zone_order(zone, order, gfp_mask, sync,
1103                                                contended, page);
1104                rc = max(status, rc);
1105
1106                /* If a normal allocation would succeed, stop compacting */
1107                if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
1108                                      alloc_flags))
1109                        break;
1110        }
1111
1112        return rc;
1113}
1114
1115
1116/* Compact all zones within a node */
1117static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
1118{
1119        int zoneid;
1120        struct zone *zone;
1121
1122        for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
1123
1124                zone = &pgdat->node_zones[zoneid];
1125                if (!populated_zone(zone))
1126                        continue;
1127
1128                cc->nr_freepages = 0;
1129                cc->nr_migratepages = 0;
1130                cc->zone = zone;
1131                INIT_LIST_HEAD(&cc->freepages);
1132                INIT_LIST_HEAD(&cc->migratepages);
1133
1134                if (cc->order == -1 || !compaction_deferred(zone, cc->order))
1135                        compact_zone(zone, cc);
1136
1137                if (cc->order > 0) {
1138                        int ok = zone_watermark_ok(zone, cc->order,
1139                                                low_wmark_pages(zone), 0, 0);
1140                        if (ok && cc->order >= zone->compact_order_failed)
1141                                zone->compact_order_failed = cc->order + 1;
1142                        /* Currently async compaction is never deferred. */
1143                        else if (!ok && cc->sync)
1144                                defer_compaction(zone, cc->order);
1145                }
1146
1147                VM_BUG_ON(!list_empty(&cc->freepages));
1148                VM_BUG_ON(!list_empty(&cc->migratepages));
1149        }
1150
1151        return 0;
1152}
1153
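/* Asynchronously compact all zones of a node for the given allocation order. */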
1154int compact_pgdat(pg_data_t *pgdat, int order)
1155{
1156        struct compact_control cc = {
1157                .order = order,
1158                .sync = false,
1159                .page = NULL,
1160        };
1161
1162        return __compact_pgdat(pgdat, &cc);
1163}
1164
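/* Synchronously compact a whole node; order == -1 ignores watermarks and deferral. */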
1165static int compact_node(int nid)
1166{
1167        struct compact_control cc = {
1168                .order = -1,
1169                .sync = true,
1170                .page = NULL,
1171        };
1172
1173        return __compact_pgdat(NODE_DATA(nid), &cc);
1174}
1175
1176/* Compact all nodes in the system */
1177static int compact_nodes(void)
1178{
1179        int nid;
1180
1181        /* Flush pending updates to the LRU lists */
1182        lru_add_drain_all();
1183
1184        for_each_online_node(nid)
1185                compact_node(nid);
1186
1187        return COMPACT_COMPLETE;
1188}
1189
1190/* The written value is actually unused, all memory is compacted */
1191int sysctl_compact_memory;
1192
1193/* This is the entry point for compacting all nodes via /proc/sys/vm */
1194int sysctl_compaction_handler(struct ctl_table *table, int write,
1195                        void __user *buffer, size_t *length, loff_t *ppos)
1196{
1197        if (write)
1198                return compact_nodes();
1199
1200        return 0;
1201}
1202
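/* Handler for /proc/sys/vm/extfrag_threshold */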
1203int sysctl_extfrag_handler(struct ctl_table *table, int write,
1204                        void __user *buffer, size_t *length, loff_t *ppos)
1205{
1206        proc_dointvec_minmax(table, write, buffer, length, ppos);
1207
1208        return 0;
1209}
1210
1211#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
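/* Per-node sysfs trigger: writing the node's 'compact' attribute compacts that node. */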
1212ssize_t sysfs_compact_node(struct device *dev,
1213                        struct device_attribute *attr,
1214                        const char *buf, size_t count)
1215{
1216        int nid = dev->id;
1217
1218        if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
1219                /* Flush pending updates to the LRU lists */
1220                lru_add_drain_all();
1221
1222                compact_node(nid);
1223        }
1224
1225        return count;
1226}
1227static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1228
1229int compaction_register_node(struct node *node)
1230{
1231        return device_create_file(&node->dev, &dev_attr_compact);
1232}
1233
1234void compaction_unregister_node(struct node *node)
1235{
1236        return device_remove_file(&node->dev, &dev_attr_compact);
1237}
1238#endif /* CONFIG_SYSFS && CONFIG_NUMA */
1239
1240#endif /* CONFIG_COMPACTION */
1241