/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
        if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
                return;

        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
        page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
        struct zone *zone = page_zone(page);
        if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
                return;

        BUG_ON(zone->nr_pageblock_isolate <= 0);
        set_pageblock_migratetype(page, migratetype);
        zone->nr_pageblock_isolate--;
}
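
/*
 * Hedged sketch (editor's addition, not in the original file): the two
 * helpers above are meant to pair up under zone->lock so that
 * zone->nr_pageblock_isolate stays balanced. A hypothetical caller,
 * assuming "page" is the first page of a pageblock:
 */
#if 0  /* illustrative only */
static void example_isolate_then_restore(struct page *page)
{
        struct zone *zone = page_zone(page);
        unsigned long flags;
        int old_migratetype = get_pageblock_migratetype(page);

        spin_lock_irqsave(&zone->lock, flags);
        set_pageblock_isolate(page);    /* bumps nr_pageblock_isolate */
        restore_pageblock_isolate(page, old_migratetype);
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif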

int set_migratetype_isolate(struct page *page)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or on the LRU, isolation can continue.
         * Later, for example, when the memory hotplug notifier runs,
         * these pages reported as "can be isolated" should be isolated
         * (freed) by the balloon driver through the memory notifier
         * chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found))
                ret = 0;
        /*
         * "Immobile" means not-on-LRU pages. If there are more immobile
         * pages than removable-by-driver pages reported by the notifier,
         * we will fail.
         */

out:
        if (!ret) {
                unsigned long nr_pages;
                int migratetype = get_pageblock_migratetype(page);

                set_pageblock_isolate(page);
                nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);

                __mod_zone_freepage_state(zone, -nr_pages, migratetype);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
        return ret;
}
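
/*
 * Hedged sketch (editor's addition): a balloon driver can take part in
 * the notifier handshake described above by registering on the memory
 * isolate chain and reporting how many pages of the queried range it
 * holds. The callback shape follows struct memory_isolate_notify; the
 * helper example_count_balloon_pages() is hypothetical.
 */
#if 0  /* illustrative only */
static int example_balloon_isolate_cb(struct notifier_block *self,
                                      unsigned long action, void *data)
{
        struct memory_isolate_notify *arg = data;

        if (action == MEM_ISOLATE_COUNT)
                arg->pages_found += example_count_balloon_pages(arg->start_pfn,
                                                                arg->nr_pages);
        return NOTIFY_OK;
}

static struct notifier_block example_balloon_nb = {
        .notifier_call = example_balloon_isolate_cb,
};
/* registered at driver init time with register_memory_isolate_notifier() */
#endif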

void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags, nr_pages;

        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
        nr_pages = move_freepages_block(zone, page, migratetype);
        __mod_zone_freepage_state(zone, nr_pages, migratetype);
        restore_pageblock_isolate(page, migratetype);
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}
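
/*
 * Hedged usage sketch (editor's addition): set_migratetype_isolate()
 * works on a single pageblock and returns -EBUSY if the block contains
 * unmovable pages; on success it must eventually be paired with
 * unset_migratetype_isolate(). A hypothetical single-block caller,
 * assuming "pfn" is pageblock-aligned and valid:
 */
#if 0  /* illustrative only */
static int example_isolate_one_block(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);

        if (set_migratetype_isolate(page))
                return -EBUSY;          /* unmovable pages in the block */

        /* ... migrate or offline the block's contents here ... */

        unset_migratetype_isolate(page, MIGRATE_MOVABLE);
        return 0;
}
#endif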

/*
 * Return the first page in [pfn, pfn + nr_pages) that has a valid pfn,
 * or NULL when the whole range is a memory hole (pfn_valid_within() can
 * only fail with CONFIG_HOLES_IN_ZONE).
 */
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;
        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- set the migratetype of a range of
 * pageblocks to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to set in error recovery.
 *
 * Setting a pageblock's migratetype to MIGRATE_ISOLATE means that free
 * pages in the range will never be allocated: any pages that are free
 * now, or are freed later, will not be handed out again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success and -EBUSY if any part of the range cannot be
 * isolated.
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && set_migratetype_isolate(page)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}
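
/*
 * Hedged sketch (editor's addition): both boundaries must be
 * pageblock-aligned or the BUG_ON()s above fire. A hypothetical caller
 * that rounds an arbitrary pfn range outward before isolating it:
 */
#if 0  /* illustrative only */
static int example_isolate_range(unsigned long start_pfn,
                                 unsigned long end_pfn)
{
        unsigned long start = round_down(start_pfn, pageblock_nr_pages);
        unsigned long end = ALIGN(end_pfn, pageblock_nr_pages);

        return start_isolate_page_range(start, end, MIGRATE_MOVABLE);
}
#endif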

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;
        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page)) {
                        /*
                         * If a race between isolation and allocation
                         * happens, some free pages could be on a
                         * MIGRATE_MOVABLE free list although the
                         * pageblock's migratetype is MIGRATE_ISOLATE.
                         * Catch it and move the pages onto the
                         * MIGRATE_ISOLATE list.
                         */
                        if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
                                struct page *end_page;

                                end_page = page + (1 << page_order(page)) - 1;
                                move_freepages(page_zone(page), page, end_page,
                                                MIGRATE_ISOLATE);
                        }
                        pfn += 1 << page_order(page);
                } else if (page_count(page) == 0 &&
                        get_freepage_migratetype(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}
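
/*
 * Editor's worked example (not in the original file): the scan above
 * advances in buddy-sized strides. If an order-3 free chunk starts at
 * pfn 4096, the PageBuddy branch executes pfn += 1 << 3 and accepts all
 * eight pages of the chunk in one step; any page that is neither a free
 * buddy page nor a zero-refcount MIGRATE_ISOLATE page ends the walk
 * early and makes the test fail.
 */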

int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages != MAX_ORDER, so chunks of free pages
         * are not necessarily aligned to pageblock_nr_pages. We therefore
         * check the pageblock migratetype first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check that all pages are free or marked as ISOLATE */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
        spin_unlock_irqrestore(&zone->lock, flags);
        return ret ? 0 : -EBUSY;
}
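
/*
 * Hedged end-to-end sketch (editor's addition): the usual caller
 * sequence, e.g. in memory hotplug or CMA, is isolate -> migrate ->
 * re-test -> undo on failure. Everything here other than the functions
 * defined in this file is hypothetical, including
 * example_migrate_range().
 */
#if 0  /* illustrative only */
static int example_take_range_offline(unsigned long start_pfn,
                                      unsigned long end_pfn)
{
        int ret;

        ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        if (ret)
                return ret;     /* some pageblock held unmovable pages */

        /* hypothetical: migrate whatever is still in use in the range */
        ret = example_migrate_range(start_pfn, end_pfn);
        if (ret)
                goto undo;

        /* all pages should now be free and sitting in MIGRATE_ISOLATE */
        ret = test_pages_isolated(start_pfn, end_pfn);
        if (ret)
                goto undo;

        /* range is now held in MIGRATE_ISOLATE; take it offline here */
        return 0;
undo:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
        return ret;
}
#endif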

/*
 * Allocation callback with the new_page_t signature used by
 * migrate_pages(): allocates a movable target page for each page
 * migrated out of an isolated range.
 */
struct page *alloc_migrate_target(struct page *page, unsigned long private,
                                  int **resultp)
{
        gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;

        if (PageHighMem(page))
                gfp_mask |= __GFP_HIGHMEM;

        return alloc_page(gfp_mask);
}
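
/*
 * Hedged note (editor's addition): because alloc_migrate_target()
 * matches the new_page_t callback type, it can be handed to
 * migrate_pages() to allocate destination pages while draining an
 * isolated range. The exact migrate_pages() signature differs between
 * kernel versions; the call below assumes the form this file's era
 * appears to use, and "source_pages" is a hypothetical list of
 * isolated in-use pages.
 */
#if 0  /* illustrative only */
        ret = migrate_pages(&source_pages, alloc_migrate_target, 0,
                            false, MIGRATE_SYNC, MR_CMA);
#endif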