/*
 * linux/mm/page_isolation.c
 */

#include <linux/mm.h>
#include <linux/page-isolation.h>
#include <linux/pageblock-flags.h>
#include <linux/memory.h>
#include "internal.h"

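/*
 * Page isolation marks whole pageblocks as MIGRATE_ISOLATE so that
 * their free pages are never handed out by the buddy allocator.
 * Memory hotplug (offlining) and alloc_contig_range() use this to
 * carve out ranges of physically contiguous memory.
 */
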
/* called while holding zone->lock */
static void set_pageblock_isolate(struct page *page)
{
        if (get_pageblock_migratetype(page) == MIGRATE_ISOLATE)
                return;

        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
        page_zone(page)->nr_pageblock_isolate++;
}

/* called while holding zone->lock */
static void restore_pageblock_isolate(struct page *page, int migratetype)
{
        struct zone *zone = page_zone(page);
        if (WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE))
                return;

        BUG_ON(zone->nr_pageblock_isolate <= 0);
        set_pageblock_migratetype(page, migratetype);
        zone->nr_pageblock_isolate--;
}

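/*
 * Mark the pageblock containing @page MIGRATE_ISOLATE and move its free
 * pages onto the isolate freelist. Returns 0 on success, or -EBUSY if
 * the pageblock contains unmovable pages (beyond those accounted for by
 * the isolation notifier chain) and cannot be isolated.
 */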
int set_migratetype_isolate(struct page *page)
{
        struct zone *zone;
        unsigned long flags, pfn;
        struct memory_isolate_notify arg;
        int notifier_ret;
        int ret = -EBUSY;

        zone = page_zone(page);

        spin_lock_irqsave(&zone->lock, flags);

        pfn = page_to_pfn(page);
        arg.start_pfn = pfn;
        arg.nr_pages = pageblock_nr_pages;
        arg.pages_found = 0;

        /*
         * It may be possible to isolate a pageblock even if the
         * migratetype is not MIGRATE_MOVABLE. The memory isolation
         * notifier chain is used by balloon drivers to return the
         * number of pages in a range that are held by the balloon
         * driver to shrink memory. If all the pages are accounted for
         * by balloons, are free, or are on the LRU, isolation can
         * continue. Later, for example, when the memory hotplug notifier
         * runs, the pages reported as "can be isolated" should be
         * isolated (freed) by the balloon driver through the memory
         * notifier chain.
         */
        notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
        notifier_ret = notifier_to_errno(notifier_ret);
        if (notifier_ret)
                goto out;
        /*
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
        if (!has_unmovable_pages(zone, page, arg.pages_found))
                ret = 0;

        /*
         * Immobile means "not on the LRU" pages. If the number of immobile
         * pages exceeds the number of removable-by-driver pages reported
         * by the notifier, we will fail.
         */

out:
        if (!ret) {
                set_pageblock_isolate(page);
                move_freepages_block(zone, page, MIGRATE_ISOLATE);
        }

        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
        return ret;
}

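/*
 * Undo set_migratetype_isolate(): move the pageblock's free pages back
 * to @migratetype's freelist and restore the pageblock migratetype.
 */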
void unset_migratetype_isolate(struct page *page, unsigned migratetype)
{
        struct zone *zone;
        unsigned long flags;
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                goto out;
        move_freepages_block(zone, page, migratetype);
        restore_pageblock_isolate(page, migratetype);
out:
        spin_unlock_irqrestore(&zone->lock, flags);
}

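/*
 * Return the first page with a valid pfn in [pfn, pfn + nr_pages), or
 * NULL if the whole range falls into a memory hole. With
 * CONFIG_HOLES_IN_ZONE a pageblock may contain invalid pfns, so callers
 * cannot simply use pfn_to_page() on the block's first pfn.
 */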
static inline struct page *
__first_valid_page(unsigned long pfn, unsigned long nr_pages)
{
        int i;
        for (i = 0; i < nr_pages; i++)
                if (pfn_valid_within(pfn + i))
                        break;
        if (unlikely(i == nr_pages))
                return NULL;
        return pfn_to_page(pfn + i);
}

/*
 * start_isolate_page_range() -- set the page allocation type of a range
 * of pages to MIGRATE_ISOLATE.
 * @start_pfn: The lower PFN of the range to be isolated.
 * @end_pfn: The upper PFN of the range to be isolated.
 * @migratetype: migrate type to restore in error recovery.
 *
 * Setting the page allocation type to MIGRATE_ISOLATE means that free
 * pages in the range will never be allocated. Any free pages, and any
 * pages freed in the future, will not be allocated again.
 *
 * start_pfn/end_pfn must be aligned to pageblock_order.
 * Returns 0 on success, or -EBUSY if any part of the range cannot be
 * isolated (in which case the whole range is rolled back).
 */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype)
{
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;

        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));

        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && set_migratetype_isolate(page)) {
                        undo_pfn = pfn;
                        goto undo;
                }
        }
        return 0;
undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
             pfn += pageblock_nr_pages)
                unset_migratetype_isolate(pfn_to_page(pfn), migratetype);

        return -EBUSY;
}
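
/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file; the in-tree users of this sequence are memory hotplug's
 * __offline_pages() and alloc_contig_range()):
 *
 *	if (start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE))
 *		return -EBUSY;
 *	... migrate or reclaim any in-use pages in [start_pfn, end_pfn) ...
 *	if (test_pages_isolated(start_pfn, end_pfn))
 *		goto failed;	(a page was re-pinned or is still in use)
 *	... use the range: it is free and cannot be allocated from ...
 * failed:
 *	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 */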

/*
 * Make isolated pages available again.
 */
int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                            unsigned migratetype)
{
        unsigned long pfn;
        struct page *page;
        BUG_ON((start_pfn) & (pageblock_nr_pages - 1));
        BUG_ON((end_pfn) & (pageblock_nr_pages - 1));
        for (pfn = start_pfn;
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        continue;
                unset_migratetype_isolate(page, migratetype);
        }
        return 0;
}

/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
        struct page *page;

        while (pfn < end_pfn) {
                if (!pfn_valid_within(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                if (PageBuddy(page))
                        pfn += 1 << page_order(page);
                else if (page_count(page) == 0 &&
                                page_private(page) == MIGRATE_ISOLATE)
                        pfn += 1;
                else
                        break;
        }
        if (pfn < end_pfn)
                return 0;
        return 1;
}

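/*
 * test_pages_isolated() -- check that all pages in [start_pfn, end_pfn)
 * are isolated, i.e. free and unavailable to the allocator.
 * Returns 0 if the whole range is isolated, -EBUSY otherwise.
 */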
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn, flags;
        struct page *page;
        struct zone *zone;
        int ret;

        /*
         * Note: pageblock_nr_pages may differ from MAX_ORDER_NR_PAGES,
         * so a chunk of free pages is not necessarily aligned to
         * pageblock_nr_pages. We therefore check the pageblock
         * migratetype first.
         */
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
                if (page && get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
                        break;
        }
        page = __first_valid_page(start_pfn, end_pfn - start_pfn);
        if ((pfn < end_pfn) || !page)
                return -EBUSY;
        /* Check that all pages are free or marked as ISOLATE. */
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lock, flags);
        ret = __test_page_isolated_in_pageblock(start_pfn, end_pfn);
        spin_unlock_irqrestore(&zone->lock, flags);
        return ret ? 0 : -EBUSY;
}