linux/mm/swap_state.c
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};

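/*
 * Lightweight counters of swap cache activity, bumped via
 * INC_CACHE_INFO() and reported by show_swap_cache_info() below.
 */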
#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
        if (!error) {
                write_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (!error) {
                        page_cache_get(page);
                        SetPageSwapCache(page);
                        set_page_private(page, entry.val);
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                        INC_CACHE_INFO(add_total);
                }
                write_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();
        }
        return error;
}

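/*
 * A condensed sketch of how the callers below use this routine (see
 * add_to_swap() and read_swap_cache_async(); "page", "entry" and
 * "gfp_mask" stand in for the caller's values):
 *
 *        err = add_to_swap_cache(page, entry, gfp_mask);
 *        if (err) {
 *                swap_free(entry);       raced (-EEXIST) or -ENOMEM
 *                ...back off, or retry with a fresh entry...
 *        }
 */
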
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(!PageSwapCache(page));
        BUG_ON(PageWriteback(page));
        BUG_ON(PagePrivate(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page * page, gfp_t gfp_mask)
{
        swp_entry_t entry;
        int err;

        BUG_ON(!PageLocked(page));
        BUG_ON(!PageUptodate(page));

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;

                /*
                 * Radix-tree node allocations from PF_MEMALLOC contexts could
                 * completely exhaust the page allocator. __GFP_NOMEMALLOC
                 * stops emergency reserves from being allocated.
                 *
                 * TODO: this could cause a theoretical memory reclaim
                 * deadlock in the swap out path.
                 */
                /*
                 * Add it to the swap cache and mark it dirty
                 */
                err = add_to_swap_cache(page, entry,
                                gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

                switch (err) {
                case 0:                         /* Success */
                        SetPageDirty(page);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}

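/*
 * Roughly how vmscan's shrink_page_list() drives add_to_swap() for an
 * anonymous page during reclaim (a simplified sketch, not a copy of
 * mm/vmscan.c; locking and the surrounding error handling are omitted):
 *
 *        if (PageAnon(page) && !PageSwapCache(page)) {
 *                if (!add_to_swap(page, GFP_ATOMIC))
 *                        goto activate_locked;     keep the page resident
 *        }
 *        mapping = page_mapping(page);     now resolves to swapper_space
 */
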
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page onto the free list;
 * the caller holds a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        write_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        write_unlock_irq(&swapper_space.tree_lock);

        swap_free(entry);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck inside
 * remove_exclusive_swap_page() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !TestSetPageLocked(page)) {
                remove_exclusive_swap_page(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}

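/*
 * The heaviest user of the batched variant above is the mmu_gather code:
 * once a batch of user pages has been unmapped, the generic TLB teardown
 * does, approximately (a sketch of asm-generic's tlb_flush_mmu(); exact
 * field names and fast-mode handling can differ per architecture):
 *
 *        tlb_flush(tlb);
 *        if (!tlb_fast_mode(tlb)) {
 *                free_pages_and_swap_cache(tlb->pages, tlb->nr);
 *                tlb->nr = 0;
 *        }
 */
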
/*
 * Look up a swap entry in the swap cache. A found page is returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                if (!swap_duplicate(entry))
                        break;

                /*
                 * Associate the page with the swap entry in the swap cache.
                 * May fail (-EEXIST) if there is already a page associated
                 * with this entry in the swap cache: added by a racing
                 * read_swap_cache_async, or add_to_swap or shmem_writepage
                 * re-using the just-freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
                SetPageLocked(new_page);
                err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_active(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
                ClearPageLocked(new_page);
                swap_free(entry);
        } while (err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the mmap_sem of vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        int nr_pages;
        struct page *page;
        unsigned long offset;
        unsigned long end_offset;

        /*
         * Get starting offset for readaround, and number of pages to read.
         * Adjust starting address by readbehind (for NUMA interleave case)?
         * No, it's very unlikely that swap layout would follow vma layout,
         * more likely that neighbouring swap pages came from the same node:
         * so use the same "addr" to choose the same node for each swap read.
         */
        nr_pages = valid_swaphandles(entry, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
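
/*
 * Putting the pieces together: the page fault path in do_swap_page()
 * first consults the swap cache and only then starts readahead,
 * approximately as follows (a sketch of the mm/memory.c fault path of
 * this era; error handling and the swap-token handling are omitted,
 * and the exact gfp flags may differ):
 *
 *        page = lookup_swap_cache(entry);
 *        if (!page) {
 *                page = swapin_readahead(entry,
 *                                        GFP_HIGHUSER_MOVABLE, vma, address);
 *                if (!page)
 *                        the entry may have been freed, or we are OOM
 *        }
 */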