linux-old/mm/swap_state.c
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>

#include <asm/pgtable.h>

static int swap_writepage(struct page *page)
{
        rw_swap_page(WRITE, page, 0);
        return 0;
}

static struct address_space_operations swap_aops = {
        writepage: swap_writepage,
        sync_page: block_sync_page,
};

struct address_space swapper_space = {
        LIST_HEAD_INIT(swapper_space.clean_pages),
        LIST_HEAD_INIT(swapper_space.dirty_pages),
        LIST_HEAD_INIT(swapper_space.locked_pages),
        0,                              /* nrpages      */
        &swap_aops,
};
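
/*
 * A minimal sketch, for illustration only, of the invariant kept by the
 * code below: a swap-cache page is an ordinary page-cache page whose
 * mapping is swapper_space and whose index is the raw swp_entry_t value.
 *
 *      add_to_page_cache_locked(page, &swapper_space, entry.val);
 *      ...
 *      entry.val = page->index;        (how __delete_from_swap_cache gets it back)
 */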
#ifdef SWAP_CACHE_INFO
unsigned long swap_cache_add_total;
unsigned long swap_cache_del_total;
unsigned long swap_cache_find_total;
unsigned long swap_cache_find_success;

void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_add_total,
                swap_cache_del_total,
                swap_cache_find_success, swap_cache_find_total);
}
#endif

/*
 * Add a locked page to the swap cache at the given swap entry.  The page
 * must not already be in the swap cache and must not have a mapping; the
 * entry's value becomes the page's index in swapper_space.
 */
void add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        unsigned long flags;

#ifdef SWAP_CACHE_INFO
        swap_cache_add_total++;
#endif
        if (!PageLocked(page))
                BUG();
        if (PageTestandSetSwapCache(page))
                BUG();
        if (page->mapping)
                BUG();
        flags = page->flags & ~((1 << PG_error) | (1 << PG_arch_1));
        page->flags = flags | (1 << PG_uptodate);
        add_to_page_cache_locked(page, &swapper_space, entry.val);
}
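
/*
 * For illustration only: the expected calling sequence, as used by
 * read_swap_cache_async() below.  The caller pins the swap entry with
 * swap_duplicate() and locks the page before adding it:
 *
 *      if (!swap_duplicate(entry))
 *              return;
 *      lock_page(page);
 *      add_to_swap_cache(page, entry);
 *      rw_swap_page(READ, page, wait);
 */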

static inline void remove_from_swap_cache(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (mapping != &swapper_space)
                BUG();
        if (!PageSwapCache(page) || !PageLocked(page))
                PAGE_BUG(page);

        PageClearSwapCache(page);
        ClearPageDirty(page);
        __remove_inode_page(page);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page->index;

#ifdef SWAP_CACHE_INFO
        swap_cache_del_total++;
#endif
        remove_from_swap_cache(page);
        swap_free(entry);
}

/*
 * This will never put the page onto the free list: the caller
 * has a reference on the page.
 */
void delete_from_swap_cache_nolock(struct page *page)
{
        if (!PageLocked(page))
                BUG();

        if (block_flushpage(page, 0))
                lru_cache_del(page);

        spin_lock(&pagecache_lock);
        ClearPageDirty(page);
        __delete_from_swap_cache(page);
        spin_unlock(&pagecache_lock);
        page_cache_release(page);
}

/*
 * This must be called only on pages that have been verified to be
 * in the swap cache; the page is locked here before removal and
 * unlocked again afterwards.
 */
void delete_from_swap_cache(struct page *page)
{
        lock_page(page);
        delete_from_swap_cache_nolock(page);
        UnlockPage(page);
}
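
/*
 * A minimal sketch, for illustration only, of which removal entry point a
 * caller would pick:
 *
 *      delete_from_swap_cache_nolock(page);    the caller already holds the page lock
 *      delete_from_swap_cache(page);           the page is unlocked; it is locked here
 */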

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.  We cannot do a
 * lock_page() here, as we are holding the page_table_lock spinlock.
 */
void free_page_and_swap_cache(struct page *page)
{
        /*
         * If we are the only user, then try to free up the swap cache.
         */
        if (PageSwapCache(page) && !TryLockPage(page)) {
                if (!is_page_shared(page)) {
                        delete_from_swap_cache_nolock(page);
                }
                UnlockPage(page);
        }
        page_cache_release(page);
}
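
/*
 * A minimal sketch, for illustration only, of the intended use: the caller
 * is tearing down a page-table mapping with the page_table_lock held, so it
 * cannot sleep in lock_page() and instead just drops its reference, letting
 * the swap cache entry go with it when nobody else uses the page.  pte_page()
 * below is assumed to be the usual helper for turning a pte into its page:
 *
 *      page = pte_page(pte);
 *      free_page_and_swap_cache(page);
 */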

/*
 * Look up a swap entry in the swap cache.  A found page is returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even though we drop the
 * page lock before returning.
 */

struct page * lookup_swap_cache(swp_entry_t entry)
{
        struct page *found;

#ifdef SWAP_CACHE_INFO
        swap_cache_find_total++;
#endif
        while (1) {
                /*
                 * Right now the page cache is 32-bit only, but the swap
                 * entry is a 32-bit index too, so it fits. =)
                 */
repeat:
                found = find_lock_page(&swapper_space, entry.val);
                if (!found)
                        return 0;
                /*
                 * Though the "found" page was in the swap cache an instant
                 * earlier, it might have been removed by refill_inactive etc.
                 * Re-check: since find_lock_page grabs a reference on the
                 * page, it cannot be reused for anything else - in particular
                 * it cannot be associated with another swap handle - so it
                 * is enough to check whether the page is still in the swap
                 * cache.
                 */
                if (!PageSwapCache(found)) {
                        UnlockPage(found);
                        page_cache_release(found);
                        goto repeat;
                }
                if (found->mapping != &swapper_space)
                        goto out_bad;
#ifdef SWAP_CACHE_INFO
                swap_cache_find_success++;
#endif
                UnlockPage(found);
                return found;
        }

out_bad:
        printk(KERN_ERR "VM: Found a non-swapper swap page!\n");
        UnlockPage(found);
        page_cache_release(found);
        return 0;
}
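
/*
 * A minimal sketch, for illustration only, of the caller's side of the
 * contract above: the reference that lookup_swap_cache() returns with must
 * be dropped once the page has been used:
 *
 *      page = lookup_swap_cache(entry);
 *      if (page) {
 *              ... use the (unlocked) page ...
 *              page_cache_release(page);
 *      }
 */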

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.  If wait==0, we are
 * only doing readahead, so don't worry if the page is already locked.
 *
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */

struct page * read_swap_cache_async(swp_entry_t entry, int wait)
{
        struct page *found_page = 0, *new_page;
        unsigned long new_page_addr;

        /*
         * Make sure the swap entry is still in use.
         */
        if (!swap_duplicate(entry))     /* Account for the swap cache */
                goto out;
        /*
         * Look for the page in the swap cache.
         */
        found_page = lookup_swap_cache(entry);
        if (found_page)
                goto out_free_swap;

        new_page_addr = __get_free_page(GFP_USER);
        if (!new_page_addr)
                goto out_free_swap;     /* Out of memory */
        new_page = virt_to_page(new_page_addr);

        /*
         * Check the swap cache again, in case we stalled above.
         */
        found_page = lookup_swap_cache(entry);
        if (found_page)
                goto out_free_page;
        /*
         * Add it to the swap cache and read its contents.
         */
        lock_page(new_page);
        add_to_swap_cache(new_page, entry);
        rw_swap_page(READ, new_page, wait);
        return new_page;

out_free_page:
        page_cache_release(new_page);
out_free_swap:
        swap_free(entry);
out:
        return found_page;
}
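
/*
 * A minimal sketch, for illustration only, of how a swap-in path would
 * typically combine the two lookups above: probe the swap cache first and
 * fall back to read_swap_cache_async() only on a miss.  This helper is
 * hypothetical and not part of the original file; it uses only functions
 * defined above.
 */
static struct page * swapin_page_sketch(swp_entry_t entry)
{
        struct page *page;

        /* Fast path: the page is already in the swap cache. */
        page = lookup_swap_cache(entry);
        if (page)
                return page;

        /*
         * Slow path: reserve swap cache space, allocate a fresh page and
         * start the read, waiting for the I/O to finish (wait == 1).
         */
        return read_swap_cache_async(entry, 1);
}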