linux/mm/page_io.c
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <asm/pgtable.h>

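/*
 * Build a single-page bio for the swap slot backing @page: map_swap_page()
 * returns the slot's page offset within the swap device and fills in the
 * backing block device, and the shift by PAGE_SHIFT - 9 converts that page
 * offset into 512-byte sectors.  Returns NULL if the bio allocation fails.
 */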
static struct bio *get_swap_bio(gfp_t gfp_flags,
				struct page *page, bio_end_io_t end_io)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
		bio->bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
		bio->bi_idx = 0;
		bio->bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
}

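/*
 * Completion handler for swap-out I/O: report write errors, end writeback
 * on the page and drop the bio reference taken at submission time.
 */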
static void end_swap_bio_write(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid rotate_reclaimable_page()
		 */
		set_page_dirty(page);
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

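/*
 * Completion handler for swap-in I/O: mark the page up to date on success
 * or flag an error on failure, then unlock the page (left locked by the
 * submitter) and drop the bio reference.
 */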
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
	bio_put(bio);
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret = 0, rw = WRITE;

	if (try_to_free_swap(page)) {
		unlock_page(page);
		goto out;
	}
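	/*
	 * If a frontswap backend accepts the page (returns 0), its contents
	 * now live in transcendent memory and no block I/O is needed; just
	 * go through the writeback bookkeeping so callers see a clean page.
	 */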
	if (frontswap_store(page) == 0) {
		set_page_writeback(page);
		unlock_page(page);
		end_page_writeback(page);
		goto out;
	}
	bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
	if (bio == NULL) {
		set_page_dirty(page);
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		rw |= REQ_SYNC;
	count_vm_event(PSWPOUT);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(rw, bio);
out:
	return ret;
}

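/*
 * Read a page back in from swap.  The page must be locked and not yet up
 * to date; it is unlocked by end_swap_bio_read() once the read completes,
 * or here on the frontswap and allocation-failure paths.
 */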
int swap_readpage(struct page *page)
{
	struct bio *bio;
	int ret = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageUptodate(page));
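	/*
	 * A frontswap backend may hold this page in transcendent memory;
	 * if frontswap_load() succeeds the data has already been copied
	 * into the page, so skip the block I/O entirely.
	 */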
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}
	bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
	if (bio == NULL) {
		unlock_page(page);
		ret = -ENOMEM;
		goto out;
	}
	count_vm_event(PSWPIN);
	submit_bio(READ, bio);
out:
	return ret;
}