linux/mm/cleancache.c
/*
 * Cleancache frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of cleancache.  See
 * Documentation/vm/cleancache.txt for more information.
 *
 * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/exportfs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
#include <linux/cleancache.h>

/*
 * This global enablement flag may be read thousands of times per second
 * by cleancache_get/put/invalidate even on systems where cleancache_ops
 * is not claimed (e.g. cleancache is config'ed on but remains
 * disabled), so is preferred to the slower alternative: a function
 * call that checks a non-global.
 */
int cleancache_enabled __read_mostly;
EXPORT_SYMBOL(cleancache_enabled);

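/*
 * Illustrative sketch (assumption, not part of the upstream file): callers
 * normally reach the __cleancache_* entry points below through static
 * inline wrappers in include/linux/cleancache.h, which test
 * cleancache_enabled first so that the common "no backend" case costs
 * only one global load.  The hypothetical wrapper below approximates
 * that pattern; it is not a copy of the real header.
 */
static inline int example_cleancache_get_page(struct page *page)
{
        int ret = -1;

        if (cleancache_enabled)         /* cheap global test on the hot path */
                ret = __cleancache_get_page(page);
        return ret;
}
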
/*
 * cleancache_ops is set by cleancache_register_ops to contain the pointers
 * to the cleancache "backend" implementation functions.
 */
static struct cleancache_ops cleancache_ops __read_mostly;

/*
 * Counters available via /sys/kernel/debug/cleancache (if debugfs is
 * properly configured).  These are for information only so are not
 * protected against increment races.
 */
static u64 cleancache_succ_gets;
static u64 cleancache_failed_gets;
static u64 cleancache_puts;
static u64 cleancache_invalidates;

/*
 * Register operations for cleancache, returning the previous ops so the
 * caller can detect multiple backends and possible nesting.
 */
struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
{
        struct cleancache_ops old = cleancache_ops;

        cleancache_ops = *ops;
        cleancache_enabled = 1;
        return old;
}
EXPORT_SYMBOL(cleancache_register_ops);

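/*
 * Example (sketch): a hypothetical backend fills in a struct
 * cleancache_ops with its own implementations and hands it to
 * cleancache_register_ops(), keeping the returned previous ops if it
 * wants to chain.  The backend name and bodies below are invented for
 * illustration; the .init_fs signature is inferred from the call to
 * cleancache_ops.init_fs(PAGE_SIZE) in this file.
 */
static int example_init_fs(size_t pagesize)
{
        /* a real backend would create a pool sized for pagesize pages */
        return 0;       /* pool id, or a negative value on failure */
}

static struct cleancache_ops example_backend_ops = {
        .init_fs        = example_init_fs,
        /*
         * .init_shared_fs, .get_page, .put_page, .invalidate_page,
         * .invalidate_inode and .invalidate_fs would be filled in the
         * same way.
         */
};

static struct cleancache_ops example_saved_ops;

static void example_backend_start(void)
{
        /* save the previous ops so nesting/multiple backends can be seen */
        example_saved_ops = cleancache_register_ops(&example_backend_ops);
}
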
/* Called by a cleancache-enabled filesystem at time of mount */
void __cleancache_init_fs(struct super_block *sb)
{
        sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_fs);

/* Called by a cleancache-enabled clustered filesystem at time of mount */
void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
{
        sb->cleancache_poolid =
                (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
}
EXPORT_SYMBOL(__cleancache_init_shared_fs);

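/*
 * Example (sketch): a filesystem opts in at mount time by calling the
 * cleancache_init_fs() wrapper from include/linux/cleancache.h near the
 * end of its superblock setup.  The fill_super routine below is
 * hypothetical and only shows where such a call would sit.
 */
static int example_fill_super(struct super_block *sb)
{
        /* ... ordinary superblock setup would go here ... */
        cleancache_init_fs(sb); /* sets sb->cleancache_poolid if a backend is up */
        return 0;
}
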
/*
 * If the filesystem uses exportable filehandles, use the filehandle as
 * the key, else use the inode number.
 */
static int cleancache_get_key(struct inode *inode,
                              struct cleancache_filekey *key)
{
        int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
        int len = 0, maxlen = CLEANCACHE_KEY_MAX;
        struct super_block *sb = inode->i_sb;

        key->u.ino = inode->i_ino;
        if (sb->s_export_op != NULL) {
                fhfn = sb->s_export_op->encode_fh;
                if (fhfn) {
                        len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
                        if (len <= 0 || len == 255)
                                return -1;
                        if (maxlen > CLEANCACHE_KEY_MAX)
                                return -1;
                }
        }
        return 0;
}

/*
 * "Get" data from cleancache associated with the poolid/inode/index
 * that were specified when the data was put to cleancache and, if
 * successful, use it to fill the specified page with data and return 0.
 * If the get fails, the pageframe is unchanged and -1 is returned.
 * Page must be locked by caller.
 */
int __cleancache_get_page(struct page *page)
{
        int ret = -1;
        int pool_id;
        struct cleancache_filekey key = { .u.key = { 0 } };

        VM_BUG_ON(!PageLocked(page));
        pool_id = page->mapping->host->i_sb->cleancache_poolid;
        if (pool_id < 0)
                goto out;

        if (cleancache_get_key(page->mapping->host, &key) < 0)
                goto out;

        ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
        if (ret == 0)
                cleancache_succ_gets++;
        else
                cleancache_failed_gets++;
out:
        return ret;
}
EXPORT_SYMBOL(__cleancache_get_page);

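/*
 * Example (sketch): a read path tries cleancache before issuing real
 * I/O, via the cleancache_get_page() wrapper from
 * include/linux/cleancache.h.  The readpage routine and the
 * example_issue_read() helper below are hypothetical.
 */
static int example_issue_read(struct page *page);      /* real I/O, not shown */

static int example_readpage(struct file *file, struct page *page)
{
        if (cleancache_get_page(page) == 0) {
                /* hit: the backend filled the locked page for us */
                SetPageUptodate(page);
                unlock_page(page);
                return 0;
        }
        /* miss: page is untouched, read it from disk as usual */
        return example_issue_read(page);
}
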
/*
 * "Put" data from a page to cleancache and associate it with the
 * (previously-obtained per-filesystem) poolid and the page's
 * inode and page index.  Page must be locked.  Note that a put_page
 * always "succeeds", though a subsequent get_page may succeed or fail.
 */
void __cleancache_put_page(struct page *page)
{
        int pool_id;
        struct cleancache_filekey key = { .u.key = { 0 } };

        VM_BUG_ON(!PageLocked(page));
        pool_id = page->mapping->host->i_sb->cleancache_poolid;
        if (pool_id >= 0 &&
              cleancache_get_key(page->mapping->host, &key) >= 0) {
                (*cleancache_ops.put_page)(pool_id, key, page->index, page);
                cleancache_puts++;
        }
}
EXPORT_SYMBOL(__cleancache_put_page);

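/*
 * Example (sketch): the put is driven by the VM, not by the filesystem.
 * When a clean page-cache page is about to be dropped (e.g. under memory
 * pressure), the eviction path calls the cleancache_put_page() wrapper
 * so the backend gets a copy before the pageframe is reclaimed.  The
 * helper below is a hypothetical, heavily simplified illustration.
 */
static void example_evict_clean_page(struct page *page)
{
        /* offer the page to the backend; a put always "succeeds" */
        cleancache_put_page(page);
        /* ... the page would then be removed from the page cache ... */
}
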
/*
 * Invalidate any data from cleancache associated with the poolid and the
 * page's inode and page index so that a subsequent "get" will fail.
 */
void __cleancache_invalidate_page(struct address_space *mapping,
                                        struct page *page)
{
        /* careful... page->mapping is NULL sometimes when this is called */
        int pool_id = mapping->host->i_sb->cleancache_poolid;
        struct cleancache_filekey key = { .u.key = { 0 } };

        if (pool_id >= 0) {
                VM_BUG_ON(!PageLocked(page));
                if (cleancache_get_key(mapping->host, &key) >= 0) {
                        (*cleancache_ops.invalidate_page)(pool_id,
                                                          key, page->index);
                        cleancache_invalidates++;
                }
        }
}
EXPORT_SYMBOL(__cleancache_invalidate_page);

/*
 * Invalidate all data from cleancache associated with the poolid and the
 * mapping's inode so that all subsequent gets to this poolid/inode
 * will fail.
 */
void __cleancache_invalidate_inode(struct address_space *mapping)
{
        int pool_id = mapping->host->i_sb->cleancache_poolid;
        struct cleancache_filekey key = { .u.key = { 0 } };

        if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
                (*cleancache_ops.invalidate_inode)(pool_id, key);
}
EXPORT_SYMBOL(__cleancache_invalidate_inode);

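/*
 * Example (sketch): invalidation keeps the backend coherent with the
 * page cache.  Whenever cached contents become stale (truncate, hole
 * punch, inode eviction), the caller uses the cleancache_invalidate_*
 * wrappers from include/linux/cleancache.h so a later get cannot return
 * stale data.  The truncate-style helper below is hypothetical.
 */
static void example_truncate_pagecache(struct address_space *mapping)
{
        /* ... pages would be unmapped and removed from the cache here ... */
        cleancache_invalidate_inode(mapping);   /* drop every cleancache copy */
}
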
/*
 * Called by any cleancache-enabled filesystem at time of unmount;
 * note that pool_id is surrendered and may be returned by a subsequent
 * cleancache_init_fs or cleancache_init_shared_fs.
 */
void __cleancache_invalidate_fs(struct super_block *sb)
{
        if (sb->cleancache_poolid >= 0) {
                int old_poolid = sb->cleancache_poolid;
                sb->cleancache_poolid = -1;
                (*cleancache_ops.invalidate_fs)(old_poolid);
        }
}
EXPORT_SYMBOL(__cleancache_invalidate_fs);

static int __init init_cleancache(void)
{
#ifdef CONFIG_DEBUG_FS
        struct dentry *root = debugfs_create_dir("cleancache", NULL);
        if (root == NULL)
                return -ENXIO;
        debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets);
        debugfs_create_u64("failed_gets", S_IRUGO,
                                root, &cleancache_failed_gets);
        debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts);
        debugfs_create_u64("invalidates", S_IRUGO,
                                root, &cleancache_invalidates);
#endif
        return 0;
}
module_init(init_cleancache)