linux/drivers/md/dm-cache-policy-cleaner.c
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * Writeback cache policy that supports flushing out dirty cache blocks.
 *
 * This file is released under the GPL.
 */
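
/*
 * Illustrative usage (a sketch, not part of this file): the policy is
 * selected by name in a dm-cache target table line.  Assuming the cache
 * target syntax from Documentation/device-mapper/cache.txt, with
 * hypothetical devices and a 512-sector cache block size:
 *
 *   dmsetup create cached --table "0 <origin sectors> cache \
 *     <metadata dev> <cache dev> <origin dev> 512 0 cleaner 0"
 */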

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*----------------------------------------------------------------*/

#define DM_MSG_PREFIX "cache cleaner"

/* Cache entry struct. */
struct wb_cache_entry {
        struct list_head list;
        struct hlist_node hlist;

        dm_oblock_t oblock;
        dm_cblock_t cblock;
        bool dirty:1;
        bool pending:1;
};

struct hash {
        struct hlist_head *table;
        unsigned hash_bits;
        unsigned nr_buckets;
};

struct policy {
        struct dm_cache_policy policy;
        spinlock_t lock;

        struct list_head free;
        struct list_head clean;
        struct list_head clean_pending;
        struct list_head dirty;

        /*
         * We know exactly how many cblocks will be needed,
         * so we can allocate them up front.
         */
        dm_cblock_t cache_size, nr_cblocks_allocated;
        struct wb_cache_entry *cblocks;
        struct hash chash;
};

/*----------------------------------------------------------------------------*/

/*
 * Low-level functions.
 */
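/* Round up to the next power of two, no smaller than @min. */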
static unsigned next_power(unsigned n, unsigned min)
{
        return roundup_pow_of_two(max(n, min));
}

static struct policy *to_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct policy, policy);
}

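/* Remove and return the first element of @q; the list must not be empty. */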
static struct list_head *list_pop(struct list_head *q)
{
        struct list_head *r = q->next;

        list_del(r);

        return r;
}

/*----------------------------------------------------------------------------*/

/* Allocate/free various resources. */
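/*
 * Size the table at roughly one bucket per 16 entries, with a floor of
 * 16 buckets, and derive the bucket-index bit count from that.
 */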
static int alloc_hash(struct hash *hash, unsigned elts)
{
        hash->nr_buckets = next_power(elts >> 4, 16);
        hash->hash_bits = ffs(hash->nr_buckets) - 1;
        hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);

        return hash->table ? 0 : -ENOMEM;
}

static void free_hash(struct hash *hash)
{
        vfree(hash->table);
}

static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
{
        int r = -ENOMEM;

        p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
        if (p->cblocks) {
                unsigned u = from_cblock(cache_size);

                while (u--)
                        list_add(&p->cblocks[u].list, &p->free);

                p->nr_cblocks_allocated = 0;

                /* Cache entries hash. */
                r = alloc_hash(&p->chash, from_cblock(cache_size));
                if (r)
                        vfree(p->cblocks);
        }

        return r;
}

static void free_cache_blocks_and_hash(struct policy *p)
{
        free_hash(&p->chash);
        vfree(p->cblocks);
}

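/*
 * Take a preallocated entry off the free list; all entries were allocated
 * up front in alloc_cache_blocks_with_hash(), so this cannot fail unless
 * the accounting is broken (hence the BUG_ON).
 */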
static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
{
        struct wb_cache_entry *e;

        BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));

        e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
        p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);

        return e;
}

/*----------------------------------------------------------------------------*/

/* Hash functions (lookup, insert, remove). */
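/*
 * Look up the entry mapping @oblock, if any.  A hit is moved to the head
 * of its bucket so that repeat lookups stay cheap.
 */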
static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
{
        struct hash *hash = &p->chash;
        unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
        struct wb_cache_entry *cur;
        struct hlist_head *bucket = &hash->table[h];

        hlist_for_each_entry(cur, bucket, hlist) {
                if (cur->oblock == oblock) {
                        /* Move to the front of the bucket for faster access. */
                        hlist_del(&cur->hlist);
                        hlist_add_head(&cur->hlist, bucket);
                        return cur;
                }
        }

        return NULL;
}

static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);

        hlist_add_head(&e->hlist, &p->chash.table[h]);
}

static void remove_cache_hash_entry(struct wb_cache_entry *e)
{
        hlist_del(&e->hlist);
}

/* Public interface (see dm-cache-policy.h). */
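/*
 * Map @oblock: report POLICY_HIT with its cblock if it is cached,
 * POLICY_MISS otherwise.  This policy never initiates migrations.
 * Returns -EWOULDBLOCK if the lock cannot be taken and @can_block is false.
 */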
static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_result *result)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        result->op = POLICY_MISS;

        if (can_block)
                spin_lock_irqsave(&p->lock, flags);
        else if (!spin_trylock_irqsave(&p->lock, flags))
                return -EWOULDBLOCK;

        e = lookup_cache_entry(p, oblock);
        if (e) {
                result->op = POLICY_HIT;
                result->cblock = e->cblock;
        }

        spin_unlock_irqrestore(&p->lock, flags);

        return 0;
}

static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
{
        int r;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        if (!spin_trylock_irqsave(&p->lock, flags))
                return -EWOULDBLOCK;

        e = lookup_cache_entry(p, oblock);
        if (e) {
                *cblock = e->cblock;
                r = 0;
        } else
                r = -ENOENT;

        spin_unlock_irqrestore(&p->lock, flags);

        return r;
}

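/* Caller must hold p->lock.  Moves the entry between the dirty and clean lists. */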
static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;

        e = lookup_cache_entry(p, oblock);
        BUG_ON(!e);

        if (set) {
                if (!e->dirty) {
                        e->dirty = true;
                        list_move(&e->list, &p->dirty);
                }
        } else {
                if (e->dirty) {
                        e->pending = false;
                        e->dirty = false;
                        list_move(&e->list, &p->clean);
                }
        }
}

static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        __set_clear_dirty(pe, oblock, true);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        __set_clear_dirty(pe, oblock, false);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
{
        insert_cache_hash_entry(p, e);
        if (e->dirty)
                list_add(&e->list, &p->dirty);
        else
                list_add(&e->list, &p->clean);
}

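/*
 * Load a mapping from the cache metadata.  Blocks start out clean, and
 * hints are ignored since this policy declares hint_size == 0.
 */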
static int wb_load_mapping(struct dm_cache_policy *pe,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
{
        int r;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e = alloc_cache_entry(p);

        if (e) {
                e->cblock = cblock;
                e->oblock = oblock;
                e->dirty = false; /* blocks default to clean */
                add_cache_entry(p, e);
                r = 0;
        } else
                r = -ENOMEM;

        return r;
}

static void wb_destroy(struct dm_cache_policy *pe)
{
        struct policy *p = to_policy(pe);

        free_cache_blocks_and_hash(p);
        kfree(p);
}

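/* Caller must hold p->lock.  The mapping for @oblock must exist. */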
static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
{
        struct wb_cache_entry *r = lookup_cache_entry(p, oblock);

        BUG_ON(!r);

        remove_cache_hash_entry(r);
        list_del(&r->list);

        return r;
}

static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        e = __wb_force_remove_mapping(p, oblock);
        list_add_tail(&e->list, &p->free);
        BUG_ON(!from_cblock(p->nr_cblocks_allocated));
        p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void wb_force_mapping(struct dm_cache_policy *pe,
                                dm_oblock_t current_oblock, dm_oblock_t oblock)
{
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        e = __wb_force_remove_mapping(p, current_oblock);
        e->oblock = oblock;
        add_cache_entry(p, e);
        spin_unlock_irqrestore(&p->lock, flags);
}

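/*
 * Pop the next dirty entry and park it on the clean_pending list; it stays
 * there until the writeback completes and wb_clear_dirty() moves it to the
 * clean list.
 */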
static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
{
        struct list_head *l;
        struct wb_cache_entry *r;

        if (list_empty(&p->dirty))
                return NULL;

        l = list_pop(&p->dirty);
        r = container_of(l, struct wb_cache_entry, list);
        list_add(l, &p->clean_pending);

        return r;
}

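/*
 * Provide the next block to write back: returns 0 and fills in
 * @oblock/@cblock if a dirty entry is available, -ENOENT otherwise.
 */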
static int wb_writeback_work(struct dm_cache_policy *pe,
                             dm_oblock_t *oblock,
                             dm_cblock_t *cblock)
{
        int r = -ENOENT;
        struct policy *p = to_policy(pe);
        struct wb_cache_entry *e;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);

        e = get_next_dirty_entry(p);
        if (e) {
                *oblock = e->oblock;
                *cblock = e->cblock;
                r = 0;
        }

        spin_unlock_irqrestore(&p->lock, flags);

        return r;
}

static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
{
        return to_policy(pe)->nr_cblocks_allocated;
}

/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct policy *p)
{
        p->policy.destroy = wb_destroy;
        p->policy.map = wb_map;
        p->policy.lookup = wb_lookup;
        p->policy.set_dirty = wb_set_dirty;
        p->policy.clear_dirty = wb_clear_dirty;
        p->policy.load_mapping = wb_load_mapping;
        p->policy.walk_mappings = NULL;
        p->policy.remove_mapping = wb_remove_mapping;
        p->policy.writeback_work = wb_writeback_work;
        p->policy.force_mapping = wb_force_mapping;
        p->policy.residency = wb_residency;
        p->policy.tick = NULL;
}

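/*
 * Create a policy instance.  The cache size is fixed, so all cache entry
 * structs are allocated up front and strung on the free list.
 */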
static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
{
        int r;
        struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;

        init_policy_functions(p);
        INIT_LIST_HEAD(&p->free);
        INIT_LIST_HEAD(&p->clean);
        INIT_LIST_HEAD(&p->clean_pending);
        INIT_LIST_HEAD(&p->dirty);

        p->cache_size = cache_size;
        spin_lock_init(&p->lock);

        /* Allocate cache entry structs and add them to free list. */
        r = alloc_cache_blocks_with_hash(p, cache_size);
        if (!r)
                return &p->policy;

        kfree(p);

        return NULL;
}

/*----------------------------------------------------------------------------*/

static struct dm_cache_policy_type wb_policy_type = {
        .name = "cleaner",
        .version = {1, 0, 0},
        .hint_size = 0,
        .owner = THIS_MODULE,
        .create = wb_create
};

static int __init wb_init(void)
{
        int r = dm_cache_policy_register(&wb_policy_type);

        if (r < 0)
                DMERR("register failed %d", r);
        else
                DMINFO("version %u.%u.%u loaded",
                       wb_policy_type.version[0],
                       wb_policy_type.version[1],
                       wb_policy_type.version[2]);

        return r;
}

static void __exit wb_exit(void)
{
        dm_cache_policy_unregister(&wb_policy_type);
}

module_init(wb_init);
module_exit(wb_exit);

MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("cleaner cache policy");