/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
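
/*
 * A minimal lifecycle sketch (illustrative only: "demo_cache" is a
 * made-up name, bdev/block/key are assumed to be supplied by the
 * calling filesystem, and error handling is abbreviated):
 *
 *	struct mb_cache *cache = mb_cache_create("demo_cache", 6);
 *	struct mb_cache_entry *ce;
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_KERNEL);
 *	if (ce) {
 *		mb_cache_entry_insert(ce, bdev, block, key);
 *		mb_cache_entry_release(ce);  (drop handle; entry goes on lru)
 *	}
 *	...
 *	mb_cache_destroy(cache);
 */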

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while (0)
#else
# define mb_debug(f...) do { } while (0)
# define mb_assert(c) do { } while (0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while (0)

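/*
 * e_used encodes the handles on an entry: each reader handle adds one,
 * and a writer handle adds 1 + MB_CACHE_WRITER, so e_used >=
 * MB_CACHE_WRITER means a writer currently holds the entry.
 */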
#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

/*
 * Global data: list of all mbcaches, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_scan() - memory pressure callback
 *
 * This function is called by the kernel's memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @sc: shrink_control passed from reclaim
 *
 * Returns the number of objects freed.
 */
static unsigned long
mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(free_list);
	struct mb_cache_entry *entry, *tmp;
	int nr_to_scan = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;
	unsigned long freed = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
		freed++;
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
	return freed;
}

static unsigned long
mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct mb_cache *cache;
	unsigned long count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			  atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);

	return vfs_pressure_ratio(count);
}

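/*
 * Two-phase shrinker interface: ->count_objects reports how many
 * entries are reclaimable, and ->scan_objects frees up to
 * sc->nr_to_scan of them. Registered with the VM in init_mbcache()
 * below.
 */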
static struct shrinker mb_cache_shrinker = {
	.count_objects = mb_cache_shrink_count,
	.scan_objects = mb_cache_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/*
 * mb_cache_create() - create a new cache
 *
 * All entries in one cache are of equal size. Cache entries may be from
 * multiple devices. Returns NULL if no more memory was available. (The
 * shrinker that feeds entries back to memory management is global and
 * registered once, in init_mbcache().)
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}
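
/*
 * Sizing sketch (illustrative values): mb_cache_create("demo_cache", 10)
 * creates 2^10 = 1024 buckets in each hash table and sets c_max_entries
 * to 1024 << 4 = 16384, keeping average hash chains at 16 entries or
 * fewer; mb_cache_entry_alloc() enforces the limit by recycling entries
 * from the lru list.
 */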


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. Entries that
 * are currently in use cannot be freed and thus remain in the cache;
 * all others are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}
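
/*
 * Typical call site sketch (hypothetical filesystem; a real user would
 * call this when the device goes away, e.g. from its put_super):
 *
 *	static void demo_put_super(struct super_block *sb)
 *	{
 *		mb_cache_shrink(sb->s_bdev);
 *		...
 *	}
 */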


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it, removing it from the global list of caches. Any
 * entries still in use at this point are reported as orphaned.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			  cache->c_name,
			  atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this can happen after a failed lookup if another process
 * inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *lce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (lce->e_bdev == bdev && lce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
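
/*
 * Usage sketch, modelled loosely on how a filesystem's xattr code might
 * cache a freshly written block (names and gfp flags are illustrative):
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (ce) {
 *		error = mb_cache_entry_insert(ce, bdev, block, hash);
 *		if (error == -EBUSY)
 *			error = 0;  (someone else cached it already)
 *		mb_cache_entry_release(ce);
 *	}
 */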


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Removes the entry from the cache hashes (invalidating it) and then
 * releases the handle; an invalid entry is freed as soon as its last
 * handle is released.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
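
/*
 * Writer-side sketch (illustrative): drop a stale block from the cache
 * when the filesystem frees it on disk.
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);  (unhash and drop the handle)
 */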

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

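/*
 * Walk an index hash chain starting at @l, looking for an entry on @bdev
 * whose index key matches @key. Called and returns with
 * mb_cache_spinlock held. Returns the entry with a read handle taken,
 * NULL if the chain holds no match, or ERR_PTR(-EAGAIN) if a matching
 * entry was invalidated while we slept waiting for a writer to release
 * it.
 */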
static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found, or
 * ERR_PTR(-EAGAIN) if a matching entry was invalidated while waiting
 * for it. The returned cache entry is locked for shared access
 * ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found, or
 * ERR_PTR(-EAGAIN) if the next matching entry was invalidated while
 * waiting for it. The previous entry is automatically released, so that
 * mb_cache_entry_find_next() can be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}
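
/*
 * Reader-side sketch with ERR_PTR handling (illustrative; a real caller
 * would examine each candidate block and typically retry the whole
 * search on -EAGAIN):
 *
 *	ce = mb_cache_entry_find_first(cache, bdev, key);
 *	while (ce && !IS_ERR(ce)) {
 *		...use ce, e.g. read the block at ce->e_block...
 *		ce = mb_cache_entry_find_next(ce, bdev, key);
 *	}
 */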

#endif  /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)
