linux/drivers/md/dm-bufio.c
/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bufio"

/*
 * Memory management policy:
 *      Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *      or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *      Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *      Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *      dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS            8

#define DM_BUFIO_MEMORY_PERCENT         2
#define DM_BUFIO_VMALLOC_PERCENT        25
#define DM_BUFIO_WRITEBACK_PERCENT      75

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS        10

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS       60

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS            16

/*
 * Buffer hash
 */
#define DM_BUFIO_HASH_BITS      20
#define DM_BUFIO_HASH(block) \
        ((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
         ((1 << DM_BUFIO_HASH_BITS) - 1))
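
/*
 * Illustrative example (not part of the original source): with
 * DM_BUFIO_HASH_BITS == 20, block 0x12345678 hashes to
 * ((0x12345678 >> 20) ^ 0x12345678) & 0xfffff == 0x4575b.  Folding the
 * high bits into the low bits keeps long sequential runs of blocks
 * spread across the table.
 */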

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT  (PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT   (PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN      0
#define LIST_DIRTY      1
#define LIST_SIZE       2

/*
 * Linking of buffers:
 *      All buffers are linked to cache_hash with their hash_list field.
 *
 *      Clean buffers that are not being written (B_WRITING not set)
 *      are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *      Dirty and clean buffers that are being written are linked to
 *      lru[LIST_DIRTY] with their lru_list field. When the write
 *      finishes, the buffer cannot be relinked immediately (because we
 *      are in an interrupt context and relinking requires process
 *      context), so some clean-not-writing buffers can be held on
 *      dirty_lru too.  They are later added to lru in the process
 *      context.
 */
struct dm_bufio_client {
        struct mutex lock;

        struct list_head lru[LIST_SIZE];
        unsigned long n_buffers[LIST_SIZE];

        struct block_device *bdev;
        unsigned block_size;
        unsigned char sectors_per_block_bits;
        unsigned char pages_per_block_bits;
        unsigned char blocks_per_page_bits;
        unsigned aux_size;
        void (*alloc_callback)(struct dm_buffer *);
        void (*write_callback)(struct dm_buffer *);

        struct dm_io_client *dm_io;

        struct list_head reserved_buffers;
        unsigned need_reserved_buffers;

        struct hlist_head *cache_hash;
        wait_queue_head_t free_buffer_wait;

        int async_write_error;

        struct list_head client_list;
        struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING       0
#define B_WRITING       1
#define B_DIRTY         2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
        DATA_MODE_SLAB = 0,
        DATA_MODE_GET_FREE_PAGES = 1,
        DATA_MODE_VMALLOC = 2,
        DATA_MODE_LIMIT = 3
};

struct dm_buffer {
        struct hlist_node hash_list;
        struct list_head lru_list;
        sector_t block;
        void *data;
        enum data_mode data_mode;
        unsigned char list_mode;                /* LIST_* */
        unsigned hold_count;
        int read_error;
        int write_error;
        unsigned long state;
        unsigned long last_accessed;
        struct dm_bufio_client *c;
        struct bio bio;
        struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
        unsigned ret = c->blocks_per_page_bits - 1;

        BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

        return ret;
}
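
/*
 * Example, assuming 4KB pages (PAGE_SHIFT == 12): sub-page block sizes
 * 2048, 1024 and 512 give blocks_per_page_bits of 1, 2 and 3, which map
 * to indices 0, 1 and 2 of dm_bufio_caches[].
 */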

#define DM_BUFIO_CACHE(c)       (dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)  (dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()   (!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
        mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
        return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
        mutex_unlock(&c->lock);
}

/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()               \
do {                                            \
        if (unlikely(need_resched()))           \
                _cond_resched();                \
} while (0)
#else
#  define dm_bufio_cond_resched()                do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
        static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
                &dm_bufio_allocated_kmem_cache,
                &dm_bufio_allocated_get_free_pages,
                &dm_bufio_allocated_vmalloc,
        };

        spin_lock(&param_spinlock);

        *class_ptr[data_mode] += diff;

        dm_bufio_current_allocated += diff;

        if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
                dm_bufio_peak_allocated = dm_bufio_current_allocated;

        spin_unlock(&param_spinlock);
}

/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
        BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
        BUG_ON(dm_bufio_client_count < 0);

        dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

        /*
         * Use default if set to 0 and report the actual cache size used.
         */
        if (!dm_bufio_cache_size_latch) {
                (void)cmpxchg(&dm_bufio_cache_size, 0,
                              dm_bufio_default_cache_size);
                dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
        }

        dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
                                         (dm_bufio_client_count ? : 1);
}

/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
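/*
 * For example, with 4KB pages and MAX_ORDER == 11 (a common x86-64
 * configuration), blocks up to 2048 bytes come from the slab cache,
 * blocks up to 4MB may come from __get_free_pages, and anything larger
 * (or any allocation that must not fail) falls back to __vmalloc.
 */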
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                               enum data_mode *data_mode)
{
        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
                *data_mode = DATA_MODE_SLAB;
                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
        }

        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
            gfp_mask & __GFP_NORETRY) {
                *data_mode = DATA_MODE_GET_FREE_PAGES;
                return (void *)__get_free_pages(gfp_mask,
                                                c->pages_per_block_bits);
        }

        *data_mode = DATA_MODE_VMALLOC;
        return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}

/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
                             void *data, enum data_mode data_mode)
{
        switch (data_mode) {
        case DATA_MODE_SLAB:
                kmem_cache_free(DM_BUFIO_CACHE(c), data);
                break;

        case DATA_MODE_GET_FREE_PAGES:
                free_pages((unsigned long)data, c->pages_per_block_bits);
                break;

        case DATA_MODE_VMALLOC:
                vfree(data);
                break;

        default:
                DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
                       data_mode);
                BUG();
        }
}

/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
        struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
                                      gfp_mask);

        if (!b)
                return NULL;

        b->c = c;

        b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
        if (!b->data) {
                kfree(b);
                return NULL;
        }

        adjust_total_allocated(b->data_mode, (long)c->block_size);

        return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        adjust_total_allocated(b->data_mode, -(long)c->block_size);

        free_buffer_data(c, b->data, b->data_mode);
        kfree(b);
}

/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
        struct dm_bufio_client *c = b->c;

        c->n_buffers[dirty]++;
        b->block = block;
        b->list_mode = dirty;
        list_add(&b->lru_list, &c->lru[dirty]);
        hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
        b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        hlist_del(&b->hash_list);
        list_del(&b->lru_list);
}

/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
        struct dm_bufio_client *c = b->c;

        BUG_ON(!c->n_buffers[b->list_mode]);

        c->n_buffers[b->list_mode]--;
        c->n_buffers[dirty]++;
        b->list_mode = dirty;
        list_move(&b->lru_list, &c->lru[dirty]);
}

/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *      the vector list is limited (increasing this limit increases
 *      memory-consumption per buffer, so it is not viable);
 *
 *      the memory must be direct-mapped, not vmalloced;
 *
 *      the I/O driver can reject requests spuriously if it thinks that
 *      the requests are too big for the device or if they cross a
 *      controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/
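
/*
 * For instance, with 4KB pages, DM_BUFIO_INLINE_VECS == 16 lets the
 * inline bio path below cover buffers up to 64KB; larger or vmalloced
 * buffers take the dm-io path.
 */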

/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
        struct dm_buffer *b = context;

        b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
                     bio_end_io_t *end_io)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = rw,
                .notify.fn = dmio_complete,
                .notify.context = b,
                .client = b->c->dm_io,
        };
        struct dm_io_region region = {
                .bdev = b->c->bdev,
                .sector = block << b->c->sectors_per_block_bits,
                .count = b->c->block_size >> SECTOR_SHIFT,
        };

        if (b->data_mode != DATA_MODE_VMALLOC) {
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = b->data;
        } else {
                io_req.mem.type = DM_IO_VMA;
                io_req.mem.ptr.vma = b->data;
        }

        b->bio.bi_end_io = end_io;

        r = dm_io(&io_req, 1, &region, NULL);
        if (r)
                end_io(&b->bio, r);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
                           bio_end_io_t *end_io)
{
        char *ptr;
        int len;

        bio_init(&b->bio);
        b->bio.bi_io_vec = b->bio_vec;
        b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
        b->bio.bi_sector = block << b->c->sectors_per_block_bits;
        b->bio.bi_bdev = b->c->bdev;
        b->bio.bi_end_io = end_io;

        /*
         * We assume that if len >= PAGE_SIZE ptr is page-aligned.
         * If len < PAGE_SIZE the buffer doesn't cross page boundary.
         */
        ptr = b->data;
        len = b->c->block_size;

        if (len >= PAGE_SIZE)
                BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
        else
                BUG_ON((unsigned long)ptr & (len - 1));

        do {
                if (!bio_add_page(&b->bio, virt_to_page(ptr),
                                  len < PAGE_SIZE ? len : PAGE_SIZE,
                                  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
                        BUG_ON(b->c->block_size <= PAGE_SIZE);
                        use_dmio(b, rw, block, end_io);
                        return;
                }

                len -= PAGE_SIZE;
                ptr += PAGE_SIZE;
        } while (len > 0);

        submit_bio(rw, &b->bio);
}

static void submit_io(struct dm_buffer *b, int rw, sector_t block,
                      bio_end_io_t *end_io)
{
        if (rw == WRITE && b->c->write_callback)
                b->c->write_callback(b);

        if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
            b->data_mode != DATA_MODE_VMALLOC)
                use_inline_bio(b, rw, block, end_io);
        else
                use_dmio(b, rw, block, end_io);
}

/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->write_error = error;
        if (unlikely(error)) {
                struct dm_bufio_client *c = b->c;
                (void)cmpxchg(&c->async_write_error, 0, error);
        }

        BUG_ON(!test_bit(B_WRITING, &b->state));

        smp_mb__before_clear_bit();
        clear_bit(B_WRITING, &b->state);
        smp_mb__after_clear_bit();

        wake_up_bit(&b->state, B_WRITING);
}

/*
 * This function is called when wait_on_bit is actually waiting.
 */
static int do_io_schedule(void *word)
{
        io_schedule();

        return 0;
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write in progress, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b)
{
        if (!test_bit(B_DIRTY, &b->state))
                return;

        clear_bit(B_DIRTY, &b->state);
        wait_on_bit_lock(&b->state, B_WRITING,
                         do_io_schedule, TASK_UNINTERRUPTIBLE);

        submit_io(b, WRITE, b->block, write_endio);
}

/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
        BUG_ON(b->hold_count);

        if (!b->state)  /* fast case */
                return;

        wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
        __write_dirty_buffer(b);
        wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
}

/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
        struct dm_buffer *b;

        list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
                BUG_ON(test_bit(B_WRITING, &b->state));
                BUG_ON(test_bit(B_DIRTY, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                dm_bufio_cond_resched();
        }

        list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!b->hold_count) {
                        __make_buffer_clean(b);
                        __unlink_buffer(b);
                        return b;
                }
                dm_bufio_cond_resched();
        }

        return NULL;
}

/*
 * Wait until some other thread frees a buffer or releases its hold count
 * on one.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&c->free_buffer_wait, &wait);
        set_task_state(current, TASK_UNINTERRUPTIBLE);
        dm_bufio_unlock(c);

        io_schedule();

        set_task_state(current, TASK_RUNNING);
        remove_wait_queue(&c->free_buffer_wait, &wait);

        dm_bufio_lock(c);
}

enum new_flag {
        NF_FRESH = 0,
        NF_READ = 1,
        NF_GET = 2,
        NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b;

        /*
         * dm-bufio is resistant to allocation failures (it just keeps
         * one buffer reserved in case all the allocations fail).
         * So set flags to not try too hard:
         *      GFP_NOIO: don't recurse into the I/O layer
         *      __GFP_NORETRY: don't retry and rather return failure
         *      __GFP_NOMEMALLOC: don't use emergency reserves
         *      __GFP_NOWARN: don't print a warning in case of failure
         *
         * For debugging, if we set the cache size to 1, no new buffers will
         * be allocated.
         */
        while (1) {
                if (dm_bufio_cache_size_latch != 1) {
                        b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
                        if (b)
                                return b;
                }

                if (nf == NF_PREFETCH)
                        return NULL;

                if (!list_empty(&c->reserved_buffers)) {
                        b = list_entry(c->reserved_buffers.next,
                                       struct dm_buffer, lru_list);
                        list_del(&b->lru_list);
                        c->need_reserved_buffers++;

                        return b;
                }

                b = __get_unclaimed_buffer(c);
                if (b)
                        return b;

                __wait_for_free_buffer(c);
        }
}

static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
        struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

        if (!b)
                return NULL;

        if (c->alloc_callback)
                c->alloc_callback(b);

        return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        if (!c->need_reserved_buffers)
                free_buffer(b);
        else {
                list_add(&b->lru_list, &c->reserved_buffers);
                c->need_reserved_buffers--;
        }

        wake_up(&c->free_buffer_wait);
}

static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
{
        struct dm_buffer *b, *tmp;

        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                BUG_ON(test_bit(B_READING, &b->state));

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state)) {
                        __relink_lru(b, LIST_CLEAN);
                        continue;
                }

                if (no_wait && test_bit(B_WRITING, &b->state))
                        return;

                __write_dirty_buffer(b);
                dm_bufio_cond_resched();
        }
}

/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
                               unsigned long *threshold_buffers,
                               unsigned long *limit_buffers)
{
        unsigned long buffers;

        if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
                mutex_lock(&dm_bufio_clients_lock);
                __cache_size_refresh();
                mutex_unlock(&dm_bufio_clients_lock);
        }

        buffers = dm_bufio_cache_size_per_client >>
                  (c->sectors_per_block_bits + SECTOR_SHIFT);

        if (buffers < DM_BUFIO_MIN_BUFFERS)
                buffers = DM_BUFIO_MIN_BUFFERS;

        *limit_buffers = buffers;
        *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
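
/*
 * Worked example (illustrative): with a 100MB per-client allowance and
 * 4KB blocks (sectors_per_block_bits == 3), the shift is 3 + 9 == 12,
 * so the limit is 100MB >> 12 == 25600 buffers and the writeback
 * threshold is 25600 * 75 / 100 == 19200 dirty buffers.
 */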

/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c)
{
        unsigned long threshold_buffers, limit_buffers;

        __get_memory_limit(c, &threshold_buffers, &limit_buffers);

        while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
               limit_buffers) {

                struct dm_buffer *b = __get_unclaimed_buffer(c);

                if (!b)
                        return;

                __free_buffer_wake(b);
                dm_bufio_cond_resched();
        }

        if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
                __write_dirty_buffers_async(c, 1);
}

/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
        struct dm_buffer *b;
        struct hlist_node *hn;

        hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
                             hash_list) {
                dm_bufio_cond_resched();
                if (b->block == block)
                        return b;
        }

        return NULL;
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
                                     enum new_flag nf, int *need_submit)
{
        struct dm_buffer *b, *new_b = NULL;

        *need_submit = 0;

        b = __find(c, block);
        if (b)
                goto found_buffer;

        if (nf == NF_GET)
                return NULL;

        new_b = __alloc_buffer_wait(c, nf);
        if (!new_b)
                return NULL;

        /*
         * We've had a period where the mutex was unlocked, so we need to
         * recheck the hash table.
         */
        b = __find(c, block);
        if (b) {
                __free_buffer_wake(new_b);
                goto found_buffer;
        }

        __check_watermark(c);

        b = new_b;
        b->hold_count = 1;
        b->read_error = 0;
        b->write_error = 0;
        __link_buffer(b, block, LIST_CLEAN);

        if (nf == NF_FRESH) {
                b->state = 0;
                return b;
        }

        b->state = 1 << B_READING;
        *need_submit = 1;

        return b;

found_buffer:
        if (nf == NF_PREFETCH)
                return NULL;
        /*
         * Note: it is essential that we don't wait for the buffer to be
         * read if dm_bufio_get function is used. Both dm_bufio_get and
         * dm_bufio_prefetch can be used in the driver request routine.
         * If the user called both dm_bufio_prefetch and dm_bufio_get on
         * the same buffer, it would deadlock if we waited.
         */
        if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
                return NULL;

        b->hold_count++;
        __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
                     test_bit(B_WRITING, &b->state));
        return b;
}

/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
        struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

        b->read_error = error;

        BUG_ON(!test_bit(B_READING, &b->state));

        smp_mb__before_clear_bit();
        clear_bit(B_READING, &b->state);
        smp_mb__after_clear_bit();

        wake_up_bit(&b->state, B_READING);
}

/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
                      enum new_flag nf, struct dm_buffer **bp)
{
        int need_submit;
        struct dm_buffer *b;

        dm_bufio_lock(c);
        b = __bufio_new(c, block, nf, &need_submit);
        dm_bufio_unlock(c);

        if (!b)
                return b;

        if (need_submit)
                submit_io(b, READ, b->block, read_endio);

        wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);

        if (b->read_error) {
                int error = b->read_error;

                dm_bufio_release(b);

                return ERR_PTR(error);
        }

        *bp = b;

        return b->data;
}

void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
                    struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
                   struct dm_buffer **bp)
{
        BUG_ON(dm_bufio_in_request());

        return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
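
/*
 * Typical usage from a dm target (a minimal sketch, not taken from this
 * file; "new_contents" is a made-up source buffer):
 *
 *        struct dm_buffer *bp;
 *        void *data = dm_bufio_read(c, block, &bp);
 *        if (IS_ERR(data))
 *                return PTR_ERR(data);
 *        memcpy(data, new_contents, dm_bufio_get_block_size(c));
 *        dm_bufio_mark_buffer_dirty(bp);
 *        dm_bufio_release(bp);
 *
 * The dirty data reaches the disk only after a later call to
 * dm_bufio_write_dirty_buffers().
 */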

void dm_bufio_prefetch(struct dm_bufio_client *c,
                       sector_t block, unsigned n_blocks)
{
        struct blk_plug plug;

        BUG_ON(dm_bufio_in_request());

        blk_start_plug(&plug);
        dm_bufio_lock(c);

        for (; n_blocks--; block++) {
                int need_submit;
                struct dm_buffer *b;
                b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
                if (unlikely(b != NULL)) {
                        dm_bufio_unlock(c);

                        if (need_submit)
                                submit_io(b, READ, b->block, read_endio);
                        dm_bufio_release(b);

                        dm_bufio_cond_resched();

                        if (!n_blocks)
                                goto flush_plug;
                        dm_bufio_lock(c);
                }

        }

        dm_bufio_unlock(c);

flush_plug:
        blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);

void dm_bufio_release(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(!b->hold_count);

        b->hold_count--;
        if (!b->hold_count) {
                wake_up(&c->free_buffer_wait);

                /*
                 * If there were errors on the buffer, and the buffer is not
                 * to be written, free the buffer. There is no point in caching
                 * an invalid buffer.
                 */
                if ((b->read_error || b->write_error) &&
                    !test_bit(B_READING, &b->state) &&
                    !test_bit(B_WRITING, &b->state) &&
                    !test_bit(B_DIRTY, &b->state)) {
                        __unlink_buffer(b);
                        __free_buffer_wake(b);
                }
        }

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
        struct dm_bufio_client *c = b->c;

        dm_bufio_lock(c);

        BUG_ON(test_bit(B_READING, &b->state));

        if (!test_and_set_bit(B_DIRTY, &b->state))
                __relink_lru(b, LIST_DIRTY);

        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0);
        dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);

/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
        int a, f;
        unsigned long buffers_processed = 0;
        struct dm_buffer *b, *tmp;

        dm_bufio_lock(c);
        __write_dirty_buffers_async(c, 0);

again:
        list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
                int dropped_lock = 0;

                if (buffers_processed < c->n_buffers[LIST_DIRTY])
                        buffers_processed++;

                BUG_ON(test_bit(B_READING, &b->state));

                if (test_bit(B_WRITING, &b->state)) {
                        if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
                                dropped_lock = 1;
                                b->hold_count++;
                                dm_bufio_unlock(c);
                                wait_on_bit(&b->state, B_WRITING,
                                            do_io_schedule,
                                            TASK_UNINTERRUPTIBLE);
                                dm_bufio_lock(c);
                                b->hold_count--;
                        } else
                                wait_on_bit(&b->state, B_WRITING,
                                            do_io_schedule,
                                            TASK_UNINTERRUPTIBLE);
                }

                if (!test_bit(B_DIRTY, &b->state) &&
                    !test_bit(B_WRITING, &b->state))
                        __relink_lru(b, LIST_CLEAN);

                dm_bufio_cond_resched();

                /*
                 * If we dropped the lock, the list is no longer consistent,
                 * so we must restart the search.
                 *
                 * In the most common case, the buffer just processed is
                 * relinked to the clean list, so we won't loop scanning the
                 * same buffer again and again.
                 *
                 * This may livelock if there is another thread simultaneously
                 * dirtying buffers, so we count the number of buffers walked
                 * and if it exceeds the total number of buffers, it means that
                 * someone is doing some writes simultaneously with us.  In
                 * this case, stop, dropping the lock.
                 */
                if (dropped_lock)
                        goto again;
        }
        wake_up(&c->free_buffer_wait);
        dm_bufio_unlock(c);

        a = xchg(&c->async_write_error, 0);
        f = dm_bufio_issue_flush(c);
        if (a)
                return a;

        return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);

/*
 * Use dm-io to send an empty barrier and flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
        struct dm_io_request io_req = {
                .bi_rw = REQ_FLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
        };
        struct dm_io_region io_reg = {
                .bdev = c->bdev,
                .sector = 0,
                .count = 0,
        };

        BUG_ON(dm_bufio_in_request());

        return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);

/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but do not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
        struct dm_bufio_client *c = b->c;
        struct dm_buffer *new;

        BUG_ON(dm_bufio_in_request());

        dm_bufio_lock(c);

retry:
        new = __find(c, new_block);
        if (new) {
                if (new->hold_count) {
                        __wait_for_free_buffer(c);
                        goto retry;
                }

                /*
                 * FIXME: Is there any point waiting for a write that's going
                 * to be overwritten in a bit?
                 */
                __make_buffer_clean(new);
                __unlink_buffer(new);
                __free_buffer_wake(new);
        }

        BUG_ON(!b->hold_count);
        BUG_ON(test_bit(B_READING, &b->state));

        __write_dirty_buffer(b);
        if (b->hold_count == 1) {
                wait_on_bit(&b->state, B_WRITING,
                            do_io_schedule, TASK_UNINTERRUPTIBLE);
                set_bit(B_DIRTY, &b->state);
                __unlink_buffer(b);
                __link_buffer(b, new_block, LIST_DIRTY);
        } else {
                sector_t old_block;
                wait_on_bit_lock(&b->state, B_WRITING,
                                 do_io_schedule, TASK_UNINTERRUPTIBLE);
                /*
                 * Relink buffer to "new_block" so that write_callback
                 * sees "new_block" as a block number.
                 * After the write, link the buffer back to old_block.
                 * All this must be done in bufio lock, so that block number
                 * change isn't visible to other threads.
                 */
                old_block = b->block;
                __unlink_buffer(b);
                __link_buffer(b, new_block, b->list_mode);
                submit_io(b, WRITE, new_block, write_endio);
                wait_on_bit(&b->state, B_WRITING,
                            do_io_schedule, TASK_UNINTERRUPTIBLE);
                __unlink_buffer(b);
                __link_buffer(b, old_block, b->list_mode);
        }

        dm_bufio_unlock(c);
        dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
        return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
        return i_size_read(c->bdev->bd_inode) >>
                           (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
        return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
        return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
        return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
        return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);

static void drop_buffers(struct dm_bufio_client *c)
{
        struct dm_buffer *b;
        int i;

        BUG_ON(dm_bufio_in_request());

        /*
         * An optimization so that the buffers are not written one-by-one.
         */
        dm_bufio_write_dirty_buffers_async(c);

        dm_bufio_lock(c);

        while ((b = __get_unclaimed_buffer(c)))
                __free_buffer_wake(b);

        for (i = 0; i < LIST_SIZE; i++)
                list_for_each_entry(b, &c->lru[i], lru_list)
                        DMERR("leaked buffer %llx, hold count %u, list %d",
                              (unsigned long long)b->block, b->hold_count, i);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(!list_empty(&c->lru[i]));

        dm_bufio_unlock(c);
}

/*
 * Test if the buffer is unused and too old, and commit it.
 * If the gfp mask doesn't allow I/O (__GFP_IO is not set), we must not do
 * any I/O because we hold dm_bufio_clients_lock and we would risk deadlock
 * if the I/O gets rerouted to a different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
                                unsigned long max_jiffies)
{
        if (jiffies - b->last_accessed < max_jiffies)
                return 1;

        if (!(gfp & __GFP_IO)) {
                if (test_bit(B_READING, &b->state) ||
                    test_bit(B_WRITING, &b->state) ||
                    test_bit(B_DIRTY, &b->state))
                        return 1;
        }

        if (b->hold_count)
                return 1;

        __make_buffer_clean(b);
        __unlink_buffer(b);
        __free_buffer_wake(b);

        return 0;
}

static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
                   struct shrink_control *sc)
{
        int l;
        struct dm_buffer *b, *tmp;

        for (l = 0; l < LIST_SIZE; l++) {
                list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
                        if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
                            !--nr_to_scan)
                                return;
                dm_bufio_cond_resched();
        }
}

static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct dm_bufio_client *c =
            container_of(shrinker, struct dm_bufio_client, shrinker);
        unsigned long r;
        unsigned long nr_to_scan = sc->nr_to_scan;

        if (sc->gfp_mask & __GFP_IO)
                dm_bufio_lock(c);
        else if (!dm_bufio_trylock(c))
                return !nr_to_scan ? 0 : -1;

        if (nr_to_scan)
                __scan(c, nr_to_scan, sc);

        r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
        if (r > INT_MAX)
                r = INT_MAX;

        dm_bufio_unlock(c);

        return r;
}

/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
                                               unsigned reserved_buffers, unsigned aux_size,
                                               void (*alloc_callback)(struct dm_buffer *),
                                               void (*write_callback)(struct dm_buffer *))
{
        int r;
        struct dm_bufio_client *c;
        unsigned i;

        BUG_ON(block_size < 1 << SECTOR_SHIFT ||
               (block_size & (block_size - 1)));

        c = kmalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                r = -ENOMEM;
                goto bad_client;
        }
        c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
        if (!c->cache_hash) {
                r = -ENOMEM;
                goto bad_hash;
        }

        c->bdev = bdev;
        c->block_size = block_size;
        c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
        c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
                                  ffs(block_size) - 1 - PAGE_SHIFT : 0;
        c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
                                  PAGE_SHIFT - (ffs(block_size) - 1) : 0);
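
        /*
         * Example of the shift calculations above, assuming 4KB pages
         * (PAGE_SHIFT == 12): block_size 4096 has ffs(block_size) - 1 == 12,
         * giving sectors_per_block_bits == 3 and zero for both page-related
         * fields; block_size 512 gives 0, 0 and 3 respectively.
         */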

        c->aux_size = aux_size;
        c->alloc_callback = alloc_callback;
        c->write_callback = write_callback;

        for (i = 0; i < LIST_SIZE; i++) {
                INIT_LIST_HEAD(&c->lru[i]);
                c->n_buffers[i] = 0;
        }

        for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
                INIT_HLIST_HEAD(&c->cache_hash[i]);

        mutex_init(&c->lock);
        INIT_LIST_HEAD(&c->reserved_buffers);
        c->need_reserved_buffers = reserved_buffers;

        init_waitqueue_head(&c->free_buffer_wait);
        c->async_write_error = 0;

        c->dm_io = dm_io_client_create();
        if (IS_ERR(c->dm_io)) {
                r = PTR_ERR(c->dm_io);
                goto bad_dm_io;
        }

        mutex_lock(&dm_bufio_clients_lock);
        if (c->blocks_per_page_bits) {
                if (!DM_BUFIO_CACHE_NAME(c)) {
                        DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
                        if (!DM_BUFIO_CACHE_NAME(c)) {
                                r = -ENOMEM;
                                mutex_unlock(&dm_bufio_clients_lock);
                                goto bad_cache;
                        }
                }

                if (!DM_BUFIO_CACHE(c)) {
                        DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
                                                              c->block_size,
                                                              c->block_size, 0, NULL);
                        if (!DM_BUFIO_CACHE(c)) {
                                r = -ENOMEM;
                                mutex_unlock(&dm_bufio_clients_lock);
                                goto bad_cache;
                        }
                }
        }
        mutex_unlock(&dm_bufio_clients_lock);

        while (c->need_reserved_buffers) {
                struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

                if (!b) {
                        r = -ENOMEM;
                        goto bad_buffer;
                }
                __free_buffer_wake(b);
        }

        mutex_lock(&dm_bufio_clients_lock);
        dm_bufio_client_count++;
        list_add(&c->client_list, &dm_bufio_all_clients);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        c->shrinker.shrink = shrink;
        c->shrinker.seeks = 1;
        c->shrinker.batch = 0;
        register_shrinker(&c->shrinker);

        return c;

bad_buffer:
bad_cache:
        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }
        dm_io_client_destroy(c->dm_io);
bad_dm_io:
        vfree(c->cache_hash);
bad_hash:
        kfree(c);
bad_client:
        return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);

/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
        unsigned i;

        drop_buffers(c);

        unregister_shrinker(&c->shrinker);

        mutex_lock(&dm_bufio_clients_lock);

        list_del(&c->client_list);
        dm_bufio_client_count--;
        __cache_size_refresh();

        mutex_unlock(&dm_bufio_clients_lock);

        for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
                BUG_ON(!hlist_empty(&c->cache_hash[i]));

        BUG_ON(c->need_reserved_buffers);

        while (!list_empty(&c->reserved_buffers)) {
                struct dm_buffer *b = list_entry(c->reserved_buffers.next,
                                                 struct dm_buffer, lru_list);
                list_del(&b->lru_list);
                free_buffer(b);
        }

        for (i = 0; i < LIST_SIZE; i++)
                if (c->n_buffers[i])
                        DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

        for (i = 0; i < LIST_SIZE; i++)
                BUG_ON(c->n_buffers[i]);

        dm_io_client_destroy(c->dm_io);
        vfree(c->cache_hash);
        kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);

static void cleanup_old_buffers(void)
{
        unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
        struct dm_bufio_client *c;

        if (max_age > ULONG_MAX / HZ)
                max_age = ULONG_MAX / HZ;

        mutex_lock(&dm_bufio_clients_lock);
        list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
                if (!dm_bufio_trylock(c))
                        continue;

                while (!list_empty(&c->lru[LIST_CLEAN])) {
                        struct dm_buffer *b;
                        b = list_entry(c->lru[LIST_CLEAN].prev,
                                       struct dm_buffer, lru_list);
                        if (__cleanup_old_buffer(b, 0, max_age * HZ))
                                break;
                        dm_bufio_cond_resched();
                }

                dm_bufio_unlock(c);
                dm_bufio_cond_resched();
        }
        mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
        cleanup_old_buffers();

        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);
}

/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
        __u64 mem;

        memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
        memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

        mem = (__u64)((totalram_pages - totalhigh_pages) *
                      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

        if (mem > ULONG_MAX)
                mem = ULONG_MAX;

#ifdef CONFIG_MMU
        /*
         * Get the size of vmalloc space the same way as VMALLOC_TOTAL
         * in fs/proc/internal.h
         */
        if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
                mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif
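
        /*
         * Example (illustrative): on a 64-bit machine with 8GB of
         * directly mapped memory, the default works out to 2% of it,
         * roughly 160MB; on 32-bit, the vmalloc cap above usually
         * shrinks it further.
         */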

        dm_bufio_default_cache_size = mem;

        mutex_lock(&dm_bufio_clients_lock);
        __cache_size_refresh();
        mutex_unlock(&dm_bufio_clients_lock);

        dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
        if (!dm_bufio_wq)
                return -ENOMEM;

        INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
        queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
                           DM_BUFIO_WORK_TIMER_SECS * HZ);

        return 0;
}

/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
        int bug = 0;
        int i;

        cancel_delayed_work_sync(&dm_bufio_work);
        destroy_workqueue(dm_bufio_wq);

        for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
                struct kmem_cache *kc = dm_bufio_caches[i];

                if (kc)
                        kmem_cache_destroy(kc);
        }

        for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
                kfree(dm_bufio_cache_names[i]);

        if (dm_bufio_client_count) {
                DMCRIT("%s: dm_bufio_client_count leaked: %d",
                        __func__, dm_bufio_client_count);
                bug = 1;
        }

        if (dm_bufio_current_allocated) {
                DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
                        __func__, dm_bufio_current_allocated);
                bug = 1;
        }

        if (dm_bufio_allocated_get_free_pages) {
                DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
                       __func__, dm_bufio_allocated_get_free_pages);
                bug = 1;
        }

        if (dm_bufio_allocated_vmalloc) {
                DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
                       __func__, dm_bufio_allocated_vmalloc);
                bug = 1;
        }

        if (bug)
                BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");