linux/mm/backing-dev.c
#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>


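/* Device class used to expose per-BDI tunables under /sys/class/bdi/. */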
static struct class *bdi_class;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

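/*
 * Back the per-BDI debugfs "stats" file: dump this BDI's writeback and
 * reclaimable page counters together with its dirty thresholds, all in kB.
 */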
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;

        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:     %8lu kB\n"
                   "BdiReclaimable:   %8lu kB\n"
                   "BdiDirtyThresh:   %8lu kB\n"
                   "DirtyThresh:      %8lu kB\n"
                   "BackgroundThresh: %8lu kB\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh));
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

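/*
 * sysfs store for the read_ahead_kb attribute: parse a decimal value in kB
 * and convert it to a page count for bdi->ra_pages.  Anything other than a
 * plain number (optionally newline-terminated) is rejected with -EINVAL.
 */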
static ssize_t read_ahead_kb_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

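/*
 * BDI_SHOW() generates the sysfs show routine for an attribute, printing the
 * given expression for the device's backing_dev_info as a decimal value.
 */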
#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

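/*
 * min_ratio/max_ratio stores: hand the parsed percentage to
 * bdi_set_min_ratio()/bdi_set_max_ratio(), which bound this BDI's share of
 * the global dirty limit.
 */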
static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};

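/*
 * Create the "bdi" class at postcore initcall time and attach the default
 * device attributes, so every subsequently registered BDI device picks up
 * the read_ahead_kb/min_ratio/max_ratio files.
 */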
static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}

postcore_initcall(bdi_class_init);

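/*
 * Register a backing_dev_info as a device in the bdi class.  The device name
 * is built from the printf-style format arguments; a debugfs directory of the
 * same name is created alongside it.  Registering an already-registered BDI
 * is a no-op that returns 0.
 */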
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        int ret = 0;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                goto exit;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto exit;
        }

        bdi->dev = dev;
        bdi_debug_register(bdi, dev_name(dev));

exit:
        return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

void bdi_unregister(struct backing_dev_info *bdi)
{
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}
EXPORT_SYMBOL(bdi_unregister);

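/*
 * Initialise a backing_dev_info before it is registered: set the default
 * ratios and bring up the per-CPU stat counters and the completion
 * proportion state.  On failure, any counters already created are torn down
 * again and the error is returned.
 */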
int bdi_init(struct backing_dev_info *bdi)
{
        int i;
        int err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;
        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

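/*
 * Tear down a backing_dev_info: unregister it if it is still registered and
 * release the per-CPU counters and proportion state set up by bdi_init().
 */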
void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        bdi_unregister(bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

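/*
 * Wait queues for tasks sleeping in congestion_wait(), one for readers and
 * one for writers, indexed by READ/WRITE.  clear_bdi_congested() wakes the
 * corresponding queue when a device leaves the congested state.
 */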
static wait_queue_head_t congestion_wqh[2] = {
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
        };


void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[rw];

        bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
        clear_bit(bit, &bdi->state);
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int rw)
{
        enum bdi_state bit;

        bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
        set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @rw: READ or WRITE
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int rw, long timeout)
{
        long ret;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[rw];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
        return ret;
}
EXPORT_SYMBOL(congestion_wait);
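
/*
 * Typical (illustrative) caller pattern, not part of the original file:
 * throttle a writer briefly while its backing device is congested, e.g.
 *
 *	if (bdi_write_congested(bdi))
 *		congestion_wait(WRITE, HZ / 10);
 *
 * bdi_write_congested() is the helper declared in <linux/backing-dev.h>;
 * the HZ/10 timeout is only an example value.
 */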
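
/*
 * Usage sketch (illustrative only, not part of the original file): how a
 * driver-private backing_dev_info would typically be set up and torn down
 * with the API above.  The example_* names are hypothetical.
 */
#if 0
static struct backing_dev_info example_bdi;

static int example_setup(void)
{
        int err;

        err = bdi_init(&example_bdi);
        if (err)
                return err;

        err = bdi_register(&example_bdi, NULL, "example");
        if (err)
                bdi_destroy(&example_bdi);

        return err;
}

static void example_teardown(void)
{
        /* bdi_destroy() also unregisters the device if it is still registered */
        bdi_destroy(&example_bdi);
}
#endif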