linux/mm/backing-dev.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2
   3#include <linux/wait.h>
   4#include <linux/rbtree.h>
   5#include <linux/backing-dev.h>
   6#include <linux/kthread.h>
   7#include <linux/freezer.h>
   8#include <linux/fs.h>
   9#include <linux/pagemap.h>
  10#include <linux/mm.h>
  11#include <linux/sched/mm.h>
  12#include <linux/sched.h>
  13#include <linux/module.h>
  14#include <linux/writeback.h>
  15#include <linux/device.h>
  16#include <trace/events/writeback.h>
  17
  18struct backing_dev_info noop_backing_dev_info;
  19EXPORT_SYMBOL_GPL(noop_backing_dev_info);
  20
  21static struct class *bdi_class;
  22static const char *bdi_unknown_name = "(unknown)";
  23
  24/*
  25 * bdi_lock protects bdi_tree and updates to bdi_list. bdi_list has RCU
  26 * reader side locking.
  27 */
  28DEFINE_SPINLOCK(bdi_lock);
  29static u64 bdi_id_cursor;
  30static struct rb_root bdi_tree = RB_ROOT;
  31LIST_HEAD(bdi_list);
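/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * per the locking comment above, writers take bdi_lock while readers may
 * walk bdi_list under RCU, roughly like this ("inspect_bdi" is a
 * hypothetical helper):
 *
 *	struct backing_dev_info *bdi;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
 *		inspect_bdi(bdi);
 *	rcu_read_unlock();
 */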
  32
  33/* bdi_wq serves all asynchronous writeback tasks */
  34struct workqueue_struct *bdi_wq;
  35
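/* K(x) converts a page count to kilobytes: PAGE_SHIFT - 10 == log2(PAGE_SIZE / 1024) */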
  36#define K(x) ((x) << (PAGE_SHIFT - 10))
  37
  38#ifdef CONFIG_DEBUG_FS
  39#include <linux/debugfs.h>
  40#include <linux/seq_file.h>
  41
  42static struct dentry *bdi_debug_root;
  43
  44static void bdi_debug_init(void)
  45{
  46        bdi_debug_root = debugfs_create_dir("bdi", NULL);
  47}
  48
  49static int bdi_debug_stats_show(struct seq_file *m, void *v)
  50{
  51        struct backing_dev_info *bdi = m->private;
  52        struct bdi_writeback *wb = &bdi->wb;
  53        unsigned long background_thresh;
  54        unsigned long dirty_thresh;
  55        unsigned long wb_thresh;
  56        unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
  57        struct inode *inode;
  58
  59        nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
  60        spin_lock(&wb->list_lock);
  61        list_for_each_entry(inode, &wb->b_dirty, i_io_list)
  62                nr_dirty++;
  63        list_for_each_entry(inode, &wb->b_io, i_io_list)
  64                nr_io++;
  65        list_for_each_entry(inode, &wb->b_more_io, i_io_list)
  66                nr_more_io++;
  67        list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
  68                if (inode->i_state & I_DIRTY_TIME)
  69                        nr_dirty_time++;
  70        spin_unlock(&wb->list_lock);
  71
  72        global_dirty_limits(&background_thresh, &dirty_thresh);
  73        wb_thresh = wb_calc_thresh(wb, dirty_thresh);
  74
  75        seq_printf(m,
  76                   "BdiWriteback:       %10lu kB\n"
  77                   "BdiReclaimable:     %10lu kB\n"
  78                   "BdiDirtyThresh:     %10lu kB\n"
  79                   "DirtyThresh:        %10lu kB\n"
  80                   "BackgroundThresh:   %10lu kB\n"
  81                   "BdiDirtied:         %10lu kB\n"
  82                   "BdiWritten:         %10lu kB\n"
  83                   "BdiWriteBandwidth:  %10lu kBps\n"
  84                   "b_dirty:            %10lu\n"
  85                   "b_io:               %10lu\n"
  86                   "b_more_io:          %10lu\n"
  87                   "b_dirty_time:       %10lu\n"
  88                   "bdi_list:           %10u\n"
  89                   "state:              %10lx\n",
  90                   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
  91                   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
  92                   K(wb_thresh),
  93                   K(dirty_thresh),
  94                   K(background_thresh),
  95                   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
  96                   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
  97                   (unsigned long) K(wb->write_bandwidth),
  98                   nr_dirty,
  99                   nr_io,
 100                   nr_more_io,
 101                   nr_dirty_time,
 102                   !list_empty(&bdi->bdi_list), bdi->wb.state);
 103
 104        return 0;
 105}
 106DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);
 107
 108static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 109{
 110        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
 111
 112        debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
 113                            &bdi_debug_stats_fops);
 114}
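/*
 * Illustrative note (editorial addition, not part of the original file):
 * with debugfs mounted at /sys/kernel/debug, the statistics exported above
 * can be read from userspace, e.g.:
 *
 *	cat /sys/kernel/debug/bdi/8:0/stats
 */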
 115
 116static void bdi_debug_unregister(struct backing_dev_info *bdi)
 117{
 118        debugfs_remove_recursive(bdi->debug_dir);
 119}
 120#else
 121static inline void bdi_debug_init(void)
 122{
 123}
 124static inline void bdi_debug_register(struct backing_dev_info *bdi,
 125                                      const char *name)
 126{
 127}
 128static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 129{
 130}
 131#endif
 132
 133static ssize_t read_ahead_kb_store(struct device *dev,
 134                                  struct device_attribute *attr,
 135                                  const char *buf, size_t count)
 136{
 137        struct backing_dev_info *bdi = dev_get_drvdata(dev);
 138        unsigned long read_ahead_kb;
 139        ssize_t ret;
 140
 141        ret = kstrtoul(buf, 10, &read_ahead_kb);
 142        if (ret < 0)
 143                return ret;
 144
 145        bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
 146
 147        return count;
 148}
 149
 150#define BDI_SHOW(name, expr)                                            \
 151static ssize_t name##_show(struct device *dev,                          \
 152                           struct device_attribute *attr, char *buf)    \
 153{                                                                       \
 154        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
 155                                                                        \
 156        return sysfs_emit(buf, "%lld\n", (long long)expr);              \
 157}                                                                       \
 158static DEVICE_ATTR_RW(name);
 159
 160BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
 161
 162static ssize_t min_ratio_store(struct device *dev,
 163                struct device_attribute *attr, const char *buf, size_t count)
 164{
 165        struct backing_dev_info *bdi = dev_get_drvdata(dev);
 166        unsigned int ratio;
 167        ssize_t ret;
 168
 169        ret = kstrtouint(buf, 10, &ratio);
 170        if (ret < 0)
 171                return ret;
 172
 173        ret = bdi_set_min_ratio(bdi, ratio);
 174        if (!ret)
 175                ret = count;
 176
 177        return ret;
 178}
 179BDI_SHOW(min_ratio, bdi->min_ratio)
 180
 181static ssize_t max_ratio_store(struct device *dev,
 182                struct device_attribute *attr, const char *buf, size_t count)
 183{
 184        struct backing_dev_info *bdi = dev_get_drvdata(dev);
 185        unsigned int ratio;
 186        ssize_t ret;
 187
 188        ret = kstrtouint(buf, 10, &ratio);
 189        if (ret < 0)
 190                return ret;
 191
 192        ret = bdi_set_max_ratio(bdi, ratio);
 193        if (!ret)
 194                ret = count;
 195
 196        return ret;
 197}
 198BDI_SHOW(max_ratio, bdi->max_ratio)
 199
 200static ssize_t stable_pages_required_show(struct device *dev,
 201                                          struct device_attribute *attr,
 202                                          char *buf)
 203{
 204        dev_warn_once(dev,
 205                "the stable_pages_required attribute has been removed. Use the stable_writes queue attribute instead.\n");
 206        return sysfs_emit(buf, "%d\n", 0);
 207}
 208static DEVICE_ATTR_RO(stable_pages_required);
 209
 210static struct attribute *bdi_dev_attrs[] = {
 211        &dev_attr_read_ahead_kb.attr,
 212        &dev_attr_min_ratio.attr,
 213        &dev_attr_max_ratio.attr,
 214        &dev_attr_stable_pages_required.attr,
 215        NULL,
 216};
 217ATTRIBUTE_GROUPS(bdi_dev);
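/*
 * Illustrative note (editorial addition, not part of the original file):
 * these attributes are exposed under /sys/class/bdi/<bdi-name>/.  For a
 * block device whose bdi is named "8:0", readahead could be tuned with:
 *
 *	echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * min_ratio and max_ratio are written as integer percentages of the total
 * writeback cache that this bdi is allowed to use.
 */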
 218
 219static __init int bdi_class_init(void)
 220{
 221        bdi_class = class_create(THIS_MODULE, "bdi");
 222        if (IS_ERR(bdi_class))
 223                return PTR_ERR(bdi_class);
 224
 225        bdi_class->dev_groups = bdi_dev_groups;
 226        bdi_debug_init();
 227
 228        return 0;
 229}
 230postcore_initcall(bdi_class_init);
 231
 232static int bdi_init(struct backing_dev_info *bdi);
 233
 234static int __init default_bdi_init(void)
 235{
 236        int err;
 237
 238        bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
 239                                 WQ_SYSFS, 0);
 240        if (!bdi_wq)
 241                return -ENOMEM;
 242
 243        err = bdi_init(&noop_backing_dev_info);
 244
 245        return err;
 246}
 247subsys_initcall(default_bdi_init);
 248
 249/*
  250 * This function is used when the first inode for this wb is marked dirty. It
  251 * wakes up the corresponding bdi thread, which should then take care of the
  252 * periodic background write-out of dirty inodes. Since the write-out would
  253 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
  254 * set up a timer which wakes the bdi thread up later.
  255 *
  256 * Note, we wouldn't bother setting up the timer, but this function is on the
  257 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
  258 * switches by delaying the wake-up.
 259 *
 260 * We have to be careful not to postpone flush work if it is scheduled for
 261 * earlier. Thus we use queue_delayed_work().
 262 */
 263void wb_wakeup_delayed(struct bdi_writeback *wb)
 264{
 265        unsigned long timeout;
 266
 267        timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
 268        spin_lock_bh(&wb->work_lock);
 269        if (test_bit(WB_registered, &wb->state))
 270                queue_delayed_work(bdi_wq, &wb->dwork, timeout);
 271        spin_unlock_bh(&wb->work_lock);
 272}
 273
 274/*
 275 * Initial write bandwidth: 100 MB/s
 276 */
 277#define INIT_BW         (100 << (20 - PAGE_SHIFT))
 278
 279static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
 280                   gfp_t gfp)
 281{
 282        int i, err;
 283
 284        memset(wb, 0, sizeof(*wb));
 285
 286        if (wb != &bdi->wb)
 287                bdi_get(bdi);
 288        wb->bdi = bdi;
 289        wb->last_old_flush = jiffies;
 290        INIT_LIST_HEAD(&wb->b_dirty);
 291        INIT_LIST_HEAD(&wb->b_io);
 292        INIT_LIST_HEAD(&wb->b_more_io);
 293        INIT_LIST_HEAD(&wb->b_dirty_time);
 294        spin_lock_init(&wb->list_lock);
 295
 296        wb->bw_time_stamp = jiffies;
 297        wb->balanced_dirty_ratelimit = INIT_BW;
 298        wb->dirty_ratelimit = INIT_BW;
 299        wb->write_bandwidth = INIT_BW;
 300        wb->avg_write_bandwidth = INIT_BW;
 301
 302        spin_lock_init(&wb->work_lock);
 303        INIT_LIST_HEAD(&wb->work_list);
 304        INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
 305        wb->dirty_sleep = jiffies;
 306
 307        err = fprop_local_init_percpu(&wb->completions, gfp);
 308        if (err)
 309                goto out_put_bdi;
 310
 311        for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
 312                err = percpu_counter_init(&wb->stat[i], 0, gfp);
 313                if (err)
 314                        goto out_destroy_stat;
 315        }
 316
 317        return 0;
 318
 319out_destroy_stat:
 320        while (i--)
 321                percpu_counter_destroy(&wb->stat[i]);
 322        fprop_local_destroy_percpu(&wb->completions);
 323out_put_bdi:
 324        if (wb != &bdi->wb)
 325                bdi_put(bdi);
 326        return err;
 327}
 328
 329static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb);
 330
 331/*
  332 * Remove bdi from the global list and shut down any threads we have running
 333 */
 334static void wb_shutdown(struct bdi_writeback *wb)
 335{
 336        /* Make sure nobody queues further work */
 337        spin_lock_bh(&wb->work_lock);
 338        if (!test_and_clear_bit(WB_registered, &wb->state)) {
 339                spin_unlock_bh(&wb->work_lock);
 340                return;
 341        }
 342        spin_unlock_bh(&wb->work_lock);
 343
 344        cgwb_remove_from_bdi_list(wb);
 345        /*
  346         * Drain work list and shut down the delayed_work.  !WB_registered
 347         * tells wb_workfn() that @wb is dying and its work_list needs to
 348         * be drained no matter what.
 349         */
 350        mod_delayed_work(bdi_wq, &wb->dwork, 0);
 351        flush_delayed_work(&wb->dwork);
 352        WARN_ON(!list_empty(&wb->work_list));
 353}
 354
 355static void wb_exit(struct bdi_writeback *wb)
 356{
 357        int i;
 358
 359        WARN_ON(delayed_work_pending(&wb->dwork));
 360
 361        for (i = 0; i < NR_WB_STAT_ITEMS; i++)
 362                percpu_counter_destroy(&wb->stat[i]);
 363
 364        fprop_local_destroy_percpu(&wb->completions);
 365        if (wb != &wb->bdi->wb)
 366                bdi_put(wb->bdi);
 367}
 368
 369#ifdef CONFIG_CGROUP_WRITEBACK
 370
 371#include <linux/memcontrol.h>
 372
 373/*
 374 * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and
 375 * memcg->cgwb_list.  bdi->cgwb_tree is also RCU protected.
 376 */
 377static DEFINE_SPINLOCK(cgwb_lock);
 378static struct workqueue_struct *cgwb_release_wq;
 379
 380static LIST_HEAD(offline_cgwbs);
 381static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
 382static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);
 383
 384static void cgwb_release_workfn(struct work_struct *work)
 385{
 386        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
 387                                                release_work);
 388        struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
 389
 390        mutex_lock(&wb->bdi->cgwb_release_mutex);
 391        wb_shutdown(wb);
 392
 393        css_put(wb->memcg_css);
 394        css_put(wb->blkcg_css);
 395        mutex_unlock(&wb->bdi->cgwb_release_mutex);
 396
 397        /* triggers blkg destruction if no online users left */
 398        blkcg_unpin_online(blkcg);
 399
 400        fprop_local_destroy_percpu(&wb->memcg_completions);
 401
 402        spin_lock_irq(&cgwb_lock);
 403        list_del(&wb->offline_node);
 404        spin_unlock_irq(&cgwb_lock);
 405
 406        percpu_ref_exit(&wb->refcnt);
 407        wb_exit(wb);
 408        WARN_ON_ONCE(!list_empty(&wb->b_attached));
 409        kfree_rcu(wb, rcu);
 410}
 411
 412static void cgwb_release(struct percpu_ref *refcnt)
 413{
 414        struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
 415                                                refcnt);
 416        queue_work(cgwb_release_wq, &wb->release_work);
 417}
 418
 419static void cgwb_kill(struct bdi_writeback *wb)
 420{
 421        lockdep_assert_held(&cgwb_lock);
 422
 423        WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
 424        list_del(&wb->memcg_node);
 425        list_del(&wb->blkcg_node);
 426        list_add(&wb->offline_node, &offline_cgwbs);
 427        percpu_ref_kill(&wb->refcnt);
 428}
 429
 430static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 431{
 432        spin_lock_irq(&cgwb_lock);
 433        list_del_rcu(&wb->bdi_node);
 434        spin_unlock_irq(&cgwb_lock);
 435}
 436
 437static int cgwb_create(struct backing_dev_info *bdi,
 438                       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
 439{
 440        struct mem_cgroup *memcg;
 441        struct cgroup_subsys_state *blkcg_css;
 442        struct blkcg *blkcg;
 443        struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
 444        struct bdi_writeback *wb;
 445        unsigned long flags;
 446        int ret = 0;
 447
 448        memcg = mem_cgroup_from_css(memcg_css);
 449        blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 450        blkcg = css_to_blkcg(blkcg_css);
 451        memcg_cgwb_list = &memcg->cgwb_list;
 452        blkcg_cgwb_list = &blkcg->cgwb_list;
 453
 454        /* look up again under lock and discard on blkcg mismatch */
 455        spin_lock_irqsave(&cgwb_lock, flags);
 456        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 457        if (wb && wb->blkcg_css != blkcg_css) {
 458                cgwb_kill(wb);
 459                wb = NULL;
 460        }
 461        spin_unlock_irqrestore(&cgwb_lock, flags);
 462        if (wb)
 463                goto out_put;
 464
 465        /* need to create a new one */
 466        wb = kmalloc(sizeof(*wb), gfp);
 467        if (!wb) {
 468                ret = -ENOMEM;
 469                goto out_put;
 470        }
 471
 472        ret = wb_init(wb, bdi, gfp);
 473        if (ret)
 474                goto err_free;
 475
 476        ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
 477        if (ret)
 478                goto err_wb_exit;
 479
 480        ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
 481        if (ret)
 482                goto err_ref_exit;
 483
 484        wb->memcg_css = memcg_css;
 485        wb->blkcg_css = blkcg_css;
 486        INIT_LIST_HEAD(&wb->b_attached);
 487        INIT_WORK(&wb->release_work, cgwb_release_workfn);
 488        set_bit(WB_registered, &wb->state);
 489
 490        /*
 491         * The root wb determines the registered state of the whole bdi and
 492         * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
 493         * whether they're still online.  Don't link @wb if any is dead.
 494         * See wb_memcg_offline() and wb_blkcg_offline().
 495         */
 496        ret = -ENODEV;
 497        spin_lock_irqsave(&cgwb_lock, flags);
 498        if (test_bit(WB_registered, &bdi->wb.state) &&
 499            blkcg_cgwb_list->next && memcg_cgwb_list->next) {
 500                /* we might have raced another instance of this function */
 501                ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
 502                if (!ret) {
 503                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
 504                        list_add(&wb->memcg_node, memcg_cgwb_list);
 505                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
 506                        blkcg_pin_online(blkcg);
 507                        css_get(memcg_css);
 508                        css_get(blkcg_css);
 509                }
 510        }
 511        spin_unlock_irqrestore(&cgwb_lock, flags);
 512        if (ret) {
 513                if (ret == -EEXIST)
 514                        ret = 0;
 515                goto err_fprop_exit;
 516        }
 517        goto out_put;
 518
 519err_fprop_exit:
 520        fprop_local_destroy_percpu(&wb->memcg_completions);
 521err_ref_exit:
 522        percpu_ref_exit(&wb->refcnt);
 523err_wb_exit:
 524        wb_exit(wb);
 525err_free:
 526        kfree(wb);
 527out_put:
 528        css_put(blkcg_css);
 529        return ret;
 530}
 531
 532/**
 533 * wb_get_lookup - get wb for a given memcg
 534 * @bdi: target bdi
 535 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 536 *
 537 * Try to get the wb for @memcg_css on @bdi.  The returned wb has its
 538 * refcount incremented.
 539 *
 540 * This function uses css_get() on @memcg_css and thus expects its refcnt
 541 * to be positive on invocation.  IOW, rcu_read_lock() protection on
  542 * @memcg_css isn't enough; take a reference (e.g. css_tryget()) first.
 543 *
 544 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 545 * memcg on the default hierarchy, memcg association is guaranteed to be
  546 * more specific (equal to or a descendant of the associated blkcg) and can
 547 * identify both the memcg and blkcg associations.
 548 *
 549 * Because the blkcg associated with a memcg may change as blkcg is enabled
 550 * and disabled closer to root in the hierarchy, each wb keeps track of
 551 * both the memcg and blkcg associated with it and verifies the blkcg on
 552 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 553 * created.
 554 */
 555struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
 556                                    struct cgroup_subsys_state *memcg_css)
 557{
 558        struct bdi_writeback *wb;
 559
 560        if (!memcg_css->parent)
 561                return &bdi->wb;
 562
 563        rcu_read_lock();
 564        wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 565        if (wb) {
 566                struct cgroup_subsys_state *blkcg_css;
 567
 568                /* see whether the blkcg association has changed */
 569                blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
 570                if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb)))
 571                        wb = NULL;
 572                css_put(blkcg_css);
 573        }
 574        rcu_read_unlock();
 575
 576        return wb;
 577}
 578
 579/**
 580 * wb_get_create - get wb for a given memcg, create if necessary
 581 * @bdi: target bdi
 582 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 583 * @gfp: allocation mask to use
 584 *
 585 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 586 * create one.  See wb_get_lookup() for more details.
 587 */
 588struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
 589                                    struct cgroup_subsys_state *memcg_css,
 590                                    gfp_t gfp)
 591{
 592        struct bdi_writeback *wb;
 593
 594        might_alloc(gfp);
 595
 596        if (!memcg_css->parent)
 597                return &bdi->wb;
 598
 599        do {
 600                wb = wb_get_lookup(bdi, memcg_css);
 601        } while (!wb && !cgwb_create(bdi, memcg_css, gfp));
 602
 603        return wb;
 604}
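/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the caller must already hold a reference on @memcg_css, and the
 * returned wb is dropped with wb_put() when no longer needed:
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create(bdi, memcg_css, GFP_KERNEL);
 *	if (wb) {
 *		... issue writeback against wb ...
 *		wb_put(wb);
 *	}
 */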
 605
 606static int cgwb_bdi_init(struct backing_dev_info *bdi)
 607{
 608        int ret;
 609
 610        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
 611        mutex_init(&bdi->cgwb_release_mutex);
 612        init_rwsem(&bdi->wb_switch_rwsem);
 613
 614        ret = wb_init(&bdi->wb, bdi, GFP_KERNEL);
 615        if (!ret) {
 616                bdi->wb.memcg_css = &root_mem_cgroup->css;
 617                bdi->wb.blkcg_css = blkcg_root_css;
 618        }
 619        return ret;
 620}
 621
 622static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
 623{
 624        struct radix_tree_iter iter;
 625        void **slot;
 626        struct bdi_writeback *wb;
 627
 628        WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 629
 630        spin_lock_irq(&cgwb_lock);
 631        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
 632                cgwb_kill(*slot);
 633        spin_unlock_irq(&cgwb_lock);
 634
 635        mutex_lock(&bdi->cgwb_release_mutex);
 636        spin_lock_irq(&cgwb_lock);
 637        while (!list_empty(&bdi->wb_list)) {
 638                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
 639                                      bdi_node);
 640                spin_unlock_irq(&cgwb_lock);
 641                wb_shutdown(wb);
 642                spin_lock_irq(&cgwb_lock);
 643        }
 644        spin_unlock_irq(&cgwb_lock);
 645        mutex_unlock(&bdi->cgwb_release_mutex);
 646}
 647
 648/*
 649 * cleanup_offline_cgwbs_workfn - try to release dying cgwbs
 650 *
 651 * Try to release dying cgwbs by switching attached inodes to the nearest
  652 * living ancestor's writeback. Processed wbs are placed at the end
  653 * of the list to guarantee forward progress.
 654 */
 655static void cleanup_offline_cgwbs_workfn(struct work_struct *work)
 656{
 657        struct bdi_writeback *wb;
 658        LIST_HEAD(processed);
 659
 660        spin_lock_irq(&cgwb_lock);
 661
 662        while (!list_empty(&offline_cgwbs)) {
 663                wb = list_first_entry(&offline_cgwbs, struct bdi_writeback,
 664                                      offline_node);
 665                list_move(&wb->offline_node, &processed);
 666
 667                /*
 668                 * If wb is dirty, cleaning up the writeback by switching
 669                 * attached inodes will result in an effective removal of any
 670                 * bandwidth restrictions, which isn't the goal.  Instead,
  671                 * it can be postponed until the next run, by which time
  672                 * all io will likely have completed.  If some inodes get
  673                 * re-dirtied in the meantime, they will eventually be
  674                 * switched to a new cgwb.
 675                 */
 676                if (wb_has_dirty_io(wb))
 677                        continue;
 678
 679                if (!wb_tryget(wb))
 680                        continue;
 681
 682                spin_unlock_irq(&cgwb_lock);
 683                while (cleanup_offline_cgwb(wb))
 684                        cond_resched();
 685                spin_lock_irq(&cgwb_lock);
 686
 687                wb_put(wb);
 688        }
 689
 690        if (!list_empty(&processed))
 691                list_splice_tail(&processed, &offline_cgwbs);
 692
 693        spin_unlock_irq(&cgwb_lock);
 694}
 695
 696/**
 697 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 698 * @memcg: memcg being offlined
 699 *
 700 * Also prevents creation of any new wb's associated with @memcg.
 701 */
 702void wb_memcg_offline(struct mem_cgroup *memcg)
 703{
 704        struct list_head *memcg_cgwb_list = &memcg->cgwb_list;
 705        struct bdi_writeback *wb, *next;
 706
 707        spin_lock_irq(&cgwb_lock);
 708        list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
 709                cgwb_kill(wb);
 710        memcg_cgwb_list->next = NULL;   /* prevent new wb's */
 711        spin_unlock_irq(&cgwb_lock);
 712
 713        queue_work(system_unbound_wq, &cleanup_offline_cgwbs_work);
 714}
 715
 716/**
 717 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 718 * @blkcg: blkcg being offlined
 719 *
 720 * Also prevents creation of any new wb's associated with @blkcg.
 721 */
 722void wb_blkcg_offline(struct blkcg *blkcg)
 723{
 724        struct bdi_writeback *wb, *next;
 725
 726        spin_lock_irq(&cgwb_lock);
 727        list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
 728                cgwb_kill(wb);
 729        blkcg->cgwb_list.next = NULL;   /* prevent new wb's */
 730        spin_unlock_irq(&cgwb_lock);
 731}
 732
 733static void cgwb_bdi_register(struct backing_dev_info *bdi)
 734{
 735        spin_lock_irq(&cgwb_lock);
 736        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 737        spin_unlock_irq(&cgwb_lock);
 738}
 739
 740static int __init cgwb_init(void)
 741{
 742        /*
 743         * There can be many concurrent release work items overwhelming
 744         * system_wq.  Put them in a separate wq and limit concurrency.
 745         * There's no point in executing many of these in parallel.
 746         */
 747        cgwb_release_wq = alloc_workqueue("cgwb_release", 0, 1);
 748        if (!cgwb_release_wq)
 749                return -ENOMEM;
 750
 751        return 0;
 752}
 753subsys_initcall(cgwb_init);
 754
 755#else   /* CONFIG_CGROUP_WRITEBACK */
 756
 757static int cgwb_bdi_init(struct backing_dev_info *bdi)
 758{
 759        return wb_init(&bdi->wb, bdi, GFP_KERNEL);
 760}
 761
 762static void cgwb_bdi_unregister(struct backing_dev_info *bdi) { }
 763
 764static void cgwb_bdi_register(struct backing_dev_info *bdi)
 765{
 766        list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);
 767}
 768
 769static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 770{
 771        list_del_rcu(&wb->bdi_node);
 772}
 773
 774#endif  /* CONFIG_CGROUP_WRITEBACK */
 775
 776static int bdi_init(struct backing_dev_info *bdi)
 777{
 778        int ret;
 779
 780        bdi->dev = NULL;
 781
 782        kref_init(&bdi->refcnt);
 783        bdi->min_ratio = 0;
 784        bdi->max_ratio = 100;
 785        bdi->max_prop_frac = FPROP_FRAC_BASE;
 786        INIT_LIST_HEAD(&bdi->bdi_list);
 787        INIT_LIST_HEAD(&bdi->wb_list);
 788        init_waitqueue_head(&bdi->wb_waitq);
 789
 790        ret = cgwb_bdi_init(bdi);
 791
 792        return ret;
 793}
 794
 795struct backing_dev_info *bdi_alloc(int node_id)
 796{
 797        struct backing_dev_info *bdi;
 798
 799        bdi = kzalloc_node(sizeof(*bdi), GFP_KERNEL, node_id);
 800        if (!bdi)
 801                return NULL;
 802
 803        if (bdi_init(bdi)) {
 804                kfree(bdi);
 805                return NULL;
 806        }
 807        bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
 808        bdi->ra_pages = VM_READAHEAD_PAGES;
 809        bdi->io_pages = VM_READAHEAD_PAGES;
 810        return bdi;
 811}
 812EXPORT_SYMBOL(bdi_alloc);
 813
 814static struct rb_node **bdi_lookup_rb_node(u64 id, struct rb_node **parentp)
 815{
 816        struct rb_node **p = &bdi_tree.rb_node;
 817        struct rb_node *parent = NULL;
 818        struct backing_dev_info *bdi;
 819
 820        lockdep_assert_held(&bdi_lock);
 821
 822        while (*p) {
 823                parent = *p;
 824                bdi = rb_entry(parent, struct backing_dev_info, rb_node);
 825
 826                if (bdi->id > id)
 827                        p = &(*p)->rb_left;
 828                else if (bdi->id < id)
 829                        p = &(*p)->rb_right;
 830                else
 831                        break;
 832        }
 833
 834        if (parentp)
 835                *parentp = parent;
 836        return p;
 837}
 838
 839/**
 840 * bdi_get_by_id - lookup and get bdi from its id
 841 * @id: bdi id to lookup
 842 *
 843 * Find bdi matching @id and get it.  Returns NULL if the matching bdi
 844 * doesn't exist or is already unregistered.
 845 */
 846struct backing_dev_info *bdi_get_by_id(u64 id)
 847{
 848        struct backing_dev_info *bdi = NULL;
 849        struct rb_node **p;
 850
 851        spin_lock_bh(&bdi_lock);
 852        p = bdi_lookup_rb_node(id, NULL);
 853        if (*p) {
 854                bdi = rb_entry(*p, struct backing_dev_info, rb_node);
 855                bdi_get(bdi);
 856        }
 857        spin_unlock_bh(&bdi_lock);
 858
 859        return bdi;
 860}
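/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a successful lookup returns a referenced bdi which the caller
 * drops with bdi_put():
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_get_by_id(id);
 *	if (bdi) {
 *		... use bdi ...
 *		bdi_put(bdi);
 *	}
 */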
 861
 862int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args)
 863{
 864        struct device *dev;
 865        struct rb_node *parent, **p;
 866
 867        if (bdi->dev)   /* The driver needs to use separate queues per device */
 868                return 0;
 869
 870        vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args);
 871        dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name);
 872        if (IS_ERR(dev))
 873                return PTR_ERR(dev);
 874
 875        cgwb_bdi_register(bdi);
 876        bdi->dev = dev;
 877
 878        bdi_debug_register(bdi, dev_name(dev));
 879        set_bit(WB_registered, &bdi->wb.state);
 880
 881        spin_lock_bh(&bdi_lock);
 882
 883        bdi->id = ++bdi_id_cursor;
 884
 885        p = bdi_lookup_rb_node(bdi->id, &parent);
 886        rb_link_node(&bdi->rb_node, parent, p);
 887        rb_insert_color(&bdi->rb_node, &bdi_tree);
 888
 889        list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
 890
 891        spin_unlock_bh(&bdi_lock);
 892
 893        trace_writeback_bdi_register(bdi);
 894        return 0;
 895}
 896
 897int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...)
 898{
 899        va_list args;
 900        int ret;
 901
 902        va_start(args, fmt);
 903        ret = bdi_register_va(bdi, fmt, args);
 904        va_end(args);
 905        return ret;
 906}
 907EXPORT_SYMBOL(bdi_register);
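/*
 * Illustrative driver-side lifecycle sketch (editorial addition, not part
 * of the original file; "devt" stands for a hypothetical dev_t and error
 * handling is omitted).  bdi_alloc() returns the initial reference, which
 * the final bdi_put() drops:
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	bdi_register(bdi, "%u:%u", MAJOR(devt), MINOR(devt));
 *	...
 *	bdi_unregister(bdi);
 *	bdi_put(bdi);
 */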
 908
 909void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner)
 910{
 911        WARN_ON_ONCE(bdi->owner);
 912        bdi->owner = owner;
 913        get_device(owner);
 914}
 915
 916/*
 917 * Remove bdi from bdi_list, and ensure that it is no longer visible
 918 */
 919static void bdi_remove_from_list(struct backing_dev_info *bdi)
 920{
 921        spin_lock_bh(&bdi_lock);
 922        rb_erase(&bdi->rb_node, &bdi_tree);
 923        list_del_rcu(&bdi->bdi_list);
 924        spin_unlock_bh(&bdi_lock);
 925
 926        synchronize_rcu_expedited();
 927}
 928
 929void bdi_unregister(struct backing_dev_info *bdi)
 930{
 931        /* make sure nobody finds us on the bdi_list anymore */
 932        bdi_remove_from_list(bdi);
 933        wb_shutdown(&bdi->wb);
 934        cgwb_bdi_unregister(bdi);
 935
 936        if (bdi->dev) {
 937                bdi_debug_unregister(bdi);
 938                device_unregister(bdi->dev);
 939                bdi->dev = NULL;
 940        }
 941
 942        if (bdi->owner) {
 943                put_device(bdi->owner);
 944                bdi->owner = NULL;
 945        }
 946}
 947
 948static void release_bdi(struct kref *ref)
 949{
 950        struct backing_dev_info *bdi =
 951                        container_of(ref, struct backing_dev_info, refcnt);
 952
 953        if (test_bit(WB_registered, &bdi->wb.state))
 954                bdi_unregister(bdi);
 955        WARN_ON_ONCE(bdi->dev);
 956        wb_exit(&bdi->wb);
 957        kfree(bdi);
 958}
 959
 960void bdi_put(struct backing_dev_info *bdi)
 961{
 962        kref_put(&bdi->refcnt, release_bdi);
 963}
 964EXPORT_SYMBOL(bdi_put);
 965
 966const char *bdi_dev_name(struct backing_dev_info *bdi)
 967{
 968        if (!bdi || !bdi->dev)
 969                return bdi_unknown_name;
 970        return bdi->dev_name;
 971}
 972EXPORT_SYMBOL_GPL(bdi_dev_name);
 973
 974static wait_queue_head_t congestion_wqh[2] = {
 975                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
 976                __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
 977        };
 978static atomic_t nr_wb_congested[2];
 979
 980void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 981{
 982        wait_queue_head_t *wqh = &congestion_wqh[sync];
 983        enum wb_congested_state bit;
 984
 985        bit = sync ? WB_sync_congested : WB_async_congested;
 986        if (test_and_clear_bit(bit, &bdi->wb.congested))
 987                atomic_dec(&nr_wb_congested[sync]);
 988        smp_mb__after_atomic();
 989        if (waitqueue_active(wqh))
 990                wake_up(wqh);
 991}
 992EXPORT_SYMBOL(clear_bdi_congested);
 993
 994void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 995{
 996        enum wb_congested_state bit;
 997
 998        bit = sync ? WB_sync_congested : WB_async_congested;
 999        if (!test_and_set_bit(bit, &bdi->wb.congested))
1000                atomic_inc(&nr_wb_congested[sync]);
1001}
1002EXPORT_SYMBOL(set_bdi_congested);
1003
1004/**
1005 * congestion_wait - wait for a backing_dev to become uncongested
1006 * @sync: SYNC or ASYNC IO
1007 * @timeout: timeout in jiffies
1008 *
1009 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
1010 * write congestion.  If no backing_devs are congested then just wait for the
1011 * next write to be completed.
1012 */
1013long congestion_wait(int sync, long timeout)
1014{
1015        long ret;
1016        unsigned long start = jiffies;
1017        DEFINE_WAIT(wait);
1018        wait_queue_head_t *wqh = &congestion_wqh[sync];
1019
1020        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1021        ret = io_schedule_timeout(timeout);
1022        finish_wait(wqh, &wait);
1023
1024        trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
1025                                        jiffies_to_usecs(jiffies - start));
1026
1027        return ret;
1028}
1029EXPORT_SYMBOL(congestion_wait);
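/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): reclaim-style callers typically back off for a short period, e.g.:
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ / 10);
 */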
1030
1031/**
1032 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
1033 * @sync: SYNC or ASYNC IO
1034 * @timeout: timeout in jiffies
1035 *
 1036 * In the event of a congested backing_dev (any backing_dev), this waits
 1037 * for up to @timeout jiffies for either a BDI to exit congestion on the
 1038 * given @sync queue or for a write to complete.
1039 *
1040 * The return value is 0 if the sleep is for the full timeout. Otherwise,
1041 * it is the number of jiffies that were still remaining when the function
1042 * returned. return_value == timeout implies the function did not sleep.
1043 */
1044long wait_iff_congested(int sync, long timeout)
1045{
1046        long ret;
1047        unsigned long start = jiffies;
1048        DEFINE_WAIT(wait);
1049        wait_queue_head_t *wqh = &congestion_wqh[sync];
1050
1051        /*
1052         * If there is no congestion, yield if necessary instead
1053         * of sleeping on the congestion queue
1054         */
1055        if (atomic_read(&nr_wb_congested[sync]) == 0) {
1056                cond_resched();
1057
1058                /* In case we scheduled, work out time remaining */
1059                ret = timeout - (jiffies - start);
1060                if (ret < 0)
1061                        ret = 0;
1062
1063                goto out;
1064        }
1065
1066        /* Sleep until uncongested or a write happens */
1067        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1068        ret = io_schedule_timeout(timeout);
1069        finish_wait(wqh, &wait);
1070
1071out:
1072        trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
1073                                        jiffies_to_usecs(jiffies - start));
1074
1075        return ret;
1076}
1077EXPORT_SYMBOL(wait_iff_congested);
1078