linux/block/blk-cgroup.c
   1/*
   2 * Common Block IO controller cgroup interface
   3 *
   4 * Based on ideas and code from CFQ, CFS and BFQ:
   5 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
   6 *
   7 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
   8 *                    Paolo Valente <paolo.valente@unimore.it>
   9 *
  10 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  11 *                    Nauman Rafique <nauman@google.com>
  12 */
  13#include <linux/ioprio.h>
  14#include <linux/kdev_t.h>
  15#include <linux/module.h>
  16#include <linux/err.h>
  17#include <linux/blkdev.h>
  18#include <linux/slab.h>
  19#include <linux/genhd.h>
  20#include <linux/delay.h>
  21#include <linux/atomic.h>
  22#include "blk-cgroup.h"
  23#include "blk.h"
  24
  25#define MAX_KEY_LEN 100
  26
  27static DEFINE_MUTEX(blkcg_pol_mutex);
  28
  29struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
  30                            .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
  31EXPORT_SYMBOL_GPL(blkcg_root);
  32
  33static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
  34
  35static bool blkcg_policy_enabled(struct request_queue *q,
  36                                 const struct blkcg_policy *pol)
  37{
  38        return pol && test_bit(pol->plid, q->blkcg_pols);
  39}
  40
  41/**
  42 * blkg_free - free a blkg
  43 * @blkg: blkg to free
  44 *
  45 * Free @blkg which may be partially allocated.
  46 */
  47static void blkg_free(struct blkcg_gq *blkg)
  48{
  49        int i;
  50
  51        if (!blkg)
  52                return;
  53
  54        for (i = 0; i < BLKCG_MAX_POLS; i++)
  55                kfree(blkg->pd[i]);
  56
  57        blk_exit_rl(&blkg->rl);
  58        kfree(blkg);
  59}
  60
  61/**
  62 * blkg_alloc - allocate a blkg
  63 * @blkcg: block cgroup the new blkg is associated with
  64 * @q: request_queue the new blkg is associated with
  65 * @gfp_mask: allocation mask to use
  66 *
   67 * Allocate a new blkg associating @blkcg and @q.
  68 */
  69static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
  70                                   gfp_t gfp_mask)
  71{
  72        struct blkcg_gq *blkg;
  73        int i;
  74
  75        /* alloc and init base part */
  76        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
  77        if (!blkg)
  78                return NULL;
  79
  80        blkg->q = q;
  81        INIT_LIST_HEAD(&blkg->q_node);
  82        blkg->blkcg = blkcg;
  83        blkg->refcnt = 1;
  84
  85        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
  86        if (blkcg != &blkcg_root) {
  87                if (blk_init_rl(&blkg->rl, q, gfp_mask))
  88                        goto err_free;
  89                blkg->rl.blkg = blkg;
  90        }
  91
  92        for (i = 0; i < BLKCG_MAX_POLS; i++) {
  93                struct blkcg_policy *pol = blkcg_policy[i];
  94                struct blkg_policy_data *pd;
  95
  96                if (!blkcg_policy_enabled(q, pol))
  97                        continue;
  98
  99                /* alloc per-policy data and attach it to blkg */
 100                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
 101                if (!pd)
 102                        goto err_free;
 103
 104                blkg->pd[i] = pd;
 105                pd->blkg = blkg;
 106                pd->plid = i;
 107        }
 108
 109        return blkg;
 110
 111err_free:
 112        blkg_free(blkg);
 113        return NULL;
 114}
 115
 116/**
 117 * __blkg_lookup - internal version of blkg_lookup()
 118 * @blkcg: blkcg of interest
 119 * @q: request_queue of interest
 120 * @update_hint: whether to update lookup hint with the result or not
 121 *
  122 * This is the internal version and shouldn't be used by policy
 123 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 124 * @q's bypass state.  If @update_hint is %true, the caller should be
 125 * holding @q->queue_lock and lookup hint is updated on success.
 126 */
 127struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
 128                               bool update_hint)
 129{
 130        struct blkcg_gq *blkg;
 131
 132        blkg = rcu_dereference(blkcg->blkg_hint);
 133        if (blkg && blkg->q == q)
 134                return blkg;
 135
 136        /*
 137         * Hint didn't match.  Look up from the radix tree.  Note that the
 138         * hint can only be updated under queue_lock as otherwise @blkg
 139         * could have already been removed from blkg_tree.  The caller is
 140         * responsible for grabbing queue_lock if @update_hint.
 141         */
 142        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
 143        if (blkg && blkg->q == q) {
 144                if (update_hint) {
 145                        lockdep_assert_held(q->queue_lock);
 146                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
 147                }
 148                return blkg;
 149        }
 150
 151        return NULL;
 152}
 153
 154/**
 155 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 156 * @blkcg: blkcg of interest
 157 * @q: request_queue of interest
 158 *
 159 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 160 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 161 * - see blk_queue_bypass_start() for details.
 162 */
 163struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 164{
 165        WARN_ON_ONCE(!rcu_read_lock_held());
 166
 167        if (unlikely(blk_queue_bypass(q)))
 168                return NULL;
 169        return __blkg_lookup(blkcg, q, false);
 170}
 171EXPORT_SYMBOL_GPL(blkg_lookup);
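
/*
 * Editorial sketch (not part of the original source): how a hot-path caller
 * might use blkg_lookup().  "blkcg_policy_foo" and foo_blkg_tracked() are
 * assumptions made up for illustration; the RCU requirement and the
 * NULL-on-bypass behaviour are the ones documented for blkg_lookup() above.
 */
extern struct blkcg_policy blkcg_policy_foo;	/* hypothetical policy */

static bool foo_blkg_tracked(struct request_queue *q, struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool tracked;

	rcu_read_lock();
	blkg = blkg_lookup(bio_blkcg(bio), q);	/* NULL if absent or @q bypassing */
	tracked = blkg && blkg_to_pd(blkg, &blkcg_policy_foo);
	rcu_read_unlock();

	return tracked;
}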
 172
 173/*
 174 * If @new_blkg is %NULL, this function tries to allocate a new one as
 175 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 176 */
 177static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
 178                                    struct request_queue *q,
 179                                    struct blkcg_gq *new_blkg)
 180{
 181        struct blkcg_gq *blkg;
 182        int i, ret;
 183
 184        WARN_ON_ONCE(!rcu_read_lock_held());
 185        lockdep_assert_held(q->queue_lock);
 186
 187        /* blkg holds a reference to blkcg */
 188        if (!css_tryget(&blkcg->css)) {
 189                ret = -EINVAL;
 190                goto err_free_blkg;
 191        }
 192
 193        /* allocate */
 194        if (!new_blkg) {
 195                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
 196                if (unlikely(!new_blkg)) {
 197                        ret = -ENOMEM;
 198                        goto err_put_css;
 199                }
 200        }
 201        blkg = new_blkg;
 202
 203        /* link parent */
 204        if (blkcg_parent(blkcg)) {
 205                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
 206                if (WARN_ON_ONCE(!blkg->parent)) {
 207                        ret = -EINVAL;
 208                        goto err_put_css;
 209                }
 210                blkg_get(blkg->parent);
 211        }
 212
 213        /* invoke per-policy init */
 214        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 215                struct blkcg_policy *pol = blkcg_policy[i];
 216
 217                if (blkg->pd[i] && pol->pd_init_fn)
 218                        pol->pd_init_fn(blkg);
 219        }
 220
 221        /* insert */
 222        spin_lock(&blkcg->lock);
 223        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
 224        if (likely(!ret)) {
 225                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
 226                list_add(&blkg->q_node, &q->blkg_list);
 227
 228                for (i = 0; i < BLKCG_MAX_POLS; i++) {
 229                        struct blkcg_policy *pol = blkcg_policy[i];
 230
 231                        if (blkg->pd[i] && pol->pd_online_fn)
 232                                pol->pd_online_fn(blkg);
 233                }
 234        }
 235        blkg->online = true;
 236        spin_unlock(&blkcg->lock);
 237
 238        if (!ret) {
 239                if (blkcg == &blkcg_root) {
 240                        q->root_blkg = blkg;
 241                        q->root_rl.blkg = blkg;
 242                }
 243                return blkg;
 244        }
 245
  246        /* @blkg failed to be fully initialized, use the usual release path */
 247        blkg_put(blkg);
 248        return ERR_PTR(ret);
 249
 250err_put_css:
 251        css_put(&blkcg->css);
 252err_free_blkg:
 253        blkg_free(new_blkg);
 254        return ERR_PTR(ret);
 255}
 256
 257/**
 258 * blkg_lookup_create - lookup blkg, try to create one if not there
 259 * @blkcg: blkcg of interest
 260 * @q: request_queue of interest
 261 *
 262 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 263 * create one.  blkg creation is performed recursively from blkcg_root such
  264 * that all non-root blkgs have access to the parent blkg.  This function
 265 * should be called under RCU read lock and @q->queue_lock.
 266 *
 267 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 268 * value on error.  If @q is dead, returns ERR_PTR(-EINVAL).  If @q is not
 269 * dead and bypassing, returns ERR_PTR(-EBUSY).
 270 */
 271struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 272                                    struct request_queue *q)
 273{
 274        struct blkcg_gq *blkg;
 275
 276        WARN_ON_ONCE(!rcu_read_lock_held());
 277        lockdep_assert_held(q->queue_lock);
 278
 279        /*
 280         * This could be the first entry point of blkcg implementation and
 281         * we shouldn't allow anything to go through for a bypassing queue.
 282         */
 283        if (unlikely(blk_queue_bypass(q)))
 284                return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
 285
 286        blkg = __blkg_lookup(blkcg, q, true);
 287        if (blkg)
 288                return blkg;
 289
 290        /*
 291         * Create blkgs walking down from blkcg_root to @blkcg, so that all
 292         * non-root blkgs have access to their parents.
 293         */
 294        while (true) {
 295                struct blkcg *pos = blkcg;
 296                struct blkcg *parent = blkcg_parent(blkcg);
 297
 298                while (parent && !__blkg_lookup(parent, q, false)) {
 299                        pos = parent;
 300                        parent = blkcg_parent(parent);
 301                }
 302
 303                blkg = blkg_create(pos, q, NULL);
 304                if (pos == blkcg || IS_ERR(blkg))
 305                        return blkg;
 306        }
 307}
 308EXPORT_SYMBOL_GPL(blkg_lookup_create);
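
/*
 * Editorial sketch (not part of the original source): a typical
 * blkg_lookup_create() call site, loosely modelled on how the throttling
 * and CFQ policies set up their groups.  foo_find_or_create_blkg() is
 * hypothetical; the locking (RCU read lock plus queue_lock) and the
 * ERR_PTR() handling follow the comment above.
 */
static struct blkcg_gq *foo_find_or_create_blkg(struct request_queue *q,
						struct bio *bio)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(bio_blkcg(bio), q);
	if (IS_ERR(blkg))
		blkg = q->root_blkg;	/* fall back to the root group */

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	return blkg;
}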
 309
 310static void blkg_destroy(struct blkcg_gq *blkg)
 311{
 312        struct blkcg *blkcg = blkg->blkcg;
 313        int i;
 314
 315        lockdep_assert_held(blkg->q->queue_lock);
 316        lockdep_assert_held(&blkcg->lock);
 317
  318        /* Something is wrong if we are trying to remove the same group twice */
 319        WARN_ON_ONCE(list_empty(&blkg->q_node));
 320        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 321
 322        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 323                struct blkcg_policy *pol = blkcg_policy[i];
 324
 325                if (blkg->pd[i] && pol->pd_offline_fn)
 326                        pol->pd_offline_fn(blkg);
 327        }
 328        blkg->online = false;
 329
 330        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
 331        list_del_init(&blkg->q_node);
 332        hlist_del_init_rcu(&blkg->blkcg_node);
 333
 334        /*
  335         * Both setting the lookup hint to @blkg and clearing it are done
 336         * under queue_lock.  If it's not pointing to @blkg now, it never
 337         * will.  Hint assignment itself can race safely.
 338         */
 339        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
 340                rcu_assign_pointer(blkcg->blkg_hint, NULL);
 341
 342        /*
  343         * If the root blkg is being destroyed, just clear the pointer since
  344         * root_rl does not take a reference on the root blkg.
 345         */
 346        if (blkcg == &blkcg_root) {
 347                blkg->q->root_blkg = NULL;
 348                blkg->q->root_rl.blkg = NULL;
 349        }
 350
 351        /*
 352         * Put the reference taken at the time of creation so that when all
  353         * queues are gone, the group can be destroyed.
 354         */
 355        blkg_put(blkg);
 356}
 357
 358/**
 359 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 360 * @q: request_queue of interest
 361 *
 362 * Destroy all blkgs associated with @q.
 363 */
 364static void blkg_destroy_all(struct request_queue *q)
 365{
 366        struct blkcg_gq *blkg, *n;
 367
 368        lockdep_assert_held(q->queue_lock);
 369
 370        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
 371                struct blkcg *blkcg = blkg->blkcg;
 372
 373                spin_lock(&blkcg->lock);
 374                blkg_destroy(blkg);
 375                spin_unlock(&blkcg->lock);
 376        }
 377}
 378
 379/*
  380 * A group is RCU protected, but holding the RCU read lock does not mean that one
 381 * can access all the fields of blkg and assume these are valid.  For
 382 * example, don't try to follow throtl_data and request queue links.
 383 *
  384 * Holding a reference to a blkg under RCU only allows access to values
  385 * local to the group, like group stats and group rate limits.
 386 */
 387void __blkg_release_rcu(struct rcu_head *rcu_head)
 388{
 389        struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
 390        int i;
 391
 392        /* tell policies that this one is being freed */
 393        for (i = 0; i < BLKCG_MAX_POLS; i++) {
 394                struct blkcg_policy *pol = blkcg_policy[i];
 395
 396                if (blkg->pd[i] && pol->pd_exit_fn)
 397                        pol->pd_exit_fn(blkg);
 398        }
 399
 400        /* release the blkcg and parent blkg refs this blkg has been holding */
 401        css_put(&blkg->blkcg->css);
 402        if (blkg->parent) {
 403                spin_lock_irq(blkg->q->queue_lock);
 404                blkg_put(blkg->parent);
 405                spin_unlock_irq(blkg->q->queue_lock);
 406        }
 407
 408        blkg_free(blkg);
 409}
 410EXPORT_SYMBOL_GPL(__blkg_release_rcu);
 411
 412/*
  413 * The helper blk_queue_for_each_rl() uses to find the next request_list.
  414 * It's a bit tricky because the root blkg uses @q->root_rl, not its own rl.
 415 */
 416struct request_list *__blk_queue_next_rl(struct request_list *rl,
 417                                         struct request_queue *q)
 418{
 419        struct list_head *ent;
 420        struct blkcg_gq *blkg;
 421
 422        /*
 423         * Determine the current blkg list_head.  The first entry is
 424         * root_rl which is off @q->blkg_list and mapped to the head.
 425         */
 426        if (rl == &q->root_rl) {
 427                ent = &q->blkg_list;
 428                /* There are no more block groups, hence no request lists */
 429                if (list_empty(ent))
 430                        return NULL;
 431        } else {
 432                blkg = container_of(rl, struct blkcg_gq, rl);
 433                ent = &blkg->q_node;
 434        }
 435
 436        /* walk to the next list_head, skip root blkcg */
 437        ent = ent->next;
 438        if (ent == &q->root_blkg->q_node)
 439                ent = ent->next;
 440        if (ent == &q->blkg_list)
 441                return NULL;
 442
 443        blkg = container_of(ent, struct blkcg_gq, q_node);
 444        return &blkg->rl;
 445}
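
/*
 * Editorial sketch (not part of the original source): __blk_queue_next_rl()
 * is normally consumed through the blk_queue_for_each_rl() iterator defined
 * in block/blk.h.  The counter below is made up for illustration; queue_lock
 * keeps the blkg list stable while it is walked.
 */
static unsigned int foo_count_request_lists(struct request_queue *q)
{
	struct request_list *rl;
	unsigned int nr = 0;

	lockdep_assert_held(q->queue_lock);

	blk_queue_for_each_rl(rl, q)	/* root_rl first, then per-blkg rls */
		nr++;

	return nr;
}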
 446
 447static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 448                             struct cftype *cftype, u64 val)
 449{
 450        struct blkcg *blkcg = css_to_blkcg(css);
 451        struct blkcg_gq *blkg;
 452        int i;
 453
 454        mutex_lock(&blkcg_pol_mutex);
 455        spin_lock_irq(&blkcg->lock);
 456
 457        /*
 458         * Note that stat reset is racy - it doesn't synchronize against
 459         * stat updates.  This is a debug feature which shouldn't exist
 460         * anyway.  If you get hit by a race, retry.
 461         */
 462        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 463                for (i = 0; i < BLKCG_MAX_POLS; i++) {
 464                        struct blkcg_policy *pol = blkcg_policy[i];
 465
 466                        if (blkcg_policy_enabled(blkg->q, pol) &&
 467                            pol->pd_reset_stats_fn)
 468                                pol->pd_reset_stats_fn(blkg);
 469                }
 470        }
 471
 472        spin_unlock_irq(&blkcg->lock);
 473        mutex_unlock(&blkcg_pol_mutex);
 474        return 0;
 475}
 476
 477static const char *blkg_dev_name(struct blkcg_gq *blkg)
 478{
 479        /* some drivers (floppy) instantiate a queue w/o disk registered */
 480        if (blkg->q->backing_dev_info.dev)
 481                return dev_name(blkg->q->backing_dev_info.dev);
 482        return NULL;
 483}
 484
 485/**
 486 * blkcg_print_blkgs - helper for printing per-blkg data
 487 * @sf: seq_file to print to
 488 * @blkcg: blkcg of interest
 489 * @prfill: fill function to print out a blkg
 490 * @pol: policy in question
 491 * @data: data to be passed to @prfill
  492 * @show_total: whether to print the sum of the prfill return values
 493 *
 494 * This function invokes @prfill on each blkg of @blkcg if pd for the
 495 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 496 * policy data and @data and the matching queue lock held.  If @show_total
 497 * is %true, the sum of the return values from @prfill is printed with
 498 * "Total" label at the end.
 499 *
  500 * This is to be used to construct print functions for the
 501 * cftype->read_seq_string method.
 502 */
 503void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 504                       u64 (*prfill)(struct seq_file *,
 505                                     struct blkg_policy_data *, int),
 506                       const struct blkcg_policy *pol, int data,
 507                       bool show_total)
 508{
 509        struct blkcg_gq *blkg;
 510        u64 total = 0;
 511
 512        rcu_read_lock();
 513        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 514                spin_lock_irq(blkg->q->queue_lock);
 515                if (blkcg_policy_enabled(blkg->q, pol))
 516                        total += prfill(sf, blkg->pd[pol->plid], data);
 517                spin_unlock_irq(blkg->q->queue_lock);
 518        }
 519        rcu_read_unlock();
 520
 521        if (show_total)
 522                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
 523}
 524EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
 525
 526/**
 527 * __blkg_prfill_u64 - prfill helper for a single u64 value
 528 * @sf: seq_file to print to
 529 * @pd: policy private data of interest
 530 * @v: value to print
 531 *
  532 * Print @v to @sf for the device associated with @pd.
 533 */
 534u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
 535{
 536        const char *dname = blkg_dev_name(pd->blkg);
 537
 538        if (!dname)
 539                return 0;
 540
 541        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
 542        return v;
 543}
 544EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
 545
 546/**
 547 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 548 * @sf: seq_file to print to
 549 * @pd: policy private data of interest
 550 * @rwstat: rwstat to print
 551 *
  552 * Print @rwstat to @sf for the device associated with @pd.
 553 */
 554u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 555                         const struct blkg_rwstat *rwstat)
 556{
 557        static const char *rwstr[] = {
 558                [BLKG_RWSTAT_READ]      = "Read",
 559                [BLKG_RWSTAT_WRITE]     = "Write",
 560                [BLKG_RWSTAT_SYNC]      = "Sync",
 561                [BLKG_RWSTAT_ASYNC]     = "Async",
 562        };
 563        const char *dname = blkg_dev_name(pd->blkg);
 564        u64 v;
 565        int i;
 566
 567        if (!dname)
 568                return 0;
 569
 570        for (i = 0; i < BLKG_RWSTAT_NR; i++)
 571                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
 572                           (unsigned long long)rwstat->cnt[i]);
 573
 574        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
 575        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
 576        return v;
 577}
 578EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
 579
 580/**
 581 * blkg_prfill_stat - prfill callback for blkg_stat
 582 * @sf: seq_file to print to
 583 * @pd: policy private data of interest
 584 * @off: offset to the blkg_stat in @pd
 585 *
 586 * prfill callback for printing a blkg_stat.
 587 */
 588u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
 589{
 590        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
 591}
 592EXPORT_SYMBOL_GPL(blkg_prfill_stat);
 593
 594/**
 595 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 596 * @sf: seq_file to print to
 597 * @pd: policy private data of interest
 598 * @off: offset to the blkg_rwstat in @pd
 599 *
 600 * prfill callback for printing a blkg_rwstat.
 601 */
 602u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 603                       int off)
 604{
 605        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
 606
 607        return __blkg_prfill_rwstat(sf, pd, &rwstat);
 608}
 609EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
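
/*
 * Editorial sketch (not part of the original source): how a policy typically
 * wires blkcg_print_blkgs() and the prfill helpers into a cftype read method.
 * blkcg_policy_foo, the "foo.io_service_bytes" file name, struct
 * foo_group_data and the use of the read_seq_string callback of this kernel
 * generation's cftype are assumptions; cft->private carrying the offset of
 * the stat inside the per-blkg data is the convention blkg_prfill_rwstat()
 * expects.
 */
extern struct blkcg_policy blkcg_policy_foo;	/* hypothetical policy */

static int foo_print_rwstat(struct cgroup_subsys_state *css,
			    struct cftype *cft, struct seq_file *sf)
{
	blkcg_print_blkgs(sf, css_to_blkcg(css), blkg_prfill_rwstat,
			  &blkcg_policy_foo, cft->private, true);
	return 0;
}

/*
 * ... hooked up in the policy's cftype table roughly as:
 *	{ .name = "foo.io_service_bytes",
 *	  .private = offsetof(struct foo_group_data, service_bytes),
 *	  .read_seq_string = foo_print_rwstat, },
 */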
 610
 611/**
 612 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 613 * @pd: policy private data of interest
 614 * @off: offset to the blkg_stat in @pd
 615 *
 616 * Collect the blkg_stat specified by @off from @pd and all its online
 617 * descendants and return the sum.  The caller must be holding the queue
 618 * lock for online tests.
 619 */
 620u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 621{
 622        struct blkcg_policy *pol = blkcg_policy[pd->plid];
 623        struct blkcg_gq *pos_blkg;
 624        struct cgroup_subsys_state *pos_css;
 625        u64 sum = 0;
 626
 627        lockdep_assert_held(pd->blkg->q->queue_lock);
 628
 629        rcu_read_lock();
 630        blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 631                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 632                struct blkg_stat *stat = (void *)pos_pd + off;
 633
 634                if (pos_blkg->online)
 635                        sum += blkg_stat_read(stat);
 636        }
 637        rcu_read_unlock();
 638
 639        return sum;
 640}
 641EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
 642
 643/**
 644 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 645 * @pd: policy private data of interest
  646 * @off: offset to the blkg_rwstat in @pd
 647 *
 648 * Collect the blkg_rwstat specified by @off from @pd and all its online
 649 * descendants and return the sum.  The caller must be holding the queue
 650 * lock for online tests.
 651 */
 652struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 653                                             int off)
 654{
 655        struct blkcg_policy *pol = blkcg_policy[pd->plid];
 656        struct blkcg_gq *pos_blkg;
 657        struct cgroup_subsys_state *pos_css;
 658        struct blkg_rwstat sum = { };
 659        int i;
 660
 661        lockdep_assert_held(pd->blkg->q->queue_lock);
 662
 663        rcu_read_lock();
 664        blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 665                struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
 666                struct blkg_rwstat *rwstat = (void *)pos_pd + off;
 667                struct blkg_rwstat tmp;
 668
 669                if (!pos_blkg->online)
 670                        continue;
 671
 672                tmp = blkg_rwstat_read(rwstat);
 673
 674                for (i = 0; i < BLKG_RWSTAT_NR; i++)
 675                        sum.cnt[i] += tmp.cnt[i];
 676        }
 677        rcu_read_unlock();
 678
 679        return sum;
 680}
 681EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
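
/*
 * Editorial sketch (not part of the original source): the usual way the
 * recursive helpers are turned into prfill callbacks, mirroring
 * blkg_prfill_stat() and blkg_prfill_rwstat() above.  The foo_ prefix is
 * hypothetical.
 */
static u64 foo_prfill_stat_recursive(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_recursive_sum(pd, off));
}

static u64 foo_prfill_rwstat_recursive(struct seq_file *sf,
				       struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);

	return __blkg_prfill_rwstat(sf, pd, &sum);
}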
 682
 683/**
 684 * blkg_conf_prep - parse and prepare for per-blkg config update
 685 * @blkcg: target block cgroup
 686 * @pol: target policy
 687 * @input: input string
 688 * @ctx: blkg_conf_ctx to be filled
 689 *
 690 * Parse per-blkg config update from @input and initialize @ctx with the
 691 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 692 * value.  This function returns with RCU read lock and queue lock held and
 693 * must be paired with blkg_conf_finish().
 694 */
 695int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 696                   const char *input, struct blkg_conf_ctx *ctx)
 697        __acquires(rcu) __acquires(disk->queue->queue_lock)
 698{
 699        struct gendisk *disk;
 700        struct blkcg_gq *blkg;
 701        unsigned int major, minor;
 702        unsigned long long v;
 703        int part, ret;
 704
 705        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
 706                return -EINVAL;
 707
 708        disk = get_gendisk(MKDEV(major, minor), &part);
 709        if (!disk || part)
 710                return -EINVAL;
 711
 712        rcu_read_lock();
 713        spin_lock_irq(disk->queue->queue_lock);
 714
 715        if (blkcg_policy_enabled(disk->queue, pol))
 716                blkg = blkg_lookup_create(blkcg, disk->queue);
 717        else
 718                blkg = ERR_PTR(-EINVAL);
 719
 720        if (IS_ERR(blkg)) {
 721                ret = PTR_ERR(blkg);
 722                rcu_read_unlock();
 723                spin_unlock_irq(disk->queue->queue_lock);
 724                put_disk(disk);
 725                /*
 726                 * If queue was bypassing, we should retry.  Do so after a
  727                 * short msleep().  It isn't strictly necessary but the queue
 728                 * can be bypassing for some time and it's always nice to
 729                 * avoid busy looping.
 730                 */
 731                if (ret == -EBUSY) {
 732                        msleep(10);
 733                        ret = restart_syscall();
 734                }
 735                return ret;
 736        }
 737
 738        ctx->disk = disk;
 739        ctx->blkg = blkg;
 740        ctx->v = v;
 741        return 0;
 742}
 743EXPORT_SYMBOL_GPL(blkg_conf_prep);
 744
 745/**
 746 * blkg_conf_finish - finish up per-blkg config update
  747 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 748 *
 749 * Finish up after per-blkg config update.  This function must be paired
 750 * with blkg_conf_prep().
 751 */
 752void blkg_conf_finish(struct blkg_conf_ctx *ctx)
 753        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
 754{
 755        spin_unlock_irq(ctx->disk->queue->queue_lock);
 756        rcu_read_unlock();
 757        put_disk(ctx->disk);
 758}
 759EXPORT_SYMBOL_GPL(blkg_conf_finish);
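
/*
 * Editorial sketch (not part of the original source): a per-blkg config
 * writer built around blkg_conf_prep() and blkg_conf_finish().  struct
 * foo_group_data, its bps_limit field, blkcg_policy_foo and the assumption
 * that this is wired up through the write_string callback of this era's
 * cftype are all illustrative; the "MAJ:MIN VAL" input format and the
 * prep/finish pairing are what the helpers above define.
 */
extern struct blkcg_policy blkcg_policy_foo;	/* hypothetical policy */

struct foo_group_data {				/* hypothetical per-blkg data */
	struct blkg_policy_data pd;		/* must be the first member */
	u64 bps_limit;
};

static int foo_set_bps(struct cgroup_subsys_state *css, struct cftype *cft,
		       const char *buf)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkg_conf_ctx ctx;
	struct foo_group_data *fgd;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;

	/* RCU read lock and queue_lock are held until blkg_conf_finish() */
	fgd = container_of(blkg_to_pd(ctx.blkg, &blkcg_policy_foo),
			   struct foo_group_data, pd);
	fgd->bps_limit = ctx.v;

	blkg_conf_finish(&ctx);
	return 0;
}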
 760
 761struct cftype blkcg_files[] = {
 762        {
 763                .name = "reset_stats",
 764                .write_u64 = blkcg_reset_stats,
 765        },
 766        { }     /* terminate */
 767};
 768
 769/**
 770 * blkcg_css_offline - cgroup css_offline callback
 771 * @css: css of interest
 772 *
  773 * This function is called when @css is about to go away and is responsible
 774 * for shooting down all blkgs associated with @css.  blkgs should be
 775 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 776 * inside q lock, this function performs reverse double lock dancing.
 777 *
 778 * This is the blkcg counterpart of ioc_release_fn().
 779 */
 780static void blkcg_css_offline(struct cgroup_subsys_state *css)
 781{
 782        struct blkcg *blkcg = css_to_blkcg(css);
 783
 784        spin_lock_irq(&blkcg->lock);
 785
 786        while (!hlist_empty(&blkcg->blkg_list)) {
 787                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
 788                                                struct blkcg_gq, blkcg_node);
 789                struct request_queue *q = blkg->q;
 790
 791                if (spin_trylock(q->queue_lock)) {
 792                        blkg_destroy(blkg);
 793                        spin_unlock(q->queue_lock);
 794                } else {
 795                        spin_unlock_irq(&blkcg->lock);
 796                        cpu_relax();
 797                        spin_lock_irq(&blkcg->lock);
 798                }
 799        }
 800
 801        spin_unlock_irq(&blkcg->lock);
 802}
 803
 804static void blkcg_css_free(struct cgroup_subsys_state *css)
 805{
 806        struct blkcg *blkcg = css_to_blkcg(css);
 807
 808        if (blkcg != &blkcg_root)
 809                kfree(blkcg);
 810}
 811
 812static struct cgroup_subsys_state *
 813blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 814{
 815        static atomic64_t id_seq = ATOMIC64_INIT(0);
 816        struct blkcg *blkcg;
 817
 818        if (!parent_css) {
 819                blkcg = &blkcg_root;
 820                goto done;
 821        }
 822
 823        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
 824        if (!blkcg)
 825                return ERR_PTR(-ENOMEM);
 826
 827        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
 828        blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
 829        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
 830done:
 831        spin_lock_init(&blkcg->lock);
 832        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
 833        INIT_HLIST_HEAD(&blkcg->blkg_list);
 834
 835        return &blkcg->css;
 836}
 837
 838/**
 839 * blkcg_init_queue - initialize blkcg part of request queue
 840 * @q: request_queue to initialize
 841 *
 842 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 843 * part of new request_queue @q.
 844 *
 845 * RETURNS:
 846 * 0 on success, -errno on failure.
 847 */
 848int blkcg_init_queue(struct request_queue *q)
 849{
 850        might_sleep();
 851
 852        return blk_throtl_init(q);
 853}
 854
 855/**
 856 * blkcg_drain_queue - drain blkcg part of request_queue
 857 * @q: request_queue to drain
 858 *
 859 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 860 */
 861void blkcg_drain_queue(struct request_queue *q)
 862{
 863        lockdep_assert_held(q->queue_lock);
 864
 865        blk_throtl_drain(q);
 866}
 867
 868/**
 869 * blkcg_exit_queue - exit and release blkcg part of request_queue
 870 * @q: request_queue being released
 871 *
 872 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 873 */
 874void blkcg_exit_queue(struct request_queue *q)
 875{
 876        spin_lock_irq(q->queue_lock);
 877        blkg_destroy_all(q);
 878        spin_unlock_irq(q->queue_lock);
 879
 880        blk_throtl_exit(q);
 881}
 882
 883/*
  884 * We cannot support shared io contexts, as we have no means to support
 885 * two tasks with the same ioc in two different groups without major rework
 886 * of the main cic data structures.  For now we allow a task to change
 887 * its cgroup only if it's the only owner of its ioc.
 888 */
 889static int blkcg_can_attach(struct cgroup_subsys_state *css,
 890                            struct cgroup_taskset *tset)
 891{
 892        struct task_struct *task;
 893        struct io_context *ioc;
 894        int ret = 0;
 895
 896        /* task_lock() is needed to avoid races with exit_io_context() */
 897        cgroup_taskset_for_each(task, css, tset) {
 898                task_lock(task);
 899                ioc = task->io_context;
 900                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
 901                        ret = -EINVAL;
 902                task_unlock(task);
 903                if (ret)
 904                        break;
 905        }
 906        return ret;
 907}
 908
 909struct cgroup_subsys blkio_subsys = {
 910        .name = "blkio",
 911        .css_alloc = blkcg_css_alloc,
 912        .css_offline = blkcg_css_offline,
 913        .css_free = blkcg_css_free,
 914        .can_attach = blkcg_can_attach,
 915        .subsys_id = blkio_subsys_id,
 916        .base_cftypes = blkcg_files,
 917        .module = THIS_MODULE,
 918};
 919EXPORT_SYMBOL_GPL(blkio_subsys);
 920
 921/**
 922 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 923 * @q: request_queue of interest
 924 * @pol: blkcg policy to activate
 925 *
 926 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 927 * bypass mode to populate its blkgs with policy_data for @pol.
 928 *
 929 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 930 * from IO path.  Update of each blkg is protected by both queue and blkcg
 931 * locks so that holding either lock and testing blkcg_policy_enabled() is
 932 * always enough for dereferencing policy data.
 933 *
 934 * The caller is responsible for synchronizing [de]activations and policy
  935 * [un]registrations.  Returns 0 on success, -errno on failure.
 936 */
 937int blkcg_activate_policy(struct request_queue *q,
 938                          const struct blkcg_policy *pol)
 939{
 940        LIST_HEAD(pds);
 941        struct blkcg_gq *blkg, *new_blkg;
 942        struct blkg_policy_data *pd, *n;
 943        int cnt = 0, ret;
 944        bool preloaded;
 945
 946        if (blkcg_policy_enabled(q, pol))
 947                return 0;
 948
 949        /* preallocations for root blkg */
 950        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
 951        if (!new_blkg)
 952                return -ENOMEM;
 953
 954        blk_queue_bypass_start(q);
 955
 956        preloaded = !radix_tree_preload(GFP_KERNEL);
 957
 958        /*
 959         * Make sure the root blkg exists and count the existing blkgs.  As
 960         * @q is bypassing at this point, blkg_lookup_create() can't be
 961         * used.  Open code it.
 962         */
 963        spin_lock_irq(q->queue_lock);
 964
 965        rcu_read_lock();
 966        blkg = __blkg_lookup(&blkcg_root, q, false);
 967        if (blkg)
 968                blkg_free(new_blkg);
 969        else
 970                blkg = blkg_create(&blkcg_root, q, new_blkg);
 971        rcu_read_unlock();
 972
 973        if (preloaded)
 974                radix_tree_preload_end();
 975
 976        if (IS_ERR(blkg)) {
 977                ret = PTR_ERR(blkg);
 978                goto out_unlock;
 979        }
 980
 981        list_for_each_entry(blkg, &q->blkg_list, q_node)
 982                cnt++;
 983
 984        spin_unlock_irq(q->queue_lock);
 985
 986        /* allocate policy_data for all existing blkgs */
 987        while (cnt--) {
 988                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
 989                if (!pd) {
 990                        ret = -ENOMEM;
 991                        goto out_free;
 992                }
 993                list_add_tail(&pd->alloc_node, &pds);
 994        }
 995
 996        /*
 997         * Install the allocated pds.  With @q bypassing, no new blkg
 998         * should have been created while the queue lock was dropped.
 999         */
1000        spin_lock_irq(q->queue_lock);
1001
1002        list_for_each_entry(blkg, &q->blkg_list, q_node) {
1003                if (WARN_ON(list_empty(&pds))) {
1004                        /* umm... this shouldn't happen, just abort */
1005                        ret = -ENOMEM;
1006                        goto out_unlock;
1007                }
1008                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
1009                list_del_init(&pd->alloc_node);
1010
1011                /* grab blkcg lock too while installing @pd on @blkg */
1012                spin_lock(&blkg->blkcg->lock);
1013
1014                blkg->pd[pol->plid] = pd;
1015                pd->blkg = blkg;
1016                pd->plid = pol->plid;
1017                pol->pd_init_fn(blkg);
1018
1019                spin_unlock(&blkg->blkcg->lock);
1020        }
1021
1022        __set_bit(pol->plid, q->blkcg_pols);
1023        ret = 0;
1024out_unlock:
1025        spin_unlock_irq(q->queue_lock);
1026out_free:
1027        blk_queue_bypass_end(q);
1028        list_for_each_entry_safe(pd, n, &pds, alloc_node)
1029                kfree(pd);
1030        return ret;
1031}
1032EXPORT_SYMBOL_GPL(blkcg_activate_policy);
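
/*
 * Editorial sketch (not part of the original source): blkcg_activate_policy()
 * is normally called from a policy's per-queue init path, the way
 * blk_throtl_init() activates the throttling policy, with the matching
 * blkcg_deactivate_policy() call in the per-queue exit path.  The foo_*
 * functions and blkcg_policy_foo are hypothetical.
 */
extern struct blkcg_policy blkcg_policy_foo;	/* hypothetical policy */

static int foo_init_queue(struct request_queue *q)
{
	int ret;

	/* set up per-queue foo state here (omitted) */

	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
	if (ret) {
		/* undo the per-queue setup on failure (omitted) */
	}
	return ret;
}

static void foo_exit_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &blkcg_policy_foo);

	/* tear down per-queue foo state here (omitted) */
}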
1033
1034/**
1035 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1036 * @q: request_queue of interest
1037 * @pol: blkcg policy to deactivate
1038 *
1039 * Deactivate @pol on @q.  Follows the same synchronization rules as
1040 * blkcg_activate_policy().
1041 */
1042void blkcg_deactivate_policy(struct request_queue *q,
1043                             const struct blkcg_policy *pol)
1044{
1045        struct blkcg_gq *blkg;
1046
1047        if (!blkcg_policy_enabled(q, pol))
1048                return;
1049
1050        blk_queue_bypass_start(q);
1051        spin_lock_irq(q->queue_lock);
1052
1053        __clear_bit(pol->plid, q->blkcg_pols);
1054
1055        /* if no policy is left, no need for blkgs - shoot them down */
1056        if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
1057                blkg_destroy_all(q);
1058
1059        list_for_each_entry(blkg, &q->blkg_list, q_node) {
1060                /* grab blkcg lock too while removing @pd from @blkg */
1061                spin_lock(&blkg->blkcg->lock);
1062
1063                if (pol->pd_offline_fn)
1064                        pol->pd_offline_fn(blkg);
1065                if (pol->pd_exit_fn)
1066                        pol->pd_exit_fn(blkg);
1067
1068                kfree(blkg->pd[pol->plid]);
1069                blkg->pd[pol->plid] = NULL;
1070
1071                spin_unlock(&blkg->blkcg->lock);
1072        }
1073
1074        spin_unlock_irq(q->queue_lock);
1075        blk_queue_bypass_end(q);
1076}
1077EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1078
1079/**
1080 * blkcg_policy_register - register a blkcg policy
1081 * @pol: blkcg policy to register
1082 *
1083 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
1084 * successful registration.  Returns 0 on success and -errno on failure.
1085 */
1086int blkcg_policy_register(struct blkcg_policy *pol)
1087{
1088        int i, ret;
1089
1090        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
1091                return -EINVAL;
1092
1093        mutex_lock(&blkcg_pol_mutex);
1094
1095        /* find an empty slot */
1096        ret = -ENOSPC;
1097        for (i = 0; i < BLKCG_MAX_POLS; i++)
1098                if (!blkcg_policy[i])
1099                        break;
1100        if (i >= BLKCG_MAX_POLS)
1101                goto out_unlock;
1102
1103        /* register and update blkgs */
1104        pol->plid = i;
1105        blkcg_policy[i] = pol;
1106
1107        /* everything is in place, add intf files for the new policy */
1108        if (pol->cftypes)
1109                WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
1110        ret = 0;
1111out_unlock:
1112        mutex_unlock(&blkcg_pol_mutex);
1113        return ret;
1114}
1115EXPORT_SYMBOL_GPL(blkcg_policy_register);
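
/*
 * Editorial sketch (not part of the original source): module-level
 * registration of a policy.  Everything prefixed foo_ is made up for
 * illustration; the blkcg_policy fields shown are the ones this file
 * actually consults.
 */
static void foo_pd_init(struct blkcg_gq *blkg)
{
	/* initialize the per-blkg policy data of @blkg (omitted) */
}

static void foo_pd_exit(struct blkcg_gq *blkg)
{
	/* release whatever foo_pd_init() set up (omitted) */
}

static struct cftype foo_files[] = {
	/* the policy's blkio.foo.* interface files would go here */
	{ }	/* terminate */
};

static struct blkcg_policy blkcg_policy_foo = {
	/* policies normally embed blkg_policy_data in a larger pd struct */
	.pd_size	= sizeof(struct blkg_policy_data),
	.cftypes	= foo_files,
	.pd_init_fn	= foo_pd_init,
	.pd_exit_fn	= foo_pd_exit,
};

static int __init foo_module_init(void)
{
	return blkcg_policy_register(&blkcg_policy_foo);
}
module_init(foo_module_init);

static void __exit foo_module_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_foo);
}
module_exit(foo_module_exit);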
1116
1117/**
1118 * blkcg_policy_unregister - unregister a blkcg policy
1119 * @pol: blkcg policy to unregister
1120 *
1121 * Undo blkcg_policy_register(@pol).  Might sleep.
1122 */
1123void blkcg_policy_unregister(struct blkcg_policy *pol)
1124{
1125        mutex_lock(&blkcg_pol_mutex);
1126
1127        if (WARN_ON(blkcg_policy[pol->plid] != pol))
1128                goto out_unlock;
1129
1130        /* kill the intf files first */
1131        if (pol->cftypes)
1132                cgroup_rm_cftypes(pol->cftypes);
1133
1134        /* unregister and update blkgs */
1135        blkcg_policy[pol->plid] = NULL;
1136out_unlock:
1137        mutex_unlock(&blkcg_pol_mutex);
1138}
1139EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
1140