linux/block/blk-cgroup.c
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd = blkg->pd[i];

                if (!pd)
                        continue;

                if (pol && pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(pd);
        }

        blk_exit_rl(&blkg->rl);
        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        blkg->refcnt = 1;

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;

                /* invoke per-policy init */
                if (blkcg_policy_enabled(blkg->q, pol))
                        pol->pd_init_fn(blkg);
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                      struct request_queue *q)
{
        struct blkcg_gq *blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that we
         * may not be holding queue_lock and thus are not sure whether
         * @blkg from blkg_tree has already been removed or not, so we
         * can't update hint to the lookup result.  Leave it to the caller.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q)
                return blkg;

        return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);
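
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * typical lookup runs inside an RCU read-side section, and the caller may
 * only touch blkg fields that remain valid under RCU:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		nr_found++;
 *	rcu_read_unlock();
 */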

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        int ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* lookup and update hint on success, see __blkg_lookup() for details */
        blkg = __blkg_lookup(blkcg, q);
        if (blkg) {
                rcu_assign_pointer(blkcg->blkg_hint, blkg);
                goto out_free;
        }

        /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css)) {
                blkg = ERR_PTR(-EINVAL);
                goto out_free;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
                if (unlikely(!new_blkg)) {
                        blkg = ERR_PTR(-ENOMEM);
                        goto out_put;
                }
        }
        blkg = new_blkg;

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);
        }
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        blkg = ERR_PTR(ret);
out_put:
        css_put(&blkcg->css);
out_free:
        blkg_free(new_blkg);
        return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        /*
         * This could be the first entry point of the blkcg implementation
         * and we shouldn't allow anything to go through for a bypassing
         * queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
        return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);
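
/*
 * Illustrative sketch (hypothetical IO-path caller): creation requires
 * both the RCU read lock and the queue lock, and a failure is usually
 * handled by falling back to the root group:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		blkg = q->root_blkg;
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */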

static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something is wrong if we are trying to remove the same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, the group can be destroyed.
         */
        blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        /*
         * The root blkg is destroyed as well.  Just clear the pointer
         * since root_rl does not take a reference on the root blkg.
         */
        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
        blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
        /* release the extra blkcg reference this blkg has been holding */
        css_put(&blkg->blkcg->css);

        /*
         * A group is freed in an RCU manner.  But having an RCU lock does
         * not mean that one can access all the fields of blkg and assume
         * these are valid.  For example, don't try to follow throtl_data
         * and request queue links.
         *
         * Having a reference to blkg under an RCU read lock allows access
         * only to values local to groups, like group stats and group rate
         * limits.
         */
        call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}
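
/*
 * For reference, blk.h wraps this helper in the blk_queue_for_each_rl()
 * iterator, a macro along these lines (sketch):
 *
 *	#define blk_queue_for_each_rl(rl, q)				\
 *		for ((rl) = &(q)->root_rl; (rl);			\
 *		     (rl) = __blk_queue_next_rl((rl), (q)))
 */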

static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
                             u64 val)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkcg_policy_enabled(blkg->q, pol) &&
                            pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: whether to print the sum of the @prfill return values
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with a "Total" label at the end.
 *
 * This is to be used to construct print functions for the
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        struct hlist_node *n;
        u64 total = 0;

        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
        spin_unlock_irq(&blkcg->lock);

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
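
/*
 * Illustrative sketch (hypothetical "foo" policy, assumed names): a read
 * function pairs blkcg_print_blkgs() with a prfill helper; the offset
 * points into the policy data struct, which embeds struct blkg_policy_data
 * as its first member:
 *
 *	static int foo_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
 *				    struct seq_file *sf)
 *	{
 *		blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
 *				  blkg_prfill_rwstat, &foo_policy,
 *				  offsetof(struct foo_grp, rwstat), true);
 *		return 0;
 *	}
 */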

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)rwstat->cnt[i]);

        v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        unsigned int major, minor;
        unsigned long long v;
        int part, ret;

        if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
                return -EINVAL;

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk || part)
                return -EINVAL;

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EINVAL);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                put_disk(disk);
                /*
                 * If the queue was bypassing, we should retry.  Do so
                 * after a short msleep().  It isn't strictly necessary
                 * but the queue can be bypassing for some time and it's
                 * always nice to avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->v = v;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
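
/*
 * Illustrative sketch (hypothetical "foo" policy, assumed names): a write
 * handler brackets the actual config update with the prep/finish pair;
 * both the RCU read lock and the queue lock are held in between:
 *
 *	static int foo_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				 const char *buf)
 *	{
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(cgroup_to_blkcg(cgrp), &foo_policy,
 *				     buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		foo_apply_limit(ctx.blkg, ctx.v);
 *
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */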

struct cftype blkcg_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);
}

static void blkcg_css_free(struct cgroup *cgroup)
{
        struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

        if (blkcg != &blkcg_root)
                kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
{
        static atomic64_t id_seq = ATOMIC64_INIT(0);
        struct blkcg *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkcg_root;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
        blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        might_sleep();

        return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, cgrp, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}

struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .subsys_id = blkio_subsys_id,
        .base_cftypes = blkcg_files,
        .module = THIS_MODULE,

        /*
         * blkio subsystem is utterly broken in terms of hierarchy support.
         * It treats all cgroups equally regardless of where they're
         * located in the hierarchy - all cgroups are treated as if they're
         * right below the root.  Fix it and remove the following.
         */
        .broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from the IO path.  Update of each blkg is protected by both queue and
 * blkcg locks so that holding either lock and testing blkcg_policy_enabled()
 * is always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        LIST_HEAD(pds);
        struct blkcg_gq *blkg;
        struct blkg_policy_data *pd, *n;
        int cnt = 0, ret;
        bool preloaded;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        /* preallocations for root blkg */
        blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        blk_queue_bypass_start(q);

        /* make sure the root blkg exists and count the existing blkgs */
        spin_lock_irq(q->queue_lock);

        rcu_read_lock();
        blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                goto out_unlock;
        }
        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        list_for_each_entry(blkg, &q->blkg_list, q_node)
                cnt++;

        spin_unlock_irq(q->queue_lock);

        /* allocate policy_data for all existing blkgs */
        while (cnt--) {
                pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
                if (!pd) {
                        ret = -ENOMEM;
                        goto out_free;
                }
                list_add_tail(&pd->alloc_node, &pds);
        }

        /*
         * Install the allocated pds.  With @q bypassing, no new blkg
         * should have been created while the queue lock was dropped.
         */
        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (WARN_ON(list_empty(&pds))) {
                        /* umm... this shouldn't happen, just abort */
                        ret = -ENOMEM;
                        goto out_unlock;
                }
                pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
                list_del_init(&pd->alloc_node);

                /* grab blkcg lock too while installing @pd on @blkg */
                spin_lock(&blkg->blkcg->lock);

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pol->pd_init_fn(blkg);

                spin_unlock(&blkg->blkcg->lock);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
out_unlock:
        spin_unlock_irq(q->queue_lock);
out_free:
        blk_queue_bypass_end(q);
        list_for_each_entry_safe(pd, n, &pds, alloc_node)
                kfree(pd);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
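
/*
 * Illustrative sketch (hypothetical "foo" policy, assumed names): a policy
 * usually activates itself from its per-queue init path and deactivates
 * from the matching exit path:
 *
 *	int foo_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &foo_policy);
 *	}
 *
 *	void foo_exit_queue(struct request_queue *q)
 *	{
 *		blkcg_deactivate_policy(q, &foo_policy);
 *	}
 */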

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        /* if no policy is left, no need for blkgs - shoot them down */
        if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
                blkg_destroy_all(q);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (pol->pd_exit_fn)
                        pol->pd_exit_fn(blkg);

                kfree(blkg->pd[pol->plid]);
                blkg->pd[pol->plid] = NULL;

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        int i, ret;

        if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
                return -EINVAL;

        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto out_unlock;

        /* register and update blkgs */
        pol->plid = i;
        blkcg_policy[i] = pol;

        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
                WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
        ret = 0;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);
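
/*
 * Illustrative sketch (hypothetical "foo" policy, assumed names): a module
 * fills in a descriptor and registers it at init time; ->plid is assigned
 * by blkcg_policy_register():
 *
 *	static struct blkcg_policy foo_policy = {
 *		.pd_size		= sizeof(struct foo_grp),
 *		.cftypes		= foo_files,
 *		.pd_init_fn		= foo_pd_init,
 *		.pd_exit_fn		= foo_pd_exit,
 *		.pd_reset_stats_fn	= foo_pd_reset_stats,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&foo_policy);
 *	}
 */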

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        mutex_lock(&blkcg_pol_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->cftypes)
                cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

        /* unregister and update blkgs */
        blkcg_policy[pol->plid] = NULL;
out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);