linux/block/blk-cgroup.h
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN          10
#define CFQ_WEIGHT_MAX          1000
#define CFQ_WEIGHT_DEFAULT      500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq                 *blkg_hint;
        struct hlist_head               blkg_list;

        /* for policies to test whether associated blkcg has changed */
        uint64_t                        id;

        /* TODO: per-policy storage in blkcg */
        unsigned int                    cfq_weight;     /* belongs to cfq */
};

struct blkg_stat {
        struct u64_stats_sync           syncp;
        uint64_t                        cnt;
};

struct blkg_rwstat {
        struct u64_stats_sync           syncp;
        uint64_t                        cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning, and pd_size can't be smaller than the size of pd.
 */
struct blkg_policy_data {
        /* the blkg this per-policy data belongs to */
        struct blkcg_gq                 *blkg;

        /* used during policy activation */
        struct list_head                alloc_node;
};
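
/*
 * Example (illustrative sketch, not part of this header): a policy's
 * private data embeds struct blkg_policy_data as its first member and
 * reports the size of the containing structure via pd_size.  "foo" is a
 * hypothetical policy used only for illustration:
 *
 *      struct foo_group {
 *              struct blkg_policy_data pd;     <- must be the first member
 *              unsigned int            weight; <- policy-specific state
 *              struct blkg_stat        time;   <- cumulative service time
 *              struct blkg_rwstat      served; <- per-direction byte counts
 *      };
 *
 * blkcg core allocates pd_size bytes per blkg, so
 * container_of(pd, struct foo_group, pd) recovers the policy data.
 */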

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;
        /* reference count */
        int                             refcnt;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        struct rcu_head                 rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
        int                             plid;
        /* policy specific private data size */
        size_t                          pd_size;
        /* cgroup files for the policy */
        struct cftype                   *cftypes;

        /* operations */
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_exit_pd_fn            *pd_exit_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
};
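
/*
 * Example (illustrative sketch): the hypothetical "foo" policy describes
 * itself to blkcg core with a blkcg_policy.  plid is filled in by
 * blkcg_policy_register(); only pd_size and the callbacks the policy
 * actually needs have to be set:
 *
 *      static struct blkcg_policy blkcg_policy_foo = {
 *              .pd_size        = sizeof(struct foo_group),
 *              .cftypes        = foo_files,
 *              .pd_init_fn     = foo_pd_init,
 *              .pd_exit_fn     = foo_pd_exit,
 *      };
 */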

extern struct blkcg blkcg_root;

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
struct blkcg *bio_blkcg(struct bio *bio);
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);
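
/*
 * Example (illustrative sketch): a policy is typically registered once at
 * module init and then activated on each request_queue it should manage;
 * error handling is elided and "foo" remains hypothetical:
 *
 *      static int __init foo_init(void)
 *      {
 *              return blkcg_policy_register(&blkcg_policy_foo);
 *      }
 *
 * and later, for a given queue:
 *
 *      ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *
 * Activation allocates per-policy data for the blkgs on @q; deactivation
 * frees it again.
 */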

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);
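
/*
 * Example (illustrative sketch): a cgroup file's read method can dump one
 * value per blkg by passing a prfill callback to blkcg_print_blkgs().
 * blkg_prfill_rwstat() can be used directly when @off is the offset of a
 * blkg_rwstat inside the policy's private data (the hypothetical
 * struct foo_group from above):
 *
 *      blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_foo,
 *                        offsetof(struct foo_group, served), true);
 *
 * With show_total == true, a "Total" line summing all blkgs is appended.
 */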

struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
        u64                             v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
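
/*
 * Example (illustrative sketch): a cgroup file's write method parses
 * "MAJ:MIN VAL" input with blkg_conf_prep(), which returns with the
 * queue lock held on success, applies the new setting, and then drops
 * the lock and disk reference via blkg_conf_finish():
 *
 *      struct blkg_conf_ctx ctx;
 *      int ret;
 *
 *      ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *      if (ret)
 *              return ret;
 *
 *      ... update blkg_to_pd(ctx.blkg, &blkcg_policy_foo) using ctx.v ...
 *
 *      blkg_conf_finish(&ctx);
 */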

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        int ret;

        rcu_read_lock();
        ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
        rcu_read_unlock();
        if (ret)
                strncpy(buf, "<unavailable>", buflen);
        return ret;
}
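
/*
 * Example (illustrative sketch): policies commonly use blkg_path() when
 * emitting diagnostics:
 *
 *      char path[128];
 *
 *      blkg_path(blkg, path, sizeof(path));
 *      pr_debug("foo: dispatching for %s\n", path);
 */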

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        lockdep_assert_held(blkg->q->queue_lock);
        WARN_ON_ONCE(!blkg->refcnt);
        blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        lockdep_assert_held(blkg->q->queue_lock);
        WARN_ON_ONCE(blkg->refcnt <= 0);
        if (!--blkg->refcnt)
                __blkg_release(blkg);
}
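
/*
 * Example (illustrative sketch): a policy that keeps a blkg pointer
 * beyond the current queue_lock section takes its own reference and
 * drops it under the same lock later:
 *
 *      spin_lock_irq(q->queue_lock);
 *      blkg_get(blkg);                 <- caller already holds a reference
 *      spin_unlock_irq(q->queue_lock);
 *
 *      ... use blkg ...
 *
 *      spin_lock_irq(q->queue_lock);
 *      blkg_put(blkg);
 *      spin_unlock_irq(q->queue_lock);
 */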

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        u64_stats_update_begin(&stat->syncp);
        stat->cnt += val;
        u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        unsigned int start;
        uint64_t v;

        do {
                start = u64_stats_fetch_begin(&stat->syncp);
                v = stat->cnt;
        } while (u64_stats_fetch_retry(&stat->syncp, start));

        return v;
}
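
/*
 * Example (illustrative sketch): updates are serialized by the caller
 * (e.g. under queue_lock) while reads may come from any context; the
 * u64_stats_sync sequence counter keeps 32-bit readers from seeing a
 * torn 64-bit value.  Using the hypothetical foo_group from above:
 *
 *      blkg_stat_add(&fg->time, jiffies - start);      <- updater, locked
 *
 *      total += blkg_stat_read(&fg->time);             <- lockless reader
 */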

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   int rw, uint64_t val)
{
        u64_stats_update_begin(&rwstat->syncp);

        if (rw & REQ_WRITE)
                rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_READ] += val;
        if (rw & REQ_SYNC)
                rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

        u64_stats_update_end(&rwstat->syncp);
}
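
/*
 * Example (illustrative sketch): on request completion, a policy can
 * account the transfer by direction and sync-ness straight from the
 * request flags:
 *
 *      blkg_rwstat_add(&fg->served, rq->cmd_flags, blk_rq_bytes(rq));
 *
 * REQ_WRITE picks the WRITE counter (READ otherwise) and REQ_SYNC the
 * SYNC counter (ASYNC otherwise), so each event lands in exactly one
 * counter of each pair.
 */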

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read and return the current snapshot of @rwstat.  This function can be
 * called without synchronization and takes care of u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        unsigned int start;
        struct blkg_rwstat tmp;

        do {
                start = u64_stats_fetch_begin(&rwstat->syncp);
                tmp = *rwstat;
        } while (u64_stats_fetch_retry(&rwstat->syncp, start));

        return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else   /* CONFIG_BLK_CGROUP */

struct cgroup;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return -1; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */