linux/block/blk-iolatency.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Block rq-qos base io controller
 *
 * This works similarly to wbt, with a few exceptions:
 *
 * - It's bio based, so the latency covers the whole block layer in addition to
 *   the actual io.
 * - We will throttle all IO that comes in here if we need to.
 * - We use the mean latency over the 100ms window.  This is because writes can
 *   be particularly fast, which could give us a false sense of the impact of
 *   other workloads on our protected workload.
 * - By default there's no throttling, we set the queue_depth to UINT_MAX so
 *   that we can have as many outstanding bio's as we're allowed to.  Only at
 *   throttle time do we pay attention to the actual queue depth.
 *
 * The hierarchy works like the cpu controller does, we track the latency at
 * every configured node, and each configured node has its own independent
 * queue depth.  This means that we only care about our latency targets at the
 * peer level.  Some group at the bottom of the hierarchy isn't going to affect
 * a group at the end of some other path if we're only configured at leaf level.
 *
 * Consider the following
 *
 *                   root blkg
 *             /                     \
 *        fast (target=5ms)     slow (target=10ms)
 *         /     \                  /        \
 *       a        b          normal(15ms)   unloved
 *
 * "a" and "b" have no target, but their combined io under "fast" cannot exceed
 * an average latency of 5ms.  If it does then we will throttle the "slow"
 * group.  In the case of "normal", if it exceeds its 15ms target, we will
 * throttle "unloved", but nobody else.
 *
 * In this example "fast", "slow", and "normal" will be the only groups actually
 * accounting their io latencies.  We have to walk up the hierarchy to the root
 * on every submit and complete so we can do the appropriate stat recording and
 * adjust the queue depth of ourselves if needed.
 *
 * There are 2 ways we throttle IO.
 *
 * 1) Queue depth throttling.  As we throttle down we will adjust the maximum
 * number of IO's we're allowed to have in flight.  This starts at UINT_MAX down
 * to 1.  If the group is only ever submitting IO for itself then this is the
 * only way we throttle.
 *
 * 2) Induced delay throttling.  This is for the case that a group is generating
 * IO that has to be issued by the root cg to avoid priority inversion. So think
 * REQ_META or REQ_SWAP.  If we are already at qd == 1 and we're getting a lot
 * of work done for us on behalf of the root cg and are being asked to scale
 * down more, then we induce a latency at userspace return.  We accumulate the
 * total amount of time we need to be punished by doing
 *
 * total_time += min_lat_nsec - actual_io_completion
 *
 * and then at throttle time will do
 *
 * throttle_time = min(total_time, NSEC_PER_SEC)
 *
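 * As an illustrative example (numbers made up): with min_lat_nsec = 10ms, a
 * root-issued metadata write that completes in 2ms adds 8ms to total_time.
 * After ~150 such IO's total_time reaches ~1.2s, and the next return to
 * userspace is delayed by min(1.2s, NSEC_PER_SEC) = 1 second.
 *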
 * This induced delay will throttle back the activity that is generating the
 * root cg issued io's, whether that's some metadata intensive operation or the
 * group is using so much memory that it is pushing us into swap.
 *
 * Copyright (C) 2018 Josef Bacik
 */
#include <linux/kernel.h>
#include <linux/blk_types.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/memcontrol.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
#include "blk.h"

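/*
 * Groups start out at DEFAULT_SCALE_COOKIE.  A parent lowers the scale cookie
 * shared by its children when one of them misses its latency target, and
 * raises it back as things recover; each child compares the parent's cookie
 * against the copy it saw last to decide whether it has to scale its own
 * queue depth up or down (see check_scale_change() and scale_cookie_change()
 * below).
 */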
#define DEFAULT_SCALE_COOKIE 1000000U

static struct blkcg_policy blkcg_policy_iolatency;
struct iolatency_grp;

struct blk_iolatency {
        struct rq_qos rqos;
        struct timer_list timer;
        atomic_t enabled;
};

static inline struct blk_iolatency *BLKIOLATENCY(struct rq_qos *rqos)
{
        return container_of(rqos, struct blk_iolatency, rqos);
}

static inline bool blk_iolatency_enabled(struct blk_iolatency *blkiolat)
{
        return atomic_read(&blkiolat->enabled) > 0;
}

struct child_latency_info {
        spinlock_t lock;

        /* Last time we adjusted the scale of everybody. */
        u64 last_scale_event;

        /* The latency that we missed. */
        u64 scale_lat;

        /* Total io's from all of our children for the last summation. */
        u64 nr_samples;

        /* The guy who actually changed the latency numbers. */
        struct iolatency_grp *scale_grp;

        /* Cookie to tell if we need to scale up or down. */
        atomic_t scale_cookie;
};

struct percentile_stats {
        u64 total;
        u64 missed;
};

struct latency_stat {
        union {
                struct percentile_stats ps;
                struct blk_rq_stat rqs;
        };
};

struct iolatency_grp {
        struct blkg_policy_data pd;
        struct latency_stat __percpu *stats;
        struct latency_stat cur_stat;
        struct blk_iolatency *blkiolat;
        struct rq_depth rq_depth;
        struct rq_wait rq_wait;
        atomic64_t window_start;
        atomic_t scale_cookie;
        u64 min_lat_nsec;
        u64 cur_win_nsec;

        /* total running average of our io latency. */
        u64 lat_avg;

        /* Our current number of IO's for the last summation. */
        u64 nr_samples;

        bool ssd;
        struct child_latency_info child_lat;
};

#define BLKIOLATENCY_MIN_WIN_SIZE (100 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MAX_WIN_SIZE NSEC_PER_SEC
/*
 * These are the constants used to fake the fixed-point moving average
 * calculation just like load average.  The call to calc_load() folds
 * (FIXED_1 (2048) - exp_factor) * new_sample into lat_avg.  The sampling
 * window size is bucketed to try to approximately calculate average
 * latency such that 1/exp (decay rate) is [1 min, 2.5 min) when windows
 * elapse immediately.  Note, windows only elapse with IO activity.  Idle
 * periods extend the most recent window.
 */
#define BLKIOLATENCY_NR_EXP_FACTORS 5
#define BLKIOLATENCY_EXP_BUCKET_SIZE (BLKIOLATENCY_MAX_WIN_SIZE / \
                                      (BLKIOLATENCY_NR_EXP_FACTORS - 1))
static const u64 iolatency_exp_factors[BLKIOLATENCY_NR_EXP_FACTORS] = {
        2045, // exp(1/600) - 600 samples
        2039, // exp(1/240) - 240 samples
        2031, // exp(1/120) - 120 samples
        2023, // exp(1/80)  - 80 samples
        2014, // exp(1/60)  - 60 samples
};
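
/*
 * Illustrative mapping (see iolat_update_total_lat_avg() below): with
 * BLKIOLATENCY_EXP_BUCKET_SIZE = 250ms, a 100ms window picks index 0
 * (2045, ~1/600 decay), a 500ms window picks index 2, and the maximum
 * 1s window picks index 4 (2014, ~1/60 decay).
 */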

static inline struct iolatency_grp *pd_to_lat(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct iolatency_grp, pd) : NULL;
}

static inline struct iolatency_grp *blkg_to_lat(struct blkcg_gq *blkg)
{
        return pd_to_lat(blkg_to_pd(blkg, &blkcg_policy_iolatency));
}

static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
{
        return pd_to_blkg(&iolat->pd);
}

static inline void latency_stat_init(struct iolatency_grp *iolat,
                                     struct latency_stat *stat)
{
        if (iolat->ssd) {
                stat->ps.total = 0;
                stat->ps.missed = 0;
        } else
                blk_rq_stat_init(&stat->rqs);
}

static inline void latency_stat_sum(struct iolatency_grp *iolat,
                                    struct latency_stat *sum,
                                    struct latency_stat *stat)
{
        if (iolat->ssd) {
                sum->ps.total += stat->ps.total;
                sum->ps.missed += stat->ps.missed;
        } else
                blk_rq_stat_sum(&sum->rqs, &stat->rqs);
}

static inline void latency_stat_record_time(struct iolatency_grp *iolat,
                                            u64 req_time)
{
        struct latency_stat *stat = get_cpu_ptr(iolat->stats);
        if (iolat->ssd) {
                if (req_time >= iolat->min_lat_nsec)
                        stat->ps.missed++;
                stat->ps.total++;
        } else
                blk_rq_stat_add(&stat->rqs, req_time);
        put_cpu_ptr(stat);
}

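/*
 * A window is "ok" on SSDs if fewer than roughly 10% of the IO's in it missed
 * the target latency; on rotational devices we instead compare the window's
 * mean latency against the target.
 */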
static inline bool latency_sum_ok(struct iolatency_grp *iolat,
                                  struct latency_stat *stat)
{
        if (iolat->ssd) {
                u64 thresh = div64_u64(stat->ps.total, 10);
                thresh = max(thresh, 1ULL);
                return stat->ps.missed < thresh;
        }
        return stat->rqs.mean <= iolat->min_lat_nsec;
}

static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
                                       struct latency_stat *stat)
{
        if (iolat->ssd)
                return stat->ps.total;
        return stat->rqs.nr_samples;
}

static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
                                              struct latency_stat *stat)
{
        int exp_idx;

        if (iolat->ssd)
                return;

        /*
         * calc_load() takes in a number stored in fixed point representation.
         * Because we are using this for IO time in ns, the values stored
         * are significantly larger than the FIXED_1 denominator (2048).
         * Therefore, rounding errors in the calculation are negligible and
         * can be ignored.
         */
        exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
                        div64_u64(iolat->cur_win_nsec,
                                  BLKIOLATENCY_EXP_BUCKET_SIZE));
        iolat->lat_avg = calc_load(iolat->lat_avg,
                                   iolatency_exp_factors[exp_idx],
                                   stat->rqs.mean);
}

static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
        atomic_dec(&rqw->inflight);
        wake_up(&rqw->wait);
}

static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
{
        struct iolatency_grp *iolat = private_data;
        return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}

static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
                                       bool issue_as_root,
                                       bool use_memdelay)
{
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);

        if (use_delay)
                blkcg_schedule_throttle(rqos->q, use_memdelay);

        /*
         * To avoid priority inversions we want to just take a slot if we are
         * issuing as root.  If we're being killed off there's no point in
         * delaying things, we may have been killed by OOM so throttling may
         * make recovery take even longer, so just let the IO's through so the
         * task can go away.
         */
        if (issue_as_root || fatal_signal_pending(current)) {
                atomic_inc(&rqw->inflight);
                return;
        }

        rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}

#define SCALE_DOWN_FACTOR 2
#define SCALE_UP_FACTOR 4

static inline unsigned long scale_amount(unsigned long qd, bool up)
{
        return max(up ? qd >> SCALE_UP_FACTOR : qd >> SCALE_DOWN_FACTOR, 1UL);
}
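
/*
 * Illustrative example: with a queue depth of 128, one scale-down step is
 * 128 >> SCALE_DOWN_FACTOR = 32 while one scale-up step is only
 * 128 >> SCALE_UP_FACTOR = 8, so we back off quickly under pressure but
 * recover gradually.
 */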

/*
 * We scale the qd down faster than we scale up, so we need to use this helper
 * to adjust the scale_cookie accordingly so we don't prematurely get
 * scale_cookie at DEFAULT_SCALE_COOKIE and unthrottle too much.
 *
 * Each group has its own local copy of the last scale cookie it saw, so if
 * the global scale cookie goes up or down the group knows which way it needs
 * to move based on its last knowledge of it.
 */
static void scale_cookie_change(struct blk_iolatency *blkiolat,
                                struct child_latency_info *lat_info,
                                bool up)
{
        unsigned long qd = blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = atomic_read(&lat_info->scale_cookie);
        unsigned long max_scale = qd << 1;
        unsigned long diff = 0;

        if (old < DEFAULT_SCALE_COOKIE)
                diff = DEFAULT_SCALE_COOKIE - old;

        if (up) {
                if (scale + old > DEFAULT_SCALE_COOKIE)
                        atomic_set(&lat_info->scale_cookie,
                                   DEFAULT_SCALE_COOKIE);
                else if (diff > qd)
                        atomic_inc(&lat_info->scale_cookie);
                else
                        atomic_add(scale, &lat_info->scale_cookie);
        } else {
                /*
                 * We don't want to dig a hole so deep that it takes us hours to
                 * dig out of it.  Just enough that we don't throttle/unthrottle
                 * with jagged workloads but can still unthrottle once pressure
                 * has sufficiently dissipated.
                 */
                if (diff > qd) {
                        if (diff < max_scale)
                                atomic_dec(&lat_info->scale_cookie);
                } else {
                        atomic_sub(scale, &lat_info->scale_cookie);
                }
        }
}

/*
 * Change the queue depth of the iolatency_grp.  We add 1/16th of the queue
 * depth at a time when scaling up and halve the current depth when scaling
 * down, so we don't get wild swings and hopefully dial in to a fairer
 * distribution of the overall queue depth.
 */
static void scale_change(struct iolatency_grp *iolat, bool up)
{
        unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
        unsigned long scale = scale_amount(qd, up);
        unsigned long old = iolat->rq_depth.max_depth;

        if (old > qd)
                old = qd;

        if (up) {
                if (old == 1 && blkcg_unuse_delay(lat_to_blkg(iolat)))
                        return;

                if (old < qd) {
                        old += scale;
                        old = min(old, qd);
                        iolat->rq_depth.max_depth = old;
                        wake_up_all(&iolat->rq_wait.wait);
                }
        } else {
                old >>= 1;
                iolat->rq_depth.max_depth = max(old, 1UL);
        }
}

/* Check our parent and see if the scale cookie has changed. */
static void check_scale_change(struct iolatency_grp *iolat)
{
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        unsigned int cur_cookie;
        unsigned int our_cookie = atomic_read(&iolat->scale_cookie);
        u64 scale_lat;
        unsigned int old;
        int direction = 0;

        if (lat_to_blkg(iolat)->parent == NULL)
                return;

        parent = blkg_to_lat(lat_to_blkg(iolat)->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;
        cur_cookie = atomic_read(&lat_info->scale_cookie);
        scale_lat = READ_ONCE(lat_info->scale_lat);

        if (cur_cookie < our_cookie)
                direction = -1;
        else if (cur_cookie > our_cookie)
                direction = 1;
        else
                return;

        old = atomic_cmpxchg(&iolat->scale_cookie, our_cookie, cur_cookie);

        /* Somebody beat us to the punch, just bail. */
        if (old != our_cookie)
                return;

        if (direction < 0 && iolat->min_lat_nsec) {
                u64 samples_thresh;

                if (!scale_lat || iolat->min_lat_nsec <= scale_lat)
                        return;

                /*
                 * Sometimes high priority groups are their own worst enemy, so
                 * instead of taking it out on some poor other group that did 5%
                 * or less of the IO's for the last summation just skip this
                 * scale down event.
                 */
                samples_thresh = lat_info->nr_samples * 5;
                samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
                if (iolat->nr_samples <= samples_thresh)
                        return;
        }

        /* We're as low as we can go. */
        if (iolat->rq_depth.max_depth == 1 && direction < 0) {
                blkcg_use_delay(lat_to_blkg(iolat));
                return;
        }

        /* We're back to the default cookie, unthrottle all the things. */
        if (cur_cookie == DEFAULT_SCALE_COOKIE) {
                blkcg_clear_delay(lat_to_blkg(iolat));
                iolat->rq_depth.max_depth = UINT_MAX;
                wake_up_all(&iolat->rq_wait.wait);
                return;
        }

        scale_change(iolat, direction > 0);
}

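/*
 * rq_qos throttle hook, called for every tracked bio at submit time: walk from
 * the bio's blkg up to the root, reacting to any scale cookie change at each
 * level and taking (or waiting for) an in-flight slot at each level on the
 * way up.
 */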
static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg_gq *blkg = bio->bi_blkg;
        bool issue_as_root = bio_issue_as_root_blkg(bio);

        if (!blk_iolatency_enabled(blkiolat))
                return;

        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }

                check_scale_change(iolat);
                __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
        if (!timer_pending(&blkiolat->timer))
                mod_timer(&blkiolat->timer, jiffies + HZ);
}

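/*
 * Record one completed bio against this group's per-cpu window stats.  Bios
 * that were issued as root on behalf of an already throttled group are not
 * accounted; instead, if they completed faster than the group's target, the
 * shortfall is added to the group's delay (the induced delay described at the
 * top of this file).
 */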
static void iolatency_record_time(struct iolatency_grp *iolat,
                                  struct bio_issue *issue, u64 now,
                                  bool issue_as_root)
{
        u64 start = bio_issue_time(issue);
        u64 req_time;

        /*
         * Truncate 'now' the same way the issue time was truncated so the two
         * timestamps are directly comparable.
         */
        now = __bio_issue_time(now);

        if (now <= start)
                return;

        req_time = now - start;

        /*
         * We don't want to count issue_as_root bio's in the cgroups latency
         * statistics as it could skew the numbers downwards.
         */
        if (unlikely(issue_as_root && iolat->rq_depth.max_depth != UINT_MAX)) {
                u64 sub = iolat->min_lat_nsec;
                if (req_time < sub)
                        blkcg_add_delay(lat_to_blkg(iolat), now, sub - req_time);
                return;
        }

        latency_stat_record_time(iolat, req_time);
}

#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
#define BLKIOLATENCY_MIN_GOOD_SAMPLES 5

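/*
 * Called at most once per window from the completion path: fold this group's
 * per-cpu stats into the window totals, update the running average, and if
 * the window was bad (or we are currently scaled down) decide whether the
 * parent's scale cookie should move up or down.
 */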
static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
{
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct iolatency_grp *parent;
        struct child_latency_info *lat_info;
        struct latency_stat stat;
        unsigned long flags;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
                latency_stat_init(iolat, s);
        }
        preempt_enable();

        parent = blkg_to_lat(blkg->parent);
        if (!parent)
                return;

        lat_info = &parent->child_lat;

        iolat_update_total_lat_avg(iolat, &stat);

        /* Everything is ok and we don't need to adjust the scale. */
        if (latency_sum_ok(iolat, &stat) &&
            atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
                return;

        /* Somebody beat us to the punch, just bail. */
        spin_lock_irqsave(&lat_info->lock, flags);

        latency_stat_sum(iolat, &iolat->cur_stat, &stat);
        lat_info->nr_samples -= iolat->nr_samples;
        lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
        iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);

        if ((lat_info->last_scale_event >= now ||
            now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
                goto out;

        if (latency_sum_ok(iolat, &iolat->cur_stat) &&
            latency_sum_ok(iolat, &stat)) {
                if (latency_stat_samples(iolat, &iolat->cur_stat) <
                    BLKIOLATENCY_MIN_GOOD_SAMPLES)
                        goto out;
                if (lat_info->scale_grp == iolat) {
                        lat_info->last_scale_event = now;
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                }
        } else if (lat_info->scale_lat == 0 ||
                   lat_info->scale_lat >= iolat->min_lat_nsec) {
                lat_info->last_scale_event = now;
                if (!lat_info->scale_grp ||
                    lat_info->scale_lat > iolat->min_lat_nsec) {
                        WRITE_ONCE(lat_info->scale_lat, iolat->min_lat_nsec);
                        lat_info->scale_grp = iolat;
                }
                scale_cookie_change(iolat->blkiolat, lat_info, false);
        }
        latency_stat_init(iolat, &iolat->cur_stat);
out:
        spin_unlock_irqrestore(&lat_info->lock, flags);
}

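/*
 * rq_qos done_bio hook: walk back up the hierarchy releasing the in-flight
 * slot taken at submit time at each level, record the bio's latency, and if
 * this group's window has elapsed, close it out and check the latencies.
 */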
static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        struct blkcg_gq *blkg;
        struct rq_wait *rqw;
        struct iolatency_grp *iolat;
        u64 window_start;
        u64 now;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
        bool enabled = false;
        int inflight = 0;

        blkg = bio->bi_blkg;
        if (!blkg || !bio_flagged(bio, BIO_TRACKED))
                return;

        iolat = blkg_to_lat(bio->bi_blkg);
        if (!iolat)
                return;

        enabled = blk_iolatency_enabled(iolat->blkiolat);
        if (!enabled)
                return;

        now = ktime_to_ns(ktime_get());
        while (blkg && blkg->parent) {
                iolat = blkg_to_lat(blkg);
                if (!iolat) {
                        blkg = blkg->parent;
                        continue;
                }
                rqw = &iolat->rq_wait;

                inflight = atomic_dec_return(&rqw->inflight);
                WARN_ON_ONCE(inflight < 0);
                /*
                 * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
                 * submitted, so do not account for it.
                 */
                if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
                        iolatency_record_time(iolat, &bio->bi_issue, now,
                                              issue_as_root);
                        window_start = atomic64_read(&iolat->window_start);
                        if (now > window_start &&
                            (now - window_start) >= iolat->cur_win_nsec) {
                                if (atomic64_cmpxchg(&iolat->window_start,
                                             window_start, now) == window_start)
                                        iolatency_check_latencies(iolat, now);
                        }
                }
                wake_up(&rqw->wait);
                blkg = blkg->parent;
        }
}

static void blkcg_iolatency_exit(struct rq_qos *rqos)
{
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);

        del_timer_sync(&blkiolat->timer);
        blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
        kfree(blkiolat);
}

static struct rq_qos_ops blkcg_iolatency_ops = {
        .throttle = blkcg_iolatency_throttle,
        .done_bio = blkcg_iolatency_done_bio,
        .exit = blkcg_iolatency_exit,
};

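/*
 * Periodic (1s) timer armed from the throttle path: for every parent whose
 * children are currently scaled down, scale the cookie back up if nobody owns
 * the scale down any more, and drop ownership once the owning group has gone
 * 5 seconds without a scale event, so an idle group doesn't keep its siblings
 * throttled forever.
 */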
static void blkiolatency_timer_fn(struct timer_list *t)
{
        struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
        struct blkcg_gq *blkg;
        struct cgroup_subsys_state *pos_css;
        u64 now = ktime_to_ns(ktime_get());

        rcu_read_lock();
        blkg_for_each_descendant_pre(blkg, pos_css,
                                     blkiolat->rqos.q->root_blkg) {
                struct iolatency_grp *iolat;
                struct child_latency_info *lat_info;
                unsigned long flags;
                u64 cookie;

                /*
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
                if (!blkg_tryget(blkg))
                        continue;

                iolat = blkg_to_lat(blkg);
                if (!iolat)
                        goto next;

                lat_info = &iolat->child_lat;
                cookie = atomic_read(&lat_info->scale_cookie);

                if (cookie >= DEFAULT_SCALE_COOKIE)
                        goto next;

                spin_lock_irqsave(&lat_info->lock, flags);
                if (lat_info->last_scale_event >= now)
                        goto next_lock;

                /*
                 * We scaled down but don't have a scale_grp, scale up and carry
                 * on.
                 */
                if (lat_info->scale_grp == NULL) {
                        scale_cookie_change(iolat->blkiolat, lat_info, true);
                        goto next_lock;
                }

                /*
                 * It's been 5 seconds since our last scale event, clear the
                 * scale grp in case the group that needed the scale down isn't
                 * doing any IO currently.
                 */
                if (now - lat_info->last_scale_event >=
                    ((u64)NSEC_PER_SEC * 5))
                        lat_info->scale_grp = NULL;
next_lock:
                spin_unlock_irqrestore(&lat_info->lock, flags);
next:
                blkg_put(blkg);
        }
        rcu_read_unlock();
}

int blk_iolatency_init(struct request_queue *q)
{
        struct blk_iolatency *blkiolat;
        struct rq_qos *rqos;
        int ret;

        blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
        if (!blkiolat)
                return -ENOMEM;

        rqos = &blkiolat->rqos;
        rqos->id = RQ_QOS_LATENCY;
        rqos->ops = &blkcg_iolatency_ops;
        rqos->q = q;

        rq_qos_add(q, rqos);

        ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
        if (ret) {
                rq_qos_del(q, rqos);
                kfree(blkiolat);
                return ret;
        }

        timer_setup(&blkiolat->timer, blkiolatency_timer_fn, 0);

        return 0;
}

/*
 * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
 * return 0.
 */
static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
        struct iolatency_grp *iolat = blkg_to_lat(blkg);
        u64 oldval = iolat->min_lat_nsec;

        iolat->min_lat_nsec = val;
        iolat->cur_win_nsec = max_t(u64, val << 4, BLKIOLATENCY_MIN_WIN_SIZE);
        iolat->cur_win_nsec = min_t(u64, iolat->cur_win_nsec,
                                    BLKIOLATENCY_MAX_WIN_SIZE);

        if (!oldval && val)
                return 1;
        if (oldval && !val) {
                blkcg_clear_delay(blkg);
                return -1;
        }
        return 0;
}

static void iolatency_clear_scaling(struct blkcg_gq *blkg)
{
        if (blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg->parent);
                struct child_latency_info *lat_info;
                if (!iolat)
                        return;

                lat_info = &iolat->child_lat;
                spin_lock(&lat_info->lock);
                atomic_set(&lat_info->scale_cookie, DEFAULT_SCALE_COOKIE);
                lat_info->last_scale_event = 0;
                lat_info->scale_grp = NULL;
                lat_info->scale_lat = 0;
                spin_unlock(&lat_info->lock);
        }
}

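/*
 * Write handler for the cgroup v2 "io.latency" file.  The expected input is
 * "MAJ:MIN target=<usec>" (or target=max to clear the target), e.g. something
 * like
 *
 *      echo "8:16 target=2000" > io.latency
 *
 * which would set a 2ms latency target on that device for this cgroup.
 */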
static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
                             size_t nbytes, loff_t off)
{
        struct blkcg *blkcg = css_to_blkcg(of_css(of));
        struct blkcg_gq *blkg;
        struct blkg_conf_ctx ctx;
        struct iolatency_grp *iolat;
        char *p, *tok;
        u64 lat_val = 0;
        u64 oldval;
        int ret;
        int enable = 0;

        ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
        if (ret)
                return ret;

        iolat = blkg_to_lat(ctx.blkg);
        p = ctx.body;

        ret = -EINVAL;
        while ((tok = strsep(&p, " "))) {
                char key[16];
                char val[21];   /* 18446744073709551616 */

                if (sscanf(tok, "%15[^=]=%20s", key, val) != 2)
                        goto out;

                if (!strcmp(key, "target")) {
                        u64 v;

                        if (!strcmp(val, "max"))
                                lat_val = 0;
                        else if (sscanf(val, "%llu", &v) == 1)
                                lat_val = v * NSEC_PER_USEC;
                        else
                                goto out;
                } else {
                        goto out;
                }
        }

        /* Walk up the tree to see if our new val is lower than it should be. */
        blkg = ctx.blkg;
        oldval = iolat->min_lat_nsec;

        enable = iolatency_set_min_lat_nsec(blkg, lat_val);
        if (enable) {
                if (!blk_get_queue(blkg->q)) {
                        ret = -ENODEV;
                        goto out;
                }

                blkg_get(blkg);
        }

        if (oldval != iolat->min_lat_nsec) {
                iolatency_clear_scaling(blkg);
        }

        ret = 0;
out:
        blkg_conf_finish(&ctx);
        if (ret == 0 && enable) {
                struct iolatency_grp *tmp = blkg_to_lat(blkg);
                struct blk_iolatency *blkiolat = tmp->blkiolat;

                blk_mq_freeze_queue(blkg->q);

                if (enable == 1)
                        atomic_inc(&blkiolat->enabled);
                else if (enable == -1)
                        atomic_dec(&blkiolat->enabled);
                else
                        WARN_ON_ONCE(1);

                blk_mq_unfreeze_queue(blkg->q);

                blkg_put(blkg);
                blk_put_queue(blkg->q);
        }
        return ret ?: nbytes;
}

static u64 iolatency_prfill_limit(struct seq_file *sf,
                                  struct blkg_policy_data *pd, int off)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname || !iolat->min_lat_nsec)
                return 0;
        seq_printf(sf, "%s target=%llu\n",
                   dname, div_u64(iolat->min_lat_nsec, NSEC_PER_USEC));
        return 0;
}

static int iolatency_print_limit(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          iolatency_prfill_limit,
                          &blkcg_policy_iolatency, seq_cft(sf)->private, false);
        return 0;
}

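/*
 * Per-group debug statistics appended to the io.stat output; only emitted
 * when the blkcg_debug_stats knob is enabled (see iolatency_pd_stat() below).
 */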
static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
                                 size_t size)
{
        struct latency_stat stat;
        int cpu;

        latency_stat_init(iolat, &stat);
        preempt_disable();
        for_each_online_cpu(cpu) {
                struct latency_stat *s;
                s = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_sum(iolat, &stat, s);
        }
        preempt_enable();

        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
                                 (unsigned long long)stat.ps.missed,
                                 (unsigned long long)stat.ps.total);
        return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
                         (unsigned long long)stat.ps.missed,
                         (unsigned long long)stat.ps.total,
                         iolat->rq_depth.max_depth);
}

static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
                                size_t size)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        unsigned long long avg_lat;
        unsigned long long cur_win;

        if (!blkcg_debug_stats)
                return 0;

        if (iolat->ssd)
                return iolatency_ssd_stat(iolat, buf, size);

        avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
        cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
        if (iolat->rq_depth.max_depth == UINT_MAX)
                return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
                                 avg_lat, cur_win);

        return scnprintf(buf, size, " depth=%u avg_lat=%llu win=%llu",
                         iolat->rq_depth.max_depth, avg_lat, cur_win);
}


static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
                                                   struct request_queue *q,
                                                   struct blkcg *blkcg)
{
        struct iolatency_grp *iolat;

        iolat = kzalloc_node(sizeof(*iolat), gfp, q->node);
        if (!iolat)
                return NULL;
        iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
                                       __alignof__(struct latency_stat), gfp);
        if (!iolat->stats) {
                kfree(iolat);
                return NULL;
        }
        return &iolat->pd;
}

static void iolatency_pd_init(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        if (blk_queue_nonrot(blkg->q))
                iolat->ssd = true;
        else
                iolat->ssd = false;

        for_each_possible_cpu(cpu) {
                struct latency_stat *stat;
                stat = per_cpu_ptr(iolat->stats, cpu);
                latency_stat_init(iolat, stat);
        }

        latency_stat_init(iolat, &iolat->cur_stat);
        rq_wait_init(&iolat->rq_wait);
        spin_lock_init(&iolat->child_lat.lock);
        iolat->rq_depth.queue_depth = blkg->q->nr_requests;
        iolat->rq_depth.max_depth = UINT_MAX;
        iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
        iolat->blkiolat = blkiolat;
        iolat->cur_win_nsec = 100 * NSEC_PER_MSEC;
        atomic64_set(&iolat->window_start, now);

        /*
         * We init things in list order, so the pd for the parent may not be
         * init'ed yet for whatever reason.
         */
        if (blkg->parent && blkg_to_pd(blkg->parent, &blkcg_policy_iolatency)) {
                struct iolatency_grp *parent = blkg_to_lat(blkg->parent);
                atomic_set(&iolat->scale_cookie,
                           atomic_read(&parent->child_lat.scale_cookie));
        } else {
                atomic_set(&iolat->scale_cookie, DEFAULT_SCALE_COOKIE);
        }

        atomic_set(&iolat->child_lat.scale_cookie, DEFAULT_SCALE_COOKIE);
}

static void iolatency_pd_offline(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        struct blkcg_gq *blkg = lat_to_blkg(iolat);
        struct blk_iolatency *blkiolat = iolat->blkiolat;
        int ret;

        ret = iolatency_set_min_lat_nsec(blkg, 0);
        if (ret == 1)
                atomic_inc(&blkiolat->enabled);
        if (ret == -1)
                atomic_dec(&blkiolat->enabled);
        iolatency_clear_scaling(blkg);
}

static void iolatency_pd_free(struct blkg_policy_data *pd)
{
        struct iolatency_grp *iolat = pd_to_lat(pd);
        free_percpu(iolat->stats);
        kfree(iolat);
}

static struct cftype iolatency_files[] = {
        {
                .name = "latency",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = iolatency_print_limit,
                .write = iolatency_set_limit,
        },
        {}
};

static struct blkcg_policy blkcg_policy_iolatency = {
        .dfl_cftypes    = iolatency_files,
        .pd_alloc_fn    = iolatency_pd_alloc,
        .pd_init_fn     = iolatency_pd_init,
        .pd_offline_fn  = iolatency_pd_offline,
        .pd_free_fn     = iolatency_pd_free,
        .pd_stat_fn     = iolatency_pd_stat,
};

static int __init iolatency_init(void)
{
        return blkcg_policy_register(&blkcg_policy_iolatency);
}

static void __exit iolatency_exit(void)
{
        blkcg_policy_unregister(&blkcg_policy_iolatency);
}

module_init(iolatency_init);
module_exit(iolatency_exit);