linux/block/blk-rq-qos.c
// SPDX-License-Identifier: GPL-2.0

#include "blk-rq-qos.h"

/*
 * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
 * false if 'v' + 1 would be bigger than 'below'.
 */
static bool atomic_inc_below(atomic_t *v, unsigned int below)
{
        unsigned int cur = atomic_read(v);

        for (;;) {
                unsigned int old;

                if (cur >= below)
                        return false;
                old = atomic_cmpxchg(v, cur, cur + 1);
                if (old == cur)
                        break;
                cur = old;
        }

        return true;
}

bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
{
        return atomic_inc_below(&rq_wait->inflight, limit);
}
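
/*
 * Illustrative sketch, not part of the original file ('my_rqw' and
 * 'MY_LIMIT' are hypothetical names): a QoS policy that caps in-flight
 * requests can try the bounded increment first and only sleep when the
 * budget is already used up:
 *
 *      if (rq_wait_inc_below(&my_rqw, MY_LIMIT))
 *              dispatch the request;
 *      else
 *              throttle, e.g. via rq_qos_wait() below;
 */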

void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
        do {
                if (rqos->ops->cleanup)
                        rqos->ops->cleanup(rqos, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
        do {
                if (rqos->ops->done)
                        rqos->ops->done(rqos, rq);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
        do {
                if (rqos->ops->issue)
                        rqos->ops->issue(rqos, rq);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
        do {
                if (rqos->ops->requeue)
                        rqos->ops->requeue(rqos, rq);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
        do {
                if (rqos->ops->throttle)
                        rqos->ops->throttle(rqos, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        do {
                if (rqos->ops->track)
                        rqos->ops->track(rqos, rq, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
        do {
                if (rqos->ops->merge)
                        rqos->ops->merge(rqos, rq, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        do {
                if (rqos->ops->done_bio)
                        rqos->ops->done_bio(rqos, bio);
                rqos = rqos->next;
        } while (rqos);
}

void __rq_qos_queue_depth_changed(struct rq_qos *rqos)
{
        do {
                if (rqos->ops->queue_depth_changed)
                        rqos->ops->queue_depth_changed(rqos);
                rqos = rqos->next;
        } while (rqos);
}

/*
 * Return true if we can't increase the depth further by scaling.
 */
bool rq_depth_calc_max_depth(struct rq_depth *rqd)
{
        unsigned int depth;
        bool ret = false;

        /*
         * For QD=1 devices, this is a special case. It's important for those
         * to have one request ready when one completes, so force a depth of
         * 2 for those devices. On the backend, it'll be a depth of 1 anyway,
         * since the device can't have more than that in flight. If we're
         * scaling down, then keep a setting of 1/1/1.
         */
        if (rqd->queue_depth == 1) {
                if (rqd->scale_step > 0)
                        rqd->max_depth = 1;
                else {
                        rqd->max_depth = 2;
                        ret = true;
                }
        } else {
                /*
                 * scale_step == 0 is our default state. If we have suffered
                 * latency spikes, step will be > 0, and we shrink the
                 * allowed write depths. If step is < 0, we're only doing
                 * writes, and we allow a temporarily higher depth to
                 * increase performance.
                 */
                depth = min_t(unsigned int, rqd->default_depth,
                              rqd->queue_depth);
                if (rqd->scale_step > 0)
                        depth = 1 + ((depth - 1) >> min(31, rqd->scale_step));
                else if (rqd->scale_step < 0) {
                        unsigned int maxd = 3 * rqd->queue_depth / 4;

                        depth = 1 + ((depth - 1) << -rqd->scale_step);
                        if (depth > maxd) {
                                depth = maxd;
                                ret = true;
                        }
                }

                rqd->max_depth = depth;
        }

        return ret;
}
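
/*
 * Worked example (illustrative numbers, not from the original file): with
 * default_depth = 64 and queue_depth = 128, scale_step = 2 yields
 * depth = 1 + ((64 - 1) >> 2) = 16, while scale_step = -1 yields
 * depth = 1 + ((64 - 1) << 1) = 127, which exceeds 3 * 128 / 4 = 96, so
 * max_depth is clamped to 96 and the function returns true.
 */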

/* Returns true on success and false if scaling up wasn't possible */
bool rq_depth_scale_up(struct rq_depth *rqd)
{
        /*
         * Hit max in previous round, stop here
         */
        if (rqd->scaled_max)
                return false;

        rqd->scale_step--;

        rqd->scaled_max = rq_depth_calc_max_depth(rqd);
        return true;
}

/*
 * Scale the depth down. If 'hard_throttle' is set, do it quicker, since we
 * had a latency violation. Returns true on success and false if scaling
 * down wasn't possible.
 */
bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
{
        /*
         * Stop scaling down when we've hit the limit. This also prevents
         * ->scale_step from going to crazy values, if the device can't
         * keep up.
         */
        if (rqd->max_depth == 1)
                return false;

        if (rqd->scale_step < 0 && hard_throttle)
                rqd->scale_step = 0;
        else
                rqd->scale_step++;

        rqd->scaled_max = false;
        rq_depth_calc_max_depth(rqd);
        return true;
}
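
/*
 * Illustrative sequence (hypothetical numbers, not from the original file):
 * starting at scale_step = 0 with max_depth = 64, a latency violation makes
 * the caller invoke rq_depth_scale_down(), so scale_step becomes 1 and
 * max_depth drops to 1 + ((64 - 1) >> 1) = 32. Once latencies recover,
 * rq_depth_scale_up() steps back to 0 and restores 64; further scale-ups go
 * negative until rq_depth_calc_max_depth() hits the 3/4 queue-depth cap,
 * sets scaled_max, and the next scale-up attempt returns false.
 */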

struct rq_qos_wait_data {
        struct wait_queue_entry wq;
        struct task_struct *task;
        struct rq_wait *rqw;
        acquire_inflight_cb_t *cb;
        void *private_data;
        bool got_token;
};

static int rq_qos_wake_function(struct wait_queue_entry *curr,
                                unsigned int mode, int wake_flags, void *key)
{
        struct rq_qos_wait_data *data = container_of(curr,
                                                     struct rq_qos_wait_data,
                                                     wq);

        /*
         * If we fail to get a budget, return -1 to interrupt the wake up loop
         * in __wake_up_common.
         */
        if (!data->cb(data->rqw, data->private_data))
                return -1;

        data->got_token = true;
        smp_wmb();
        list_del_init(&curr->entry);
        wake_up_process(data->task);
        return 1;
}

/**
 * rq_qos_wait - throttle on a rqw if we need to
 * @rqw: rqw to throttle on
 * @private_data: caller-provided specific data
 * @acquire_inflight_cb: inc the rqw->inflight counter if we can
 * @cleanup_cb: the callback to cleanup in case we race with a waker
 *
 * This provides a uniform place for the rq_qos users to do their throttling.
 * Since you can end up with a lot of things sleeping at once, this manages the
 * waking up based on the resources available. The acquire_inflight_cb should
 * increment rqw->inflight if it can, or return false if not, in which case we
 * will sleep until room becomes available.
 *
 * cleanup_cb is for the case where we race with a waker and need to adjust
 * the inflight count accordingly.
 */
void rq_qos_wait(struct rq_wait *rqw, void *private_data,
                 acquire_inflight_cb_t *acquire_inflight_cb,
                 cleanup_cb_t *cleanup_cb)
{
        struct rq_qos_wait_data data = {
                .wq = {
                        .func   = rq_qos_wake_function,
                        .entry  = LIST_HEAD_INIT(data.wq.entry),
                },
                .task = current,
                .rqw = rqw,
                .cb = acquire_inflight_cb,
                .private_data = private_data,
        };
        bool has_sleeper;

        has_sleeper = wq_has_sleeper(&rqw->wait);
        if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
                return;

        has_sleeper = !prepare_to_wait_exclusive(&rqw->wait, &data.wq,
                                                 TASK_UNINTERRUPTIBLE);
        do {
                /* The memory barrier in set_current_state() saves us here. */
                if (data.got_token)
                        break;
                if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
                        finish_wait(&rqw->wait, &data.wq);

                        /*
                         * We raced with rq_qos_wake_function() getting a
                         * token, which means we now have two. Put our local
                         * token and wake anyone else potentially waiting for
                         * one.
                         */
                        smp_rmb();
                        if (data.got_token)
                                cleanup_cb(rqw, private_data);
                        break;
                }
                io_schedule();
                has_sleeper = true;
                set_current_state(TASK_UNINTERRUPTIBLE);
        } while (1);
        finish_wait(&rqw->wait, &data.wq);
}
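
/*
 * Usage sketch (illustrative only; 'my_inflight_cb', 'my_cleanup_cb' and
 * 'my_limit' are hypothetical names, not symbols from this file): a QoS
 * policy typically passes an acquire callback that tries the bounded
 * increment and a cleanup callback that gives a token back if the waiter
 * raced with a waker:
 *
 *      static bool my_inflight_cb(struct rq_wait *rqw, void *private_data)
 *      {
 *              return rq_wait_inc_below(rqw, my_limit);
 *      }
 *
 *      static void my_cleanup_cb(struct rq_wait *rqw, void *private_data)
 *      {
 *              atomic_dec(&rqw->inflight);
 *      }
 *
 *      rq_qos_wait(rqw, private_data, my_inflight_cb, my_cleanup_cb);
 */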

void rq_qos_exit(struct request_queue *q)
{
        blk_mq_debugfs_unregister_queue_rqos(q);

        while (q->rq_qos) {
                struct rq_qos *rqos = q->rq_qos;
                q->rq_qos = rqos->next;
                rqos->ops->exit(rqos);
        }
}