// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to sequence PREFLUSH and FUA writes.
 *
 * Copyright (C) 2011           Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011           Tejun Heo <tj@kernel.org>
 *
 * REQ_{PREFLUSH|FUA} requests are decomposed into sequences consisting of
 * three optional steps - PREFLUSH, DATA and POSTFLUSH - according to the
 * request properties and hardware capabilities.
 *
 * If a request doesn't have data, only REQ_PREFLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_PREFLUSH indicates
 * that the device cache should be flushed before the data is written, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, PREFLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_PREFLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_PREFLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it is queued at
 * fq->flush_queue[fq->flush_pending_idx].  Once certain criteria are met, a
 * REQ_OP_FLUSH is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of PREFLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream of
 *     FUA (without PREFLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced PREFLUSH/FUA request with DATA is completed twice:
 * once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing RQF_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each PREFLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
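
/*
 * Illustrative example (not part of this file's logic): a filesystem journal
 * commit is the typical producer of the kind of bio sequenced here, e.g.
 *
 *      bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA;
 *      submit_bio(bio);
 *
 * On a device with a writeback cache and no FUA support, that single bio is
 * decomposed into the full PREFLUSH -> DATA -> POSTFLUSH sequence described
 * above; with FUA support, only PREFLUSH -> DATA is needed.
 */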

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/part_stat.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/* PREFLUSH/FUA sequences */
enum {
        REQ_FSEQ_PREFLUSH       = (1 << 0), /* pre-flushing in progress */
        REQ_FSEQ_DATA           = (1 << 1), /* data write in progress */
        REQ_FSEQ_POSTFLUSH      = (1 << 2), /* post-flushing in progress */
        REQ_FSEQ_DONE           = (1 << 3),

        REQ_FSEQ_ACTIONS        = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
                                  REQ_FSEQ_POSTFLUSH,

        /*
         * If flush has been pending longer than the following timeout,
         * it's issued even if flush_data requests are still in flight.
         */
        FLUSH_PENDING_TIMEOUT   = 5 * HZ,
};

static void blk_kick_flush(struct request_queue *q,
                           struct blk_flush_queue *fq, blk_opf_t flags);

static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
        return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
}

static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
        unsigned int policy = 0;

        if (blk_rq_sectors(rq))
                policy |= REQ_FSEQ_DATA;

        if (fflags & (1UL << QUEUE_FLAG_WC)) {
                if (rq->cmd_flags & REQ_PREFLUSH)
                        policy |= REQ_FSEQ_PREFLUSH;
                if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
                    (rq->cmd_flags & REQ_FUA))
                        policy |= REQ_FSEQ_POSTFLUSH;
        }
        return policy;
}
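
/*
 * Worked example (illustrative; the values follow from the checks above): for
 * a data-carrying REQ_PREFLUSH | REQ_FUA write,
 *
 *      no writeback cache:       policy == REQ_FSEQ_DATA
 *      writeback cache + FUA:    policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA
 *      writeback cache, no FUA:  policy == REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
 *                                          REQ_FSEQ_POSTFLUSH
 */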

static unsigned int blk_flush_cur_seq(struct request *rq)
{
        return 1 << ffz(rq->flush.seq);
}
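
/*
 * Example (illustrative): blk_flush_cur_seq() returns the lowest step not yet
 * recorded in rq->flush.seq.  With seq == REQ_FSEQ_PREFLUSH (0x1), ffz()
 * yields bit 1, so the current step is REQ_FSEQ_DATA (0x2); with seq ==
 * REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA (0x3) it is REQ_FSEQ_POSTFLUSH (0x4).
 * Steps not required by the policy are pre-marked in blk_insert_flush() via
 * REQ_FSEQ_ACTIONS & ~policy, so ffz() skips them.
 */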

static void blk_flush_restore_request(struct request *rq)
{
        /*
         * After flush data completion, @rq->bio is %NULL but we need to
         * complete the bio again.  @rq->biotail is guaranteed to equal the
         * original @rq->bio.  Restore it.
         */
        rq->bio = rq->biotail;

        /* make @rq a normal request */
        rq->rq_flags &= ~RQF_FLUSH_SEQ;
        rq->end_io = rq->flush.saved_end_io;
}

static void blk_account_io_flush(struct request *rq)
{
        struct block_device *part = rq->q->disk->part0;

        part_stat_lock();
        part_stat_inc(part, ios[STAT_FLUSH]);
        part_stat_add(part, nsecs[STAT_FLUSH],
                      ktime_get_ns() - rq->start_time_ns);
        part_stat_unlock();
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: PREFLUSH/FUA request being sequenced
 * @fq: flush queue
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_flush_complete_seq(struct request *rq,
                                   struct blk_flush_queue *fq,
                                   unsigned int seq, blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        blk_opf_t cmd_flags;

        BUG_ON(rq->flush.seq & seq);
        rq->flush.seq |= seq;
        cmd_flags = rq->cmd_flags;

        if (likely(!error))
                seq = blk_flush_cur_seq(rq);
        else
                seq = REQ_FSEQ_DONE;

        switch (seq) {
        case REQ_FSEQ_PREFLUSH:
        case REQ_FSEQ_POSTFLUSH:
                /* queue for flush */
                if (list_empty(pending))
                        fq->flush_pending_since = jiffies;
                list_move_tail(&rq->queuelist, pending);
                break;

        case REQ_FSEQ_DATA:
                fq->flush_data_in_flight++;
                spin_lock(&q->requeue_lock);
                list_move(&rq->queuelist, &q->requeue_list);
                spin_unlock(&q->requeue_lock);
                blk_mq_kick_requeue_list(q);
                break;

        case REQ_FSEQ_DONE:
                /*
                 * @rq was previously adjusted by blk_insert_flush() for
                 * flush sequencing and may already have gone through the
                 * flush data request completion path.  Restore @rq for
                 * normal completion and end it.
                 */
                list_del_init(&rq->queuelist);
                blk_flush_restore_request(rq);
                blk_mq_end_request(rq, error);
                break;

        default:
                BUG();
        }

        blk_kick_flush(q, fq, cmd_flags);
}
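
/*
 * Illustrative walk-through (assuming a writeback-cache device without FUA,
 * so policy == PREFLUSH | DATA | POSTFLUSH): successive calls into
 * blk_flush_complete_seq() advance such a request as follows.
 *
 *      blk_insert_flush():       mask 0              -> PREFLUSH, queued for flush
 *      flush_end_io():           completes PREFLUSH  -> DATA, requeued as data write
 *      mq_flush_data_end_io():   completes DATA      -> POSTFLUSH, queued for flush
 *      flush_end_io():           completes POSTFLUSH -> REQ_FSEQ_DONE, request ended
 */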

static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
                                       blk_status_t error)
{
        struct request_queue *q = flush_rq->q;
        struct list_head *running;
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);

        /* release the tag's ownership to the req cloned from */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);

        if (!req_ref_put_and_test(flush_rq)) {
                fq->rq_status = error;
                spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
                return RQ_END_IO_NONE;
        }

        blk_account_io_flush(flush_rq);
        /*
         * The flush request has to be marked as IDLE when it is really ended
         * because its ->end_io() is also called from the timeout code path,
         * to avoid use-after-free.
         */
        WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
        if (fq->rq_status != BLK_STS_OK) {
                error = fq->rq_status;
                fq->rq_status = BLK_STS_OK;
        }

        if (!q->elevator) {
                flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }

        running = &fq->flush_queue[fq->flush_running_idx];
        BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);

        /* account completion of the flush request */
        fq->flush_running_idx ^= 1;

        /* and push the waiting requests to the next stage */
        list_for_each_entry_safe(rq, n, running, queuelist) {
                unsigned int seq = blk_flush_cur_seq(rq);

                BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
                blk_flush_complete_seq(rq, fq, seq, error);
        }

        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
        return RQ_END_IO_NONE;
}

bool is_flush_rq(struct request *rq)
{
        return rq->end_io == flush_end_io;
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 * @fq: flush queue
 * @flags: cmd_flags of the original request
 *
 * Flush related states of @q have changed; consider issuing a flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(fq->mq_flush_lock)
 */
static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
                           blk_opf_t flags)
{
        struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
        struct request *first_rq =
                list_first_entry(pending, struct request, queuelist);
        struct request *flush_rq = fq->flush_rq;

        /* C1 described at the top of this file */
        if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
                return;

        /* C2 and C3 */
        if (fq->flush_data_in_flight &&
            time_before(jiffies,
                        fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
                return;

        /*
         * Issue flush and toggle pending_idx.  This makes pending_idx
         * different from running_idx, which means flush is in flight.
         */
        fq->flush_pending_idx ^= 1;

        blk_rq_init(q, flush_rq);

        /*
         * When no I/O scheduler is attached, borrow the tag from the first
         * request since the two can't be in flight at the same time, and
         * take ownership of that tag for the flush request.
         *
         * When an I/O scheduler is attached, the flush request only needs to
         * borrow the scheduler tag so that putting/getting the driver tag
         * works.
         */
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;

        if (!q->elevator)
                flush_rq->tag = first_rq->tag;
        else
                flush_rq->internal_tag = first_rq->internal_tag;

        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->end_io = flush_end_io;
        /*
         * Order the WRITE to ->end_io before the WRITE to rq->ref.  Its pair
         * is the barrier implied by refcount_inc_not_zero() called from
         * blk_mq_find_and_get_req(), which orders the WRITE/READ of
         * flush_rq->ref against the READ of flush_rq->end_io.
         */
        smp_wmb();
        req_ref_set(flush_rq, 1);

        spin_lock(&q->requeue_lock);
        list_add_tail(&flush_rq->queuelist, &q->flush_list);
        spin_unlock(&q->requeue_lock);

        blk_mq_kick_requeue_list(q);
}

static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
                                               blk_status_t error)
{
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        unsigned long flags;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);

        if (q->elevator) {
                WARN_ON(rq->tag < 0);
                blk_mq_put_driver_tag(rq);
        }

        /*
         * After populating an empty queue, kick it to avoid a stall.  Read
         * the comment in flush_end_io().
         */
        spin_lock_irqsave(&fq->mq_flush_lock, flags);
        fq->flush_data_in_flight--;
        /*
         * rq->queuelist may have been corrupted by rq->rq_next reuse;
         * re-initialize it before reusing it here.
         */
        INIT_LIST_HEAD(&rq->queuelist);
        blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
        spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

        blk_mq_sched_restart(hctx);
        return RQ_END_IO_NONE;
}

static void blk_rq_init_flush(struct request *rq)
{
        rq->flush.seq = 0;
        rq->rq_flags |= RQF_FLUSH_SEQ;
        rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
        rq->end_io = mq_flush_data_end_io;
}

/*
 * Insert a PREFLUSH/FUA request into the flush state machine.
 * Returns true if the request has been consumed by the flush state machine,
 * or false if the caller should continue to process it.
 */
bool blk_insert_flush(struct request *rq)
{
        struct request_queue *q = rq->q;
        unsigned long fflags = q->queue_flags;  /* may change, cache */
        unsigned int policy = blk_flush_policy(fflags, rq);
        struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);

        /* FLUSH/FUA request must never be merged */
        WARN_ON_ONCE(rq->bio != rq->biotail);

        /*
         * @policy now records what operations need to be done.  Adjust
         * REQ_PREFLUSH and FUA for the driver.
         */
        rq->cmd_flags &= ~REQ_PREFLUSH;
        if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
                rq->cmd_flags &= ~REQ_FUA;

        /*
         * REQ_PREFLUSH|REQ_FUA implies REQ_SYNC, so if we clear any
         * of those flags, we have to set REQ_SYNC to avoid skewing
         * the request accounting.
         */
        rq->cmd_flags |= REQ_SYNC;

        switch (policy) {
        case 0:
                /*
                 * An empty flush handed down from a stacking driver may
                 * translate into nothing if the underlying device does not
                 * advertise a write-back cache.  In this case, simply
                 * complete the request.
                 */
                blk_mq_end_request(rq, 0);
                return true;
        case REQ_FSEQ_DATA:
                /*
                 * If there's data, but no flush is necessary, the request can
                 * be processed directly without going through flush machinery.
                 * Queue for normal execution.
                 */
                return false;
        case REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH:
                /*
                 * Initialize the flush fields and completion handler to trigger
                 * the post flush, and then just pass the command on.
                 */
                blk_rq_init_flush(rq);
                rq->flush.seq |= REQ_FSEQ_PREFLUSH;
                spin_lock_irq(&fq->mq_flush_lock);
                fq->flush_data_in_flight++;
                spin_unlock_irq(&fq->mq_flush_lock);
                return false;
        default:
                /*
                 * Mark the request as part of a flush sequence and submit it
                 * for further processing to the flush state machine.
                 */
                blk_rq_init_flush(rq);
                spin_lock_irq(&fq->mq_flush_lock);
                blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
                spin_unlock_irq(&fq->mq_flush_lock);
                return true;
        }
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev:       blockdev to issue flush for
 *
 * Description:
 *    Issue a flush for the block device in question.
 */
int blkdev_issue_flush(struct block_device *bdev)
{
        struct bio bio;

        bio_init(&bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
        return submit_bio_wait(&bio);
}
EXPORT_SYMBOL(blkdev_issue_flush);
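
/*
 * Usage sketch (illustrative, not built here): a hypothetical caller that
 * wants previously completed writes on a device to be durable before it
 * proceeds could simply do
 *
 *      int err = blkdev_issue_flush(bdev);
 *      if (err)
 *              pr_warn("cache flush on %pg failed: %d\n", bdev, err);
 *
 * The call issues an empty REQ_PREFLUSH bio and waits for it to complete.
 */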

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
                                              gfp_t flags)
{
        struct blk_flush_queue *fq;
        int rq_sz = sizeof(struct request);

        fq = kzalloc_node(sizeof(*fq), flags, node);
        if (!fq)
                goto fail;

        spin_lock_init(&fq->mq_flush_lock);

        rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
        fq->flush_rq = kzalloc_node(rq_sz, flags, node);
        if (!fq->flush_rq)
                goto fail_rq;

        INIT_LIST_HEAD(&fq->flush_queue[0]);
        INIT_LIST_HEAD(&fq->flush_queue[1]);

        return fq;

 fail_rq:
        kfree(fq);
 fail:
        return NULL;
}

void blk_free_flush_queue(struct blk_flush_queue *fq)
{
        /* a bio based request queue has no flush queue */
        if (!fq)
                return;

        kfree(fq->flush_rq);
        kfree(fq);
}

/*
 * Allow a driver to set its own lock class for fq->mq_flush_lock to avoid
 * a lockdep complaint.
 *
 * flush_end_io() may be called recursively from some drivers, such as
 * nvme-loop, so lockdep may complain about 'possible recursive locking'
 * because all 'struct blk_flush_queue' instances share the same
 * mq_flush_lock lock class key.  Such drivers need to assign a different
 * lock class to their fq->mq_flush_lock to avoid the lockdep warning.
 *
 * Using a dynamically allocated lock class key for each 'blk_flush_queue'
 * instance would be overkill and, worse, it introduces a horrible boot delay
 * because synchronize_rcu() is implied in lockdep_unregister_key(), which is
 * called for each hctx release.  SCSI probing may synchronously create and
 * destroy lots of MQ request_queues for non-existent devices, and some robot
 * test kernels always enable the lockdep option.  More than half an hour has
 * been observed for SCSI MQ probing with per-fq lock classes.
 */
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
                struct lock_class_key *key)
{
        lockdep_set_class(&hctx->fq->mq_flush_lock, key);
}
EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
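
/*
 * Usage sketch (illustrative, modelled on what a driver such as nvme-loop is
 * expected to do; the names here are hypothetical): define one static key and
 * apply it from the driver's ->init_hctx() callback.
 *
 *      static struct lock_class_key example_fq_lock_key;
 *
 *      static int example_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 *                                   unsigned int hctx_idx)
 *      {
 *              blk_mq_hctx_set_fq_lock_class(hctx, &example_fq_lock_key);
 *              return 0;
 *      }
 */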