linux/block/blk-mq-sched.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

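/*
 * Associate the submitting task's io_context with @rq, for schedulers
 * that keep per-process state (icq-based schedulers such as BFQ).
 */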
void blk_mq_sched_assign_ioc(struct request *rq);

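/*
 * Merge handling: try to fold a new bio, or a request being inserted,
 * into a request that is already queued.
 */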
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs, struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
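
/*
 * Restart handling: dispatch marks a hctx with BLK_MQ_S_SCHED_RESTART
 * when it has to stop early, and blk_mq_sched_restart() re-runs the
 * queue once resources free up again.
 */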
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

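/*
 * Insertion: hand a single request, or a list of requests from one
 * software queue, over to the scheduler.
 */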
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct blk_mq_ctx *ctx,
                                  struct list_head *list, bool run_queue_async);

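/* Main dispatch path: move scheduler/ctx requests to the driver. */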
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

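/* Scheduler setup and teardown for a request queue. */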
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_requests(struct request_queue *q);

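/*
 * Cheap checks first: merging may be disabled per-queue, and the bio
 * itself may not be mergeable.  Only then take the scheduler path.
 */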
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
                unsigned int nr_segs)
{
        if (blk_queue_nomerges(q) || !bio_mergeable(bio))
                return false;

        return __blk_mq_sched_bio_merge(q, bio, nr_segs);
}

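/*
 * Let the scheduler veto merging @bio into @rq; with no ->allow_merge()
 * hook (or no scheduler at all), merging is always allowed.
 */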
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e && e->type->ops.allow_merge)
                return e->type->ops.allow_merge(q, rq, bio);

        return true;
}

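/* Tell the scheduler that @rq completed at time @now. */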
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
        struct elevator_queue *e = rq->q->elevator;

        if (e && e->type->ops.completed_request)
                e->type->ops.completed_request(rq, now);
}

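/*
 * Tell the scheduler about a requeue, but only for requests that own
 * scheduler-private data (RQF_ELVPRIV).
 */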
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
                e->type->ops.requeue_request(rq);
}

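/* Does the scheduler still hold requests destined for this hctx? */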
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct elevator_queue *e = hctx->queue->elevator;

        if (e && e->type->ops.has_work)
                return e->type->ops.has_work(hctx);

        return false;
}

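/* Has dispatch on this hctx been marked for a restart? */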
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
        return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif