linux/block/blk.h
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME  (HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ   32
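
/*
 * Hedged sketch (not part of the original header) of how these limits
 * are consumed; compare ioc_batching() in blk-core.c.  A task that has
 * recently waited on a full queue stays "batching" while it still has
 * batch requests left and is within BLK_BATCH_TIME of its last wait:
 *
 *      return ioc->nr_batch_requests == q->nr_batching ||
 *              (ioc->nr_batch_requests > 0 &&
 *               time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
 */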

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
        kobject_get(&q->kobj);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
                gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
        REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
        return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
        clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
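
/*
 * Hedged illustration (not part of the original header): the normal
 * completion path and the timeout path both try to claim the request
 * via blk_mark_rq_complete(), and the loser backs off.  Compare
 * blk_complete_request() in blk-softirq.c and blk_rq_timed_out_timer()
 * in blk-timeout.c:
 *
 *      completion path:
 *              if (!blk_mark_rq_complete(rq))
 *                      __blk_complete_request(rq);     (timeout lost)
 *
 *      timeout path:
 *              if (blk_mark_rq_complete(rq))
 *                      continue;                       (completion won)
 *              blk_rq_timed_out(rq);
 */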

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
        struct request *rq;

        while (1) {
                if (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        return rq;
                }

                /*
                 * A flush request is running and the device cannot queue
                 * flushes itself, so hold the queue until the flush
                 * finishes.  Even if we didn't hold it, the driver could
                 * not dispatch further requests and would have to requeue
                 * them, and holding can also improve throughput.  For
                 * example, given flush1, write1, flush2: flush1 is
                 * dispatched, the queue is held, and write1 is not
                 * inserted.  After flush1 finishes, flush2 is dispatched;
                 * since the disk cache is already clean, flush2 completes
                 * very quickly, so flush2 is effectively folded into
                 * flush1.
                 * While the queue is held, a flag is set to indicate that
                 * it should be restarted later.  See flush_end_io() for
                 * details.
                 */
                if (q->flush_pending_idx != q->flush_running_idx &&
                                !queue_flush_queueable(q)) {
                        q->flush_queue_delayed = 1;
                        return NULL;
                }
                if (unlikely(blk_queue_dying(q)) ||
                    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}
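
/*
 * Hedged usage sketch (not part of the original header); the real
 * caller is blk_peek_request() in blk-core.c, which loops roughly as:
 *
 *      while ((rq = __elv_next_request(q)) != NULL) {
 *              if (!(rq->cmd_flags & REQ_STARTED)) {
 *                      ... sort, mark started, prep the request ...
 *              }
 *              ...
 *      }
 *
 * A NULL return means the queue is empty, dying, or held for a flush.
 */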

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_activate_req_fn)
                e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_deactivate_req_fn)
                e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
                                const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
        return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                                struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

void __blk_run_queue_uncond(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep
 * the context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
        return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
        return q->nr_congestion_off;
}
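
/*
 * Hedged sketch of the hysteresis (not part of the original header);
 * compare the request-list accounting in blk-core.c.  Congestion is
 * flagged at the higher "on" threshold and cleared only once usage
 * falls below the lower "off" threshold, so the state does not flap
 * around a single boundary:
 *
 *      if (rl->count[sync] + 1 >= queue_congestion_on_threshold(q))
 *              blk_set_queue_congested(q, sync);
 *      ...
 *      if (rl->count[sync] < queue_congestion_off_threshold(q))
 *              blk_clear_queue_congested(q, sync);
 */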

/*
 * Contribute to IO statistics IFF:
 *
 *      a) it's attached to a gendisk, and
 *      b) the queue had IO stats enabled when this request was started, and
 *      c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
        return rq->rq_disk &&
               (rq->cmd_flags & REQ_IO_STAT) &&
               (rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns %current->io_context, which may be %NULL if allocation
 * failed.
 *
 * Note that this function can't be called with IRQ disabled because
 * task_lock(), which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
        WARN_ON_ONCE(irqs_disabled());
        if (unlikely(!current->io_context))
                create_task_io_context(current, gfp_mask, node);
        return current->io_context;
}
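
/*
 * Hedged usage sketch (not part of the original header); compare the
 * request-allocation path in blk-core.c, which makes sure an io_context
 * exists before charging requests against it and tolerates allocation
 * failure by checking the result:
 *
 *      create_io_context(gfp_mask, q->node);
 *      ...
 *      if (current->io_context)
 *              ... account / batch against it ...
 */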

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
        return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
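
/*
 * Hedged usage sketch (not part of the original header); compare
 * generic_make_request_checks() in blk-core.c.  The submission path
 * gives the throttler first refusal; a %true return means the bio was
 * queued by the throttler for later dispatch and must not be submitted
 * now:
 *
 *      if (blk_throtl_bio(q, bio))
 *              return;         (throttled; dispatched later)
 *      ... continue with normal submission ...
 */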

#endif /* BLK_INTERNAL_H */