linux/block/blk.h
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME  (HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ   32
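
/*
 * Illustrative sketch (not part of this header) of how the request
 * allocator in blk-core.c is expected to consume these knobs: a task
 * that recently had to wait for a free request is treated as
 * "batching" and may keep allocating as long as it stays within its
 * request budget and within BLK_BATCH_TIME (HZ/50 ticks, i.e. 20ms)
 * of its last wait.  The io_context field names below are paraphrased
 * and may not match the struct exactly.
 *
 *        batching = ioc->nr_batch_requests > 0 &&
 *                   time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME);
 */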

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
        kobject_get(&q->kobj);
}

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio);
void blk_drain_queue(struct request_queue *q, bool drain_all);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
                            unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
        REQ_ATOM_COMPLETE = 0,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the
 * request; make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
        return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
        clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
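
/*
 * Usage sketch (illustrative, not part of this header): the timeout
 * handler and the normal completion path both try to set the bit
 * first; the loser backs off, so each request is finished exactly
 * once.
 *
 *        if (blk_mark_rq_complete(rq))
 *                return;
 *        ... finish (or time out) rq as its sole owner ...
 */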

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)         (!hlist_unhashed(&(rq)->hash))

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
        struct request *rq;

        while (1) {
                if (!list_empty(&q->queue_head)) {
                        rq = list_entry_rq(q->queue_head.next);
                        return rq;
                }

                /*
                 * A flush request is in flight and the device can't queue
                 * flush requests, so we hold the queue until the flush
                 * completes.  Even if we didn't, the driver couldn't
                 * dispatch further requests and would have to requeue them,
                 * and holding the queue can actually improve throughput.
                 * For example, given requests flush1, write1, flush2:
                 * flush1 is dispatched and the queue is held, so write1 is
                 * never inserted.  After flush1 finishes, flush2 is
                 * dispatched immediately; since the disk cache is already
                 * clean, flush2 completes very quickly, so flush2 is
                 * effectively folded into flush1.
                 * While the queue is held, a flag is set to indicate that
                 * it should be restarted later.  See flush_end_io() for
                 * details.
                 */
                if (q->flush_pending_idx != q->flush_running_idx &&
                                !queue_flush_queueable(q)) {
                        q->flush_queue_delayed = 1;
                        return NULL;
                }
                if (unlikely(blk_queue_dead(q)) ||
                    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
                        return NULL;
        }
}
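
/*
 * Caller sketch (illustrative; the real consumer is blk_peek_request()
 * in blk-core.c): drivers drain the queue through a peek/start cycle,
 * and a NULL return means the queue is dead, the elevator has nothing
 * to dispatch, or dispatch is held back for a running flush as
 * described above.
 *
 *        while ((rq = __elv_next_request(q)) != NULL) {
 *                ... prepare rq and hand it to the driver ...
 *        }
 */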

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_activate_req_fn)
                e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_deactivate_req_fn)
                e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
                                const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
        return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                                struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep
 * the context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
        return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
        return q->nr_congestion_off;
}
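
/*
 * Hysteresis sketch (illustrative, not part of this header): congestion
 * is signalled once usage reaches the higher "on" threshold and cleared
 * only after it drops below the lower "off" threshold, so the congested
 * state doesn't flap around a single boundary.  The helper names below
 * follow the blk_set/clear_queue_congested() pattern used by blk-core.c
 * in this era.
 *
 *        if (nr_used + 1 >= queue_congestion_on_threshold(q))
 *                blk_set_queue_congested(q, is_sync);
 *        else if (nr_used < queue_congestion_off_threshold(q))
 *                blk_clear_queue_congested(q, is_sync);
 */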

static inline int blk_cpu_to_group(int cpu)
{
        int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
        const struct cpumask *mask = cpu_coregroup_mask(cpu);
        group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
        group = cpumask_first(topology_thread_cpumask(cpu));
#else
        return cpu;
#endif
        if (likely(group < NR_CPUS))
                return group;
        return cpu;
}

/*
 * Contribute to IO statistics IFF:
 *
 *      a) it's attached to a gendisk, and
 *      b) the queue had IO stats enabled when this request was started, and
 *      c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
        return rq->rq_disk &&
               (rq->cmd_flags & REQ_IO_STAT) &&
               (rq->cmd_type == REQ_TYPE_FS ||
                (rq->cmd_flags & REQ_DISCARD));
}
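
/*
 * Caller sketch (illustrative): the accounting helpers bail out early
 * when a request doesn't qualify, so the checks above are the only
 * cost for requests with statistics disabled.
 *
 *        if (!blk_do_io_stat(rq))
 *                return;
 *        ... update the per-partition part_stat counters ...
 */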

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_mask,
                                int node);

/**
 * create_io_context - try to create task->io_context
 * @task: target task
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If @task->io_context is %NULL, allocate a new io_context and install it.
 * Returns the current @task->io_context, which may be %NULL if allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects @task->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(struct task_struct *task,
                                                   gfp_t gfp_mask, int node)
{
        WARN_ON_ONCE(irqs_disabled());
        if (unlikely(!task->io_context))
                create_io_context_slowpath(task, gfp_mask, node);
        return task->io_context;
}
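
/*
 * Call-site sketch (illustrative): request-allocation paths make sure
 * an io_context exists before they need one, tolerating failure since
 * %NULL is an acceptable, if degraded, outcome.
 *
 *        ioc = create_io_context(current, GFP_NOIO, q->node);
 *        if (!ioc)
 *                ... fall back to default IO behaviour ...
 */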

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_release(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
        return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_release(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
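
/*
 * Gating sketch (illustrative): the bio submission path calls
 * blk_throtl_bio() before queueing; a true return means the throttling
 * layer took ownership of the bio and will resubmit it later, so the
 * caller must not touch it again.  With CONFIG_BLK_DEV_THROTTLING off,
 * the stub above turns this check into a no-op.
 *
 *        if (blk_throtl_bio(q, bio))
 *                return;
 */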

#endif /* BLK_INTERNAL_H */