linux/block/blk-settings.c
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>      /* for max_pfn/max_low_pfn */

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:          queue
 * @pfn:        prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn.  The goal of
 * the function is to prepare a request for I/O; it can be used, for
 * instance, to build a CDB from the request data.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);
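
/*
 * Example (illustrative sketch, not part of the kernel): a driver
 * could register a prepare_request callback like the hypothetical
 * my_prep_fn() below, e.g. to fill in a command block before its
 * request_fn sees the request.  The BLKPREP_* return codes come from
 * <linux/blkdev.h>.
 *
 *      blk_queue_prep_rq(q, my_prep_fn);
 */
static int my_prep_fn(struct request_queue *q, struct request *rq)
{
        /* a real driver would build its CDB from rq here */
        if (!blk_fs_request(rq))
                return BLKPREP_KILL;    /* reject unexpected requests */
        return BLKPREP_OK;              /* ready for ->request_fn */
}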

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:          queue
 * @mbfn:       merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is OK to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with such bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);
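
/*
 * Example (illustrative sketch, not part of the kernel): a stacking
 * driver with a hypothetical 64KB chunk size could refuse pages that
 * would make a bio cross a chunk boundary, while still letting one
 * page into an empty bio as required above.  Note the merge_bvec_fn
 * signature has varied between kernel versions; the (queue, bio,
 * bio_vec) form is assumed here.
 *
 *      blk_queue_merge_bvec(q, my_merge_bvec);
 */
static int my_merge_bvec(struct request_queue *q, struct bio *bio,
                         struct bio_vec *bvec)
{
        unsigned int chunk_sectors = 128;       /* 64KB chunks, power of 2 */
        unsigned int bio_sectors = bio->bi_size >> 9;
        int max;

        /* bytes left in the chunk that this bio currently ends in */
        max = (chunk_sectors - ((bio->bi_sector & (chunk_sectors - 1))
                                + bio_sectors)) << 9;
        if (max < 0)
                max = 0;
        /* an empty bio must always be allowed at least one page */
        if (max <= bvec->bv_len && bio_sectors == 0)
                return bvec->bv_len;
        return max;
}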
  56
  57void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
  58{
  59        q->softirq_done_fn = fn;
  60}
  61EXPORT_SYMBOL(blk_queue_softirq_done);
  62
  63/**
  64 * blk_queue_make_request - define an alternate make_request function for a device
  65 * @q:  the request queue for the device to be affected
  66 * @mfn: the alternate make_request function
  67 *
  68 * Description:
  69 *    The normal way for &struct bios to be passed to a device
  70 *    driver is for them to be collected into requests on a request
  71 *    queue, and then to allow the device driver to select requests
  72 *    off that queue when it is ready.  This works well for many block
  73 *    devices. However some block devices (typically virtual devices
  74 *    such as md or lvm) do not benefit from the processing on the
  75 *    request queue, and are served best by having the requests passed
  76 *    directly to them.  This can be achieved by providing a function
  77 *    to blk_queue_make_request().
  78 *
  79 * Caveat:
  80 *    The driver that does this *must* be able to deal appropriately
  81 *    with buffers in "highmemory". This can be accomplished by either calling
  82 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
  83 *    blk_queue_bounce() to create a buffer in normal memory.
  84 **/
  85void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
  86{
  87        /*
  88         * set defaults
  89         */
  90        q->nr_requests = BLKDEV_MAX_RQ;
  91        blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
  92        blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
  93        q->make_request_fn = mfn;
  94        q->backing_dev_info.ra_pages =
  95                        (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
  96        q->backing_dev_info.state = 0;
  97        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
  98        blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
  99        blk_queue_hardsect_size(q, 512);
 100        blk_queue_dma_alignment(q, 511);
 101        blk_queue_congestion_threshold(q);
 102        q->nr_batching = BLK_BATCH_REQ;
 103
 104        q->unplug_thresh = 4;           /* hmm */
 105        q->unplug_delay = (3 * HZ) / 1000;      /* 3 milliseconds */
 106        if (q->unplug_delay == 0)
 107                q->unplug_delay = 1;
 108
 109        INIT_WORK(&q->unplug_work, blk_unplug_work);
 110
 111        q->unplug_timer.function = blk_unplug_timeout;
 112        q->unplug_timer.data = (unsigned long)q;
 113
 114        /*
 115         * by default assume old behaviour and bounce for any highmem page
 116         */
 117        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
 118}
 119EXPORT_SYMBOL(blk_queue_make_request);
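
/*
 * Example (illustrative sketch, not part of the kernel): a minimal
 * md/lvm-style virtual device could bypass request queueing entirely.
 * Here q->queuedata is assumed to have been pointed at the underlying
 * block device when the queue was set up.
 *
 *      blk_queue_make_request(q, my_make_request);
 */
static int my_make_request(struct request_queue *q, struct bio *bio)
{
        struct block_device *lower = q->queuedata;

        bio->bi_bdev = lower;           /* remap to the lower device */
        generic_make_request(bio);      /* resubmit it down the stack */
        return 0;
}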

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q:  the request queue for the device
 * @dma_addr:   bus address limit
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
{
        unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
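
/*
 * Example (illustrative sketch, not part of the kernel): a driver
 * whose controller can only address the ISA DMA window would bounce
 * everything above 16MB, while a fully 64-bit capable one would turn
 * bouncing off.  BLK_BOUNCE_ISA and BLK_BOUNCE_ANY come from
 * <linux/blkdev.h>.
 */
static void my_set_bounce_limit(struct request_queue *q, int isa_only)
{
        if (isa_only)
                blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);      /* < 16MB */
        else
                blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);      /* no bouncing */
}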

/**
 * blk_queue_max_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_sectors:  max sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of
 *    received requests.
 **/
void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
{
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %u\n",
                       __func__, max_sectors);
        }

        if (BLK_DEF_MAX_SECTORS > max_sectors)
                q->max_hw_sectors = q->max_sectors = max_sectors;
        else {
                q->max_sectors = BLK_DEF_MAX_SECTORS;
                q->max_hw_sectors = max_sectors;
        }
}
EXPORT_SYMBOL(blk_queue_max_sectors);
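
/*
 * Example (illustrative sketch, not part of the kernel): a controller
 * limited to 64KB per transfer would cap requests at 128 sectors.
 * Note that values above BLK_DEF_MAX_SECTORS only raise the hardware
 * limit; the default software limit stays in effect until raised
 * separately (e.g. through sysfs).
 */
static void my_cap_transfer_size(struct request_queue *q)
{
        blk_queue_max_sectors(q, 128);  /* 128 * 512 bytes == 64KB */
}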

/**
 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    physical data segments in a request.  This would be the largest sized
 *    scatter list the driver could handle.
 **/
void blk_queue_max_phys_segments(struct request_queue *q,
                                 unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->max_phys_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_phys_segments);

/**
 * blk_queue_max_hw_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.  This would be the largest number of
 *    address/length pairs the host adapter can actually hand to the
 *    device at once.
 **/
void blk_queue_max_hw_segments(struct request_queue *q,
                               unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->max_hw_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_hw_segments);
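
/*
 * Example (illustrative sketch, not part of the kernel): a HBA with a
 * 32-entry scatter/gather table would bound both segment counts.  The
 * two limits only differ when an IOMMU can fold several physical
 * segments into one hardware descriptor.
 */
static void my_set_sg_limits(struct request_queue *q)
{
        blk_queue_max_phys_segments(q, 32);     /* scatterlist entries */
        blk_queue_max_hw_segments(q, 32);       /* hardware descriptors */
}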

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %u\n",
                       __func__, max_size);
        }

        q->max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_hardsect_size - set hardware sector size for the queue
 * @q:  the request queue for the device
 * @size:  the hardware sector size, in bytes
 *
 * Description:
 *   This should typically be set to the lowest possible sector size
 *   that the hardware can operate on without resorting to internal
 *   read-modify-write operations. Usually the default of 512 covers
 *   most hardware.
 **/
void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
{
        q->hardsect_size = size;
}
EXPORT_SYMBOL(blk_queue_hardsect_size);
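
/*
 * Example (illustrative sketch, not part of the kernel): a CD-ROM
 * driver would report the 2KB sectors its media actually uses:
 */
static void my_set_sector_size(struct request_queue *q)
{
        blk_queue_hardsect_size(q, 2048);       /* 2KB CD-ROM sectors */
}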

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 * (Arguments are fully parenthesized to keep the macro safe against
 * lower-precedence expressions.)
 */
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        /* zero is "infinity" */
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);

        t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
        t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
        t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
                unsigned long flags;
                spin_lock_irqsave(t->queue_lock, flags);
                queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
                spin_unlock_irqrestore(t->queue_lock, flags);
        }
}
EXPORT_SYMBOL(blk_queue_stack_limits);
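
/*
 * Example (illustrative sketch, not part of the kernel): a stacking
 * driver would typically fold in each component device's limits as it
 * is added; bdev_get_queue() comes from <linux/blkdev.h>.
 */
static void my_add_component(struct request_queue *t,
                             struct block_device *bdev)
{
        blk_queue_stack_limits(t, bdev_get_queue(bdev));
}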

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set the DMA pad mask.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list so that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update the DMA pad mask, keeping whichever of the old and new masks
 * is larger.
 *
 * Appending a pad buffer to a request modifies the last entry of the
 * scatter list so that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
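
/*
 * Example (illustrative sketch, not part of the kernel): a transport
 * that always transfers whole 4-byte words could ask for the last
 * scatterlist entry to be padded to that width.  The _update_ variant
 * only ever grows the mask, so driver, device and transport can each
 * state their own requirement without clobbering the others'.
 */
static void my_require_word_padding(struct request_queue *q)
{
        blk_queue_update_dma_pad(q, 4 - 1);     /* pad to 4-byte multiples */
}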

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:        physically contiguous buffer
 * @size:       size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that @buf is always appended silently
 * to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for
 * appending the drain buffer.  If you call
 * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
 * calling this routine, you must set the limit to one fewer than your
 * device can support, otherwise there won't be room for the drain
 * buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
{
        if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
                return -EINVAL;
        /* make room for appending the drain */
        --q->max_hw_segments;
        --q->max_phys_segments;
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
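
/*
 * Example (illustrative sketch, not part of the kernel): an
 * ATAPI-style driver could hang a page-sized drain buffer off its
 * queue so over-long transfers land somewhere harmless.  The drain
 * test and the plain kmalloc() buffer (needs <linux/slab.h>) are
 * assumptions of this sketch.
 */
static int my_drain_needed(struct request *rq)
{
        return blk_pc_request(rq);      /* only packet commands drain */
}

static int my_setup_drain(struct request_queue *q)
{
        void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;
        return blk_queue_dma_drain(q, my_drain_needed, buf, PAGE_SIZE);
}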

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
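
/*
 * Example (illustrative sketch, not part of the kernel): hardware
 * whose DMA descriptors must not cross a 64KB address boundary would
 * pass the mask of the address bits a segment may span:
 */
static void my_set_boundary(struct request_queue *q)
{
        blk_queue_segment_boundary(q, 0xffff);  /* stay within 64KB */
}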

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    Set the required memory and length alignment for direct DMA
 *    transactions.  This is used when building direct I/O requests for
 *    the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    Update the required memory and length alignment for direct DMA
 *    transactions.  If the requested alignment is larger than the current
 *    alignment, then the current queue alignment is updated to the new
 *    value; otherwise it is left alone.  The design of this is to allow
 *    multiple objects (driver, device, transport etc) to set their
 *    respective alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
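
/*
 * Example (illustrative sketch, not part of the kernel): a host
 * controller needing 4-byte buffer alignment and a device needing
 * 512-byte alignment can both report in; the queue keeps the
 * stricter (larger) mask.
 */
static void my_report_alignment(struct request_queue *q)
{
        blk_queue_update_dma_alignment(q, 4 - 1);       /* host: 4 bytes */
        blk_queue_update_dma_alignment(q, 512 - 1);     /* device: 512 bytes */
}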

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);