linux/block/blk-settings.c
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>      /* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q:          queue
 * @pfn:        prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * cdb from the request data, for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q:          queue
 * @ufn:        unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q:          queue
 * @mbfn:       merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bios sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bios. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.  Can be used by
 *   stacking drivers like DM that stage table swaps and reuse an
 *   existing device queue.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = BLK_DEF_MAX_SECTORS;
        lim->max_hw_sectors = INT_MAX;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = -1;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However, some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);
        blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
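
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a bio-based virtual driver hooks its own make_request function and
 * opts out of bouncing.  vblk_make_request() and vblk_init_queue() are
 * hypothetical names; the make_request_fn here simply completes every bio,
 * like a null device, to keep the example self-contained.
 */
#if 0
static int vblk_make_request(struct request_queue *q, struct bio *bio)
{
        /* a trivial "device": complete every bio immediately with success */
        bio_endio(bio, 0);
        return 0;
}

static void vblk_init_queue(struct request_queue *q)
{
        /* bios now bypass request-queue processing entirely */
        blk_queue_make_request(q, vblk_make_request);
        /* this driver maps highmem pages itself, so no bouncing is needed */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
}
#endif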

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
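
/*
 * Illustrative sketch (editor's addition): a driver whose controller can
 * only address 32-bit physical addresses would bound the queue with its
 * DMA mask, so pages above 4GB get bounced before I/O.  foo_init_queue()
 * is a hypothetical helper; DMA_BIT_MASK() comes from <linux/dma-mapping.h>.
 */
#if 0
static void foo_init_queue(struct request_queue *q)
{
        blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
}
#endif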

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit cannot exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
                                    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
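
/*
 * Illustrative sketch (editor's addition): a controller that can transfer
 * at most 512 KB per request with a 128-entry scatter/gather table.  The
 * hard limit is expressed in 512-byte sectors; max_sectors is then capped
 * at BLK_DEF_MAX_SECTORS by blk_limits_max_hw_sectors().  bar_init_queue()
 * is a hypothetical helper.
 */
#if 0
static void bar_init_queue(struct request_queue *q)
{
        blk_queue_max_hw_sectors(q, 1024);      /* 1024 * 512 B = 512 KB */
        blk_queue_max_segments(q, 128);         /* S/G table entries     */
        blk_queue_max_segment_size(q, 65536);   /* bytes per S/G element */
}
#endif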

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
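
/*
 * Illustrative sketch (editor's addition): a 512-byte-emulation ("512e")
 * disk addresses 512-byte logical blocks but uses 4096-byte physical
 * sectors, and may report that logical block 0 starts 3584 bytes into a
 * physical sector.  baz_read_capacity() is a hypothetical probe helper.
 */
#if 0
static void baz_read_capacity(struct request_queue *q)
{
        blk_queue_logical_block_size(q, 512);
        blk_queue_physical_block_size(q, 4096);
        /* alignment offset reported by the device, in bytes */
        blk_queue_alignment_offset(q, 3584);
}
#endif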

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:  the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:  the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:  the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
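
/*
 * Illustrative sketch (editor's addition): a RAID5 set with a 64 KB chunk
 * and four data disks exports the chunk size as minimum_io_size and the
 * full stripe width as optimal_io_size.  raid_set_io_hints() is a
 * hypothetical helper, not an existing MD function.
 */
#if 0
static void raid_set_io_hints(struct request_queue *q)
{
        unsigned int chunk_bytes = 64 * 1024;   /* stripe chunk size   */
        unsigned int data_disks = 4;            /* disks carrying data */

        blk_queue_io_min(q, chunk_bytes);
        blk_queue_io_opt(q, chunk_bytes * data_disks);
}
#endif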

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:  the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:  the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) & (min(top, bottom) - 1)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if (max(top, bottom) & (min(top, bottom) - 1))
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm(t->discard_alignment, alignment) &
                        (t->discard_granularity - 1);
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:  the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
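
/*
 * Illustrative sketch (editor's addition): the stacking pattern described
 * above, loosely modelled on what DM does when it builds a table.  Start
 * from the defaults, then fold in every component device; a negative
 * return only means the combined alignment_offset is undefined.  The
 * struct foo_target and its fields are hypothetical.
 */
#if 0
static void foo_calculate_limits(struct foo_target *ft,
                                 struct queue_limits *limits)
{
        int i;

        blk_set_default_limits(limits);

        for (i = 0; i < ft->num_devs; i++) {
                char b[BDEVNAME_SIZE];

                if (bdev_stack_limits(limits, ft->devs[i].bdev,
                                      ft->devs[i].start_sector) < 0)
                        printk(KERN_WARNING "foo: device %s is misaligned\n",
                               bdevname(ft->devs[i].bdev, b));
        }
}
#endif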

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);
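
/*
 * Illustrative sketch (editor's addition): MD-style use of
 * disk_stack_limits().  Note that @offset is in bytes (it is shifted
 * right by 9 internally), so a member's data start sector has to be
 * converted.  foo_add_member() is a hypothetical helper.
 */
#if 0
static void foo_add_member(struct gendisk *array_disk,
                           struct block_device *member_bdev,
                           sector_t data_start_sector)
{
        disk_stack_limits(array_disk, member_bdev, data_start_sector << 9);
}
#endif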

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:        physically contiguous buffer
 * @size:       size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size, some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support; otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
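
/*
 * Illustrative sketch (editor's addition): the ATAPI-style setup the
 * comment above describes.  A spare page serves as the drain area and a
 * predicate flags the commands that may over-transfer.  The names
 * foo_drain_needed() and foo_setup_drain() are hypothetical; libata has
 * its own equivalent of this pattern.
 */
#if 0
static int foo_drain_needed(struct request *rq)
{
        /* drain only for packet commands with a variable response length */
        return rq->cmd_type == REQ_TYPE_BLOCK_PC;
}

static int foo_setup_drain(struct request_queue *q)
{
        void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        /* consumes one segment; see the note above about max segments */
        return blk_queue_dma_drain(q, foo_drain_needed, buf, PAGE_SIZE);
}
#endif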

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
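
/*
 * Illustrative sketch (editor's addition): the "update" variant lets the
 * driver, the device and the transport each raise the alignment
 * requirement without clobbering one another.  A host that needs 4-byte
 * aligned buffers and lengths could do the following; foo_slave_configure()
 * is a hypothetical per-device setup hook.
 */
#if 0
static void foo_slave_configure(struct request_queue *q)
{
        /* keeps whichever alignment, this one or an earlier one, is stricter */
        blk_queue_update_dma_alignment(q, 4 - 1);
}
#endif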

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:          the request queue for the device
 * @flush:      0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell the block layer about the cache flush capability of @q.  If it
 * supports flushing, REQ_FLUSH should be set.  If it supports bypassing
 * the write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
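
/*
 * Illustrative sketch (editor's addition): a driver for a device with a
 * volatile write-back cache that also honours per-write FUA advertises
 * both flags; a device with no cache at all passes 0.  foo_probe_cache()
 * is a hypothetical probe helper.
 */
#if 0
static void foo_probe_cache(struct request_queue *q, bool wce, bool fua)
{
        unsigned int flush = 0;

        if (wce) {
                flush |= REQ_FLUSH;       /* device needs explicit flushes   */
                if (fua)
                        flush |= REQ_FUA; /* and supports forced unit access */
        }

        blk_queue_flush(q, flush);
}
#endif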

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);