linux/block/blk-lib.c
/*
 * Functions related to generic block device helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

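/*
 * A bio_batch tracks a set of bios submitted as one logical request:
 * "done" counts the outstanding bios (biased by one for the submitter),
 * "flags" carries a shared BIO_UPTODATE bit that any failing bio
 * clears, and "wait" is completed when the last bio finishes.
 */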
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	sector_t max_discard_sectors;
	sector_t granularity, alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = bdev_discard_alignment(bdev) >> 9;
	alignment = sector_div(alignment, granularity);

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
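	/*
	 * Worked example (illustrative numbers): with a 4KiB discard
	 * granularity (8 sectors) and a device limit of 65535 sectors,
	 * max_discard_sectors becomes (65535 / 8) * 8 = 65528, so each
	 * split except possibly the last ends on a granularity boundary.
	 */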
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	sector_div(max_discard_sectors, granularity);
	max_discard_sectors *= granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
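		/*
		 * Worked example (illustrative numbers): granularity = 8,
		 * alignment = 2, sector = 2, req_sects = 13. Then end_sect
		 * is 15 and 15 % 8 = 7 != 2, so end_sect is rounded down
		 * to ((15 - 2) / 8) * 8 + 2 = 10 and req_sects becomes 8,
		 * an exact multiple of the granularity.
		 */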
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid a soft lockup if preemption
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
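/*
 * Usage sketch (illustrative, not part of this file): a filesystem
 * discarding a freed extent might do, with a hypothetical superblock
 * sb and hypothetical start/len variables:
 *
 *	int err = blkdev_issue_discard(sb->s_bdev, start, len,
 *				       GFP_NOFS, 0);
 *	if (err && err != -EOPNOTSUPP)
 *		pr_warn("discard failed: %d\n", err);
 *
 * Passing BLKDEV_DISCARD_SECURE in flags requests a secure discard,
 * which fails with -EOPNOTSUPP if the queue does not support it.
 */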

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
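		/*
		 * WRITE SAME carries a single logical block of payload;
		 * the device replicates it across the whole bi_size range,
		 * so one bio_vec suffices no matter how many sectors this
		 * bio covers.
		 */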
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EOPNOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
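/*
 * Usage sketch (illustrative): zeroing a range via WRITE SAME with the
 * kernel's shared zero page, the same approach blkdev_issue_zeroout()
 * below takes when the device advertises support:
 *
 *	if (bdev_write_same(bdev))
 *		err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					      gfp_mask, ZERO_PAGE(0));
 */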

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

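		/*
		 * Fill the bio with as many zero pages as it will take.
		 * bio_add_page() returns the number of bytes actually
		 * added, which may be less than requested once the bio
		 * is full; in that case submit this bio and start a new
		 * one on the next outer iteration.
		 */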
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill the block range, using WRITE SAME with the zero page when
 *  the device supports it and falling back to zero-filled write bios
 *  otherwise.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
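/*
 * Usage sketch (illustrative): a discard is not guaranteed to return
 * zeroes, so a caller that needs zeroed blocks uses this helper, with
 * hypothetical start/len variables:
 *
 *	int err = blkdev_issue_zeroout(bdev, start, len, GFP_NOFS);
 *	if (err)
 *		return err;
 */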