linux/block/blk-lib.c
/*
 * Generic block layer helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

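/*
 * Tracks a batch of asynchronously submitted bios: 'done' counts the
 * outstanding bios (plus one held by the submitter), 'flags' carries
 * BIO_UPTODATE, and 'wait' is completed when the last bio finishes.
 */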
struct bio_batch {
        atomic_t                done;
        unsigned long           flags;
        struct completion       *wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
        struct bio_batch *bb = bio->bi_private;

        if (err && (err != -EOPNOTSUPP))
                clear_bit(BIO_UPTODATE, &bb->flags);
        if (atomic_dec_and_test(&bb->done))
                complete(bb->wait);
        bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:       blockdev to issue discard for
 * @sector:     start sector
 * @nr_sects:   number of sectors to discard
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @flags:      BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        int type = REQ_WRITE | REQ_DISCARD;
        unsigned int max_discard_sectors;
        unsigned int granularity, alignment, mask;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;

        if (!q)
                return -ENXIO;

        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        mask = granularity - 1;
        alignment = (bdev_discard_alignment(bdev) >> 9) & mask;

        /*
         * Ensure that max_discard_sectors is of the proper
         * granularity, so that requests stay aligned after a split.
         */
        max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
        max_discard_sectors = round_down(max_discard_sectors, granularity);
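        /*
         * Worked example (illustrative values, not from this file): a
         * queue limit of 65535 sectors with a granularity of 8 rounds
         * down to 65528, so every full-sized split stays a multiple of
         * the granularity.
         */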
        if (unlikely(!max_discard_sectors)) {
                /* Avoid infinite loop below. Being cautious never hurts. */
                return -EOPNOTSUPP;
        }

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secdiscard(q))
                        return -EOPNOTSUPP;
                type |= REQ_SECURE;
        }

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect;

                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
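                /*
                 * Worked example (illustrative values, not from this
                 * file): with granularity = 8, alignment = 2 and
                 * end_sect = 27, we get round_down(27 - 2, 8) + 2 = 26,
                 * which again satisfies (end_sect & mask) == alignment.
                 */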
                end_sect = sector + req_sects;
                if (req_sects < nr_sects && (end_sect & mask) != alignment) {
                        end_sect =
                                round_down(end_sect - alignment, granularity)
                                + alignment;
                        req_sects = end_sect - sector;
                }

                bio->bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;

                bio->bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                atomic_inc(&bb.done);
                submit_bio(type, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -EIO;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

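/*
 * Illustrative sketch (not part of this file): how a caller such as a
 * filesystem might discard a free extent. The helper name and the
 * fallback policy are hypothetical.
 */
static int example_discard_extent(struct block_device *bdev,
                                  sector_t start, sector_t len)
{
        int ret;

        /* Plain (non-secure) discard; this path may sleep. */
        ret = blkdev_issue_discard(bdev, start, len, GFP_KERNEL, 0);
        if (ret == -EOPNOTSUPP)
                return 0;       /* device cannot discard; treat as success */
        return ret;
}
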
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:       target blockdev
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 * @page:       page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                            sector_t nr_sects, gfp_t gfp_mask,
                            struct page *page)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio_batch bb;
        struct bio *bio;
        int ret = 0;

        if (!q)
                return -ENXIO;

        max_write_same_sectors = q->limits.max_write_same_sectors;

        if (max_write_same_sectors == 0)
                return -EOPNOTSUPP;

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        while (nr_sects) {
                bio = bio_alloc(gfp_mask, 1);
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_sector = sector;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_bdev = bdev;
                bio->bi_private = &bb;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
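                /*
                 * Note: the payload is a single logical block; the
                 * device replicates it across the whole bi_size range,
                 * which is what makes WRITE SAME cheaper than writing
                 * the repeated data out in full.
                 */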

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
                }

                atomic_inc(&bb.done);
                submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                ret = -ENOTSUPP;

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

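/*
 * Illustrative sketch (not part of this file): filling a range with a
 * repeated pattern via blkdev_issue_write_same(). The helper name and
 * the pattern page are hypothetical; the caller must keep the page
 * alive until the call returns.
 */
static int example_pattern_fill(struct block_device *bdev, sector_t start,
                                sector_t len, struct page *pattern_page)
{
        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;     /* no WRITE SAME support */

        return blkdev_issue_write_same(bdev, start, len, GFP_KERNEL,
                                       pattern_page);
}
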
/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:       blockdev to issue
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                        sector_t nr_sects, gfp_t gfp_mask)
{
        int ret;
        struct bio *bio;
        struct bio_batch bb;
        unsigned int sz;
        DECLARE_COMPLETION_ONSTACK(wait);

        atomic_set(&bb.done, 1);
        bb.flags = 1 << BIO_UPTODATE;
        bb.wait = &wait;

        ret = 0;
        while (nr_sects != 0) {
                bio = bio_alloc(gfp_mask,
                                min(nr_sects, (sector_t)BIO_MAX_PAGES));
                if (!bio) {
                        ret = -ENOMEM;
                        break;
                }

                bio->bi_sector = sector;
                bio->bi_bdev   = bdev;
                bio->bi_end_io = bio_batch_end_io;
                bio->bi_private = &bb;

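                /*
                 * Pack zero pages into the bio until it is full:
                 * bio_add_page() returns the number of bytes actually
                 * added, so a short return means the bio has no room
                 * left and must be submitted before continuing.
                 */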
                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
                        ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
                        nr_sects -= ret >> 9;
                        sector += ret >> 9;
                        if (ret < (sz << 9))
                                break;
                }
                ret = 0;
                atomic_inc(&bb.done);
                submit_bio(WRITE, bio);
        }

        /* Wait for bios in-flight */
        if (!atomic_dec_and_test(&bb.done))
                wait_for_completion(&wait);

        if (!test_bit(BIO_UPTODATE, &bb.flags))
                /* One of the bios in the batch completed with an error. */
                ret = -EIO;

        return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:       blockdev to write
 * @sector:     start sector
 * @nr_sects:   number of sectors to write
 * @gfp_mask:   memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill the block range, using WRITE SAME when the device supports
 *  it and falling back to manually issued zero-filled write bios.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                         sector_t nr_sects, gfp_t gfp_mask)
{
        if (bdev_write_same(bdev)) {
                unsigned char bdn[BDEVNAME_SIZE];

                if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                             ZERO_PAGE(0)))
                        return 0;

                bdevname(bdev, bdn);
                pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
        }

        return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
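
/*
 * Illustrative sketch (not part of this file): zeroing a metadata
 * region before first use. The helper name and region values are
 * hypothetical; the WRITE SAME fast path and the manual fallback are
 * both handled inside blkdev_issue_zeroout().
 */
static int example_zero_region(struct block_device *bdev,
                               sector_t start, sector_t len)
{
        return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL);
}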
 296