linux/block/blk-lib.c
/*
 * Generic block device helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

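/**
 * struct bio_batch - state shared by a batch of bios issued together
 * @done:	number of outstanding bios, plus one for the submitter
 * @flags:	BIO_UPTODATE is cleared if any bio in the batch fails
 * @wait:	completion signalled when the last bio finishes
 */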
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	/* -EOPNOTSUPP is not treated as a failure of the whole batch */
	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	/* the last completion wakes the submitter waiting on bb->wait */
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}
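
/*
 * Both helpers below drive a bio_batch the same way: @done starts at 1
 * so the batch cannot complete while bios are still being allocated and
 * submitted, each submitted bio adds a reference, and the submitter
 * drops its own reference once the submission loop is done:
 *
 *	atomic_set(&bb.done, 1);
 *	...
 *	atomic_inc(&bb.done);
 *	submit_bio(type, bio);
 *	...
 *	if (!atomic_dec_and_test(&bb.done))
 *		wait_for_completion(&wait);
 */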

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors;
	unsigned int granularity, alignment, mask;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	mask = granularity - 1;
	alignment = (bdev_discard_alignment(bdev) >> 9) & mask;
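	/*
	 * Example: a 4KiB discard granularity gives granularity = 8
	 * sectors and mask = 7; a 1KiB discard alignment then yields
	 * alignment = 2 sectors.  Note that the mask arithmetic assumes
	 * the granularity is a power of two.
	 */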

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors = round_down(max_discard_sectors, granularity);
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
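		/*
		 * Example: with granularity = 8 and alignment = 2, a
		 * request starting at sector 50 for 53 sectors would end
		 * at end_sect = 103; round_down(103 - 2, 8) + 2 = 98, so
		 * the discard is trimmed to 48 sectors and the next
		 * request starts at the aligned sector 98.
		 */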
		end_sect = sector + req_sects;
		if (req_sects < nr_sects && (end_sect & mask) != alignment) {
			end_sect =
				round_down(end_sect - alignment, granularity)
				+ alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
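
/*
 * Example call (illustrative): discard a 1MiB region starting at
 * start_sector, without requiring secure erase:
 *
 *	err = blkdev_issue_discard(bdev, start_sector, 2048,
 *				   GFP_KERNEL, 0);
 */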

/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue writes for
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

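		/*
		 * Pack the bio with ZERO_PAGE(0) until either the
		 * request is covered or the bio is full; bio_add_page()
		 * returns the number of bytes actually added, so a short
		 * return means the bio can take no more pages and must
		 * be submitted before continuing.
		 */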
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
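
/*
 * Example call (illustrative): zero a 1MiB region starting at
 * start_sector:
 *
 *	err = blkdev_issue_zeroout(bdev, start_sector, 2048, GFP_KERNEL);
 */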