linux/block/blk-zoned.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME
/**
 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
 * @zone_cond: BLK_ZONE_COND_XXX.
 *
 * Description: Centralized block layer helper to convert a BLK_ZONE_COND_XXX
 * value into its string representation. Useful for debugging and tracing zone
 * conditions. For an invalid BLK_ZONE_COND_XXX value, the string "UNKNOWN" is
 * returned.
 */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
{
	static const char *zone_cond_str = "UNKNOWN";

	if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
		zone_cond_str = zone_cond_name[zone_cond];

	return zone_cond_str;
}
EXPORT_SYMBOL_GPL(blk_zone_cond_str);
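
/*
 * Usage sketch (illustrative, not part of the original file): logging a
 * zone condition, e.g. from a report_zones callback, could look like:
 *
 *	pr_debug("zone at %llu: %s\n", zone->start,
 *		 blk_zone_cond_str(zone->cond));
 */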

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

bool blk_req_zone_write_trylock(struct request *rq)
{
	unsigned int zno = blk_rq_zone_no(rq);

	if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
		return false;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;

	return true;
}
EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
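
/*
 * Usage sketch (illustrative, heavily simplified from what the mq-deadline
 * I/O scheduler does): a dispatch path pairs these helpers as follows,
 * skipping the request when its target zone is already locked:
 *
 *	if (blk_req_needs_zone_write_lock(rq) &&
 *	    !blk_req_zone_write_trylock(rq))
 *		return NULL;	(zone busy, dispatch rq later)
 *
 * and calls blk_req_zone_write_unlock(rq) once the write completes or the
 * request is requeued.
 */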

/**
 * blkdev_nr_zones - Get number of zones
 * @disk:	Target gendisk
 *
 * Return the total number of zones of a zoned block device.  For a block
 * device without zone capabilities, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct gendisk *disk)
{
	sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);

	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
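
/*
 * Usage sketch (illustrative; struct my_zone_info is hypothetical): sizing
 * a per-zone array for a zoned disk:
 *
 *	unsigned int nr_zones = blkdev_nr_zones(disk);
 *	struct my_zone_info *zi = kcalloc(nr_zones, sizeof(*zi), GFP_KERNEL);
 */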

/**
 * blkdev_report_zones - Get zones information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @nr_zones:	Maximum number of zones to report
 * @cb:		Callback function called for each reported zone
 * @data:	Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at
 *    most @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
	    WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
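
/*
 * Usage sketch (illustrative; the callback name and counter are made up):
 * counting the sequential-write-required zones of a device with a
 * report_zones_cb callback:
 *
 *	static int count_seq_cb(struct blk_zone *zone, unsigned int idx,
 *				void *data)
 *	{
 *		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
 *			(*(unsigned int *)data)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, count_seq_cb,
 *				      &nr_seq);
 */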

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	ret = bdev->bd_disk->fops->report_zones(bdev->bd_disk, 0,
				q->nr_zones, blk_zone_need_reset_cb,
				need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(blk_queue_zone_no(q, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}

static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, NULL, 0);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;

	return submit_bio_wait(&bio);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
 * @op:		Operation to be performed on the zones
 * @sector:	Start sector of the first zone to operate on
 * @nr_sectors:	Number of sectors, should be at least the length of one zone
 *		and must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possibly smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);
		bio->bi_opf = op | REQ_SYNC;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
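
/*
 * Usage sketch (illustrative): resetting the single zone that starts at a
 * zone-aligned sector would look like:
 *
 *	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, sector,
 *			       blk_queue_zone_sectors(bdev_get_queue(bdev)),
 *			       GFP_KERNEL);
 */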

struct zone_report_args {
	struct blk_zone __user *zones;
};

static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
				    void *data)
{
	struct zone_report_args *args = data;

	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
		return -EFAULT;
	return 0;
}

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
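
/*
 * Userspace usage sketch (illustrative): the BLKREPORTZONE argument is a
 * struct blk_zone_report immediately followed by enough memory for
 * nr_zones struct blk_zone entries:
 *
 *	struct blk_zone_report *rep;
 *
 *	rep = malloc(sizeof(*rep) + 16 * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	if (!ioctl(fd, BLKREPORTZONE, rep))
 *		printf("%u zones reported\n", rep->nr_zones);
 */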

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			return ret;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

	/*
	 * Invalidate the page cache again for zone reset: writes can only be
	 * direct for zoned devices, so concurrent writes cannot add pages to
	 * the page cache during or after the reset. Concurrent reads may
	 * repopulate the page cache, but dropping those pages is fine.
	 */
	if (!ret && cmd == BLKRESETZONE)
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);

	return ret;
}
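
/*
 * Userspace usage sketch (illustrative; zone_start and zone_len are assumed
 * to describe one zone of the device): resetting a zone from an application:
 *
 *	struct blk_zone_range zrange = {
 *		.sector = zone_start,
 *		.nr_sectors = zone_len,
 *	};
 *
 *	ret = ioctl(fd, BLKRESETZONE, &zrange);
 */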

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->conv_zones_bitmap);
	q->conv_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

struct blk_revalidate_zone_args {
	struct gendisk	*disk;
	unsigned long	*conv_zones_bitmap;
	unsigned long	*seq_zones_wlock;
	unsigned int	nr_zones;
	sector_t	zone_sectors;
	sector_t	sector;
};

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception of a possibly
	 * smaller last zone.
	 */
	if (zone->start == 0) {
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	args->sector += zone->len;
	return 0;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * the zone bitmaps of a disk request queue. This function should normally be
 * called within the disk ->revalidate method for blk-mq based drivers.
 * For BIO based drivers, only q->nr_zones needs to be updated so that the
 * sysfs exposed value is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones were reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
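
/*
 * Usage sketch (illustrative; mydrv_revalidate_cb is hypothetical): a
 * blk-mq driver typically calls this from its revalidate path once the
 * device capacity and zoned model are set up:
 *
 *	ret = blk_revalidate_disk_zones(disk, mydrv_revalidate_cb);
 */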

void blk_queue_clear_zone_settings(struct request_queue *q)
{
	blk_mq_freeze_queue(q);

	blk_queue_free_zone_bitmaps(q);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	q->nr_zones = 0;
	q->max_open_zones = 0;
	q->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}