linux/drivers/block/null_blk/zoned.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

static inline sector_t mb_to_sects(unsigned long mb)
{
        return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
        return sect >> ilog2(dev->zone_size_sects);
}

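/*
 * dev->zone_res_lock protects the open/active zone accounting
 * (nr_zones_imp_open, nr_zones_exp_open and nr_zones_closed). The lock
 * is needed, and taken, only when open and/or active zone limits are
 * configured (dev->need_zone_res_mgmt).
 */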
static inline void null_lock_zone_res(struct nullb_device *dev)
{
        if (dev->need_zone_res_mgmt)
                spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
        if (dev->need_zone_res_mgmt)
                spin_unlock_irq(&dev->zone_res_lock);
}

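/*
 * Per-zone lock: command processing for a memory-backed device may sleep
 * (e.g. when allocating pages for the backing store), so such devices use
 * a mutex, while devices without memory backing can use a spinlock.
 */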
static inline void null_init_zone_lock(struct nullb_device *dev,
                                       struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_lock_init(&zone->spinlock);
        else
                mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
                                  struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_lock_irq(&zone->spinlock);
        else
                mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_unlock_irq(&zone->spinlock);
        else
                mutex_unlock(&zone->mutex);
}

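/*
 * Initialize the zoned device configuration: validate the zone size and
 * capacity, sanitize the conventional zone count and the open/active zone
 * limits, and allocate and initialize the zone array.
 */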
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
        sector_t dev_capacity_sects, zone_capacity_sects;
        struct nullb_zone *zone;
        sector_t sector = 0;
        unsigned int i;

        if (!is_power_of_2(dev->zone_size)) {
                pr_err("zone_size must be power-of-two\n");
                return -EINVAL;
        }
        if (dev->zone_size > dev->size) {
                pr_err("Zone size larger than device capacity\n");
                return -EINVAL;
        }

        if (!dev->zone_capacity)
                dev->zone_capacity = dev->zone_size;

        if (dev->zone_capacity > dev->zone_size) {
                pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
                       dev->zone_capacity, dev->zone_size);
                return -EINVAL;
        }

        zone_capacity_sects = mb_to_sects(dev->zone_capacity);
        dev_capacity_sects = mb_to_sects(dev->size);
        dev->zone_size_sects = mb_to_sects(dev->zone_size);
        dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
                >> ilog2(dev->zone_size_sects);

        dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!dev->zones)
                return -ENOMEM;

        spin_lock_init(&dev->zone_res_lock);

        if (dev->zone_nr_conv >= dev->nr_zones) {
                dev->zone_nr_conv = dev->nr_zones - 1;
                pr_info("changed the number of conventional zones to %u\n",
                        dev->zone_nr_conv);
        }

        /*
         * Max active zones has to be less than the number of sequential
         * zones in order to be enforceable.
         */
        if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
                dev->zone_max_active = 0;
                pr_info("zone_max_active limit disabled, limit >= zone count\n");
        }

        /* Max open zones has to be <= max active zones */
        if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
                dev->zone_max_open = dev->zone_max_active;
                pr_info("changed the maximum number of open zones to %u\n",
                        dev->zone_max_open);
        } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
                dev->zone_max_open = 0;
                pr_info("zone_max_open limit disabled, limit >= zone count\n");
        }
        dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
        dev->imp_close_zone_no = dev->zone_nr_conv;

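        /* Conventional zones: fixed size, no write pointer management */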
        for (i = 0; i < dev->zone_nr_conv; i++) {
                zone = &dev->zones[i];

                null_init_zone_lock(dev, zone);
                zone->start = sector;
                zone->len = dev->zone_size_sects;
                zone->capacity = zone->len;
                zone->wp = zone->start + zone->len;
                zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
                zone->cond = BLK_ZONE_COND_NOT_WP;

                sector += dev->zone_size_sects;
        }

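        /*
         * Sequential write required zones: the last zone may be smaller
         * than the zone size if the device capacity is not a multiple of
         * it, and the usable capacity of a zone may be smaller than the
         * zone size (zone_capacity <= zone_size).
         */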
        for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                zone = &dev->zones[i];

                null_init_zone_lock(dev, zone);
                zone->start = zone->wp = sector;
                if (zone->start + dev->zone_size_sects > dev_capacity_sects)
                        zone->len = dev_capacity_sects - zone->start;
                else
                        zone->len = dev->zone_size_sects;
                zone->capacity =
                        min_t(sector_t, zone->len, zone_capacity_sects);
                zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
                zone->cond = BLK_ZONE_COND_EMPTY;

                sector += dev->zone_size_sects;
        }

        return 0;
}

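/*
 * Set up the zoned device queue limits and flags and, for blk-mq devices,
 * revalidate the disk zones when the device is registered.
 */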
int null_register_zoned_dev(struct nullb *nullb)
{
        struct nullb_device *dev = nullb->dev;
        struct request_queue *q = nullb->q;

        blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
        blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);

        if (queue_is_mq(q)) {
                int ret = blk_revalidate_disk_zones(nullb->disk, NULL);

                if (ret)
                        return ret;
        } else {
                blk_queue_chunk_sectors(q, dev->zone_size_sects);
                q->nr_zones = blkdev_nr_zones(nullb->disk);
        }

        blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
        blk_queue_max_open_zones(q, dev->zone_max_open);
        blk_queue_max_active_zones(q, dev->zone_max_active);

        return 0;
}

void null_free_zoned_dev(struct nullb_device *dev)
{
        kvfree(dev->zones);
        dev->zones = NULL;
}

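/*
 * Report up to @nr_zones zones starting from @sector. Each zone descriptor
 * is filled under the zone lock so that the report callback never sees a
 * partially updated zone.
 */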
int null_report_zones(struct gendisk *disk, sector_t sector,
                unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct nullb *nullb = disk->private_data;
        struct nullb_device *dev = nullb->dev;
        unsigned int first_zone, i;
        struct nullb_zone *zone;
        struct blk_zone blkz;
        int error;

        first_zone = null_zone_no(dev, sector);
        if (first_zone >= dev->nr_zones)
                return 0;

        nr_zones = min(nr_zones, dev->nr_zones - first_zone);
        trace_nullb_report_zones(nullb, nr_zones);

        memset(&blkz, 0, sizeof(struct blk_zone));
        zone = &dev->zones[first_zone];
        for (i = 0; i < nr_zones; i++, zone++) {
                /*
                 * Stacked DM target drivers will remap the zone information by
                 * modifying the zone information passed to the report callback.
                 * So use a local copy to avoid corruption of the device zone
                 * array.
                 */
                null_lock_zone(dev, zone);
                blkz.start = zone->start;
                blkz.len = zone->len;
                blkz.wp = zone->wp;
                blkz.type = zone->type;
                blkz.cond = zone->cond;
                blkz.capacity = zone->capacity;
                null_unlock_zone(dev, zone);

                error = cb(&blkz, i, data);
                if (error)
                        return error;
        }

        return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
                                sector_t sector, unsigned int len)
{
        struct nullb_device *dev = nullb->dev;
        struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
        unsigned int nr_sectors = len >> SECTOR_SHIFT;

        /* Read must be below the write pointer position */
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
            sector + nr_sectors <= zone->wp)
                return len;

        if (sector > zone->wp)
                return 0;

        return (zone->wp - sector) << SECTOR_SHIFT;
}

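/*
 * Close a zone and update the open/active zone accounting. Must be called
 * with the zone resource lock held. A zone that was never written (write
 * pointer still at the zone start) transitions back to the empty condition
 * instead of closed.
 */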
static blk_status_t __null_close_zone(struct nullb_device *dev,
                                      struct nullb_zone *zone)
{
        switch (zone->cond) {
        case BLK_ZONE_COND_CLOSED:
                /* close operation on closed is not an error */
                return BLK_STS_OK;
        case BLK_ZONE_COND_IMP_OPEN:
                dev->nr_zones_imp_open--;
                break;
        case BLK_ZONE_COND_EXP_OPEN:
                dev->nr_zones_exp_open--;
                break;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_FULL:
        default:
                return BLK_STS_IOERR;
        }

        if (zone->wp == zone->start) {
                zone->cond = BLK_ZONE_COND_EMPTY;
        } else {
                zone->cond = BLK_ZONE_COND_CLOSED;
                dev->nr_zones_closed++;
        }

        return BLK_STS_OK;
}

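/*
 * Close the first implicitly open zone found, scanning the sequential zones
 * round-robin from the zone following the last one closed here, so that the
 * same zone is not always picked. Called with the zone resource lock held.
 */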
static void null_close_imp_open_zone(struct nullb_device *dev)
{
        struct nullb_zone *zone;
        unsigned int zno, i;

        zno = dev->imp_close_zone_no;
        if (zno >= dev->nr_zones)
                zno = dev->zone_nr_conv;

        for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                zone = &dev->zones[zno];
                zno++;
                if (zno >= dev->nr_zones)
                        zno = dev->zone_nr_conv;

                if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
                        __null_close_zone(dev, zone);
                        dev->imp_close_zone_no = zno;
                        return;
                }
        }
}

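/* Check if one more zone can be activated under the zone_max_active limit. */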
static blk_status_t null_check_active(struct nullb_device *dev)
{
        if (!dev->zone_max_active)
                return BLK_STS_OK;

        if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
                        dev->nr_zones_closed < dev->zone_max_active)
                return BLK_STS_OK;

        return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

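/*
 * Check if one more zone can be opened under the zone_max_open limit,
 * implicitly closing an open zone to free an open zone resource if that
 * does not violate the active zone limit.
 */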
static blk_status_t null_check_open(struct nullb_device *dev)
{
        if (!dev->zone_max_open)
                return BLK_STS_OK;

        if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
                return BLK_STS_OK;

        if (dev->nr_zones_imp_open) {
                if (null_check_active(dev) == BLK_STS_OK) {
                        null_close_imp_open_zone(dev);
                        return BLK_STS_OK;
                }
        }

        return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is no
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
                                              struct nullb_zone *zone)
{
        blk_status_t ret;

        switch (zone->cond) {
        case BLK_ZONE_COND_EMPTY:
                ret = null_check_active(dev);
                if (ret != BLK_STS_OK)
                        return ret;
                fallthrough;
        case BLK_ZONE_COND_CLOSED:
                return null_check_open(dev);
        default:
                /* Should never be called for other states */
                WARN_ON(1);
                return BLK_STS_IOERR;
        }
}

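/*
 * Process a regular write or a zone append: validate the write pointer
 * position, implicitly open the target zone if needed (within the
 * configured zone resource limits), execute the write and advance the
 * write pointer, transitioning the zone to the full condition once the
 * write pointer reaches the zone capacity.
 */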
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                                    unsigned int nr_sectors, bool append)
{
        struct nullb_device *dev = cmd->nq->dev;
        unsigned int zno = null_zone_no(dev, sector);
        struct nullb_zone *zone = &dev->zones[zno];
        blk_status_t ret;

        trace_nullb_zone_op(cmd, zno, zone->cond);

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
                if (append)
                        return BLK_STS_IOERR;
                return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
        }

        null_lock_zone(dev, zone);

        if (zone->cond == BLK_ZONE_COND_FULL) {
                /* Cannot write to a full zone */
                ret = BLK_STS_IOERR;
                goto unlock;
        }

        /*
         * Regular writes must be at the write pointer position.
         * Zone append writes are automatically issued at the write
         * pointer and the position returned using the request or BIO
         * sector.
         */
        if (append) {
                sector = zone->wp;
                if (cmd->bio)
                        cmd->bio->bi_iter.bi_sector = sector;
                else
                        cmd->rq->__sector = sector;
        } else if (sector != zone->wp) {
                ret = BLK_STS_IOERR;
                goto unlock;
        }

        if (zone->wp + nr_sectors > zone->start + zone->capacity) {
                ret = BLK_STS_IOERR;
                goto unlock;
        }

        if (zone->cond == BLK_ZONE_COND_CLOSED ||
            zone->cond == BLK_ZONE_COND_EMPTY) {
                null_lock_zone_res(dev);

                ret = null_check_zone_resources(dev, zone);
                if (ret != BLK_STS_OK) {
                        null_unlock_zone_res(dev);
                        goto unlock;
                }
                if (zone->cond == BLK_ZONE_COND_CLOSED) {
                        dev->nr_zones_closed--;
                        dev->nr_zones_imp_open++;
                } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
                        dev->nr_zones_imp_open++;
                }

                if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;

                null_unlock_zone_res(dev);
        }

        ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
        if (ret != BLK_STS_OK)
                goto unlock;

        zone->wp += nr_sectors;
        if (zone->wp == zone->start + zone->capacity) {
                null_lock_zone_res(dev);
                if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
                        dev->nr_zones_exp_open--;
                else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
                        dev->nr_zones_imp_open--;
                zone->cond = BLK_ZONE_COND_FULL;
                null_unlock_zone_res(dev);
        }

        ret = BLK_STS_OK;

unlock:
        null_unlock_zone(dev, zone);

        return ret;
}

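/* Explicitly open a zone (REQ_OP_ZONE_OPEN). */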
static blk_status_t null_open_zone(struct nullb_device *dev,
                                   struct nullb_zone *zone)
{
        blk_status_t ret = BLK_STS_OK;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        null_lock_zone_res(dev);

        switch (zone->cond) {
        case BLK_ZONE_COND_EXP_OPEN:
                /* open operation on exp open is not an error */
                goto unlock;
        case BLK_ZONE_COND_EMPTY:
                ret = null_check_zone_resources(dev, zone);
                if (ret != BLK_STS_OK)
                        goto unlock;
                break;
        case BLK_ZONE_COND_IMP_OPEN:
                dev->nr_zones_imp_open--;
                break;
        case BLK_ZONE_COND_CLOSED:
                ret = null_check_zone_resources(dev, zone);
                if (ret != BLK_STS_OK)
                        goto unlock;
                dev->nr_zones_closed--;
                break;
        case BLK_ZONE_COND_FULL:
        default:
                ret = BLK_STS_IOERR;
                goto unlock;
        }

        zone->cond = BLK_ZONE_COND_EXP_OPEN;
        dev->nr_zones_exp_open++;

unlock:
        null_unlock_zone_res(dev);

        return ret;
}

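/* Close a zone (REQ_OP_ZONE_CLOSE). */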
static blk_status_t null_close_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        blk_status_t ret;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        null_lock_zone_res(dev);
        ret = __null_close_zone(dev, zone);
        null_unlock_zone_res(dev);

        return ret;
}

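/*
 * Transition a zone to the full condition (REQ_OP_ZONE_FINISH), moving the
 * write pointer to the end of the zone.
 */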
static blk_status_t null_finish_zone(struct nullb_device *dev,
                                     struct nullb_zone *zone)
{
        blk_status_t ret = BLK_STS_OK;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        null_lock_zone_res(dev);

        switch (zone->cond) {
        case BLK_ZONE_COND_FULL:
                /* finish operation on full is not an error */
                goto unlock;
        case BLK_ZONE_COND_EMPTY:
                ret = null_check_zone_resources(dev, zone);
                if (ret != BLK_STS_OK)
                        goto unlock;
                break;
        case BLK_ZONE_COND_IMP_OPEN:
                dev->nr_zones_imp_open--;
                break;
        case BLK_ZONE_COND_EXP_OPEN:
                dev->nr_zones_exp_open--;
                break;
        case BLK_ZONE_COND_CLOSED:
                ret = null_check_zone_resources(dev, zone);
                if (ret != BLK_STS_OK)
                        goto unlock;
                dev->nr_zones_closed--;
                break;
        default:
                ret = BLK_STS_IOERR;
                goto unlock;
        }

        zone->cond = BLK_ZONE_COND_FULL;
        zone->wp = zone->start + zone->len;

unlock:
        null_unlock_zone_res(dev);

        return ret;
}

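/*
 * Reset a zone write pointer (REQ_OP_ZONE_RESET). For a memory-backed
 * device, the data of the reset zone is also discarded.
 */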
static blk_status_t null_reset_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        null_lock_zone_res(dev);

        switch (zone->cond) {
        case BLK_ZONE_COND_EMPTY:
                /* reset operation on empty is not an error */
                null_unlock_zone_res(dev);
                return BLK_STS_OK;
        case BLK_ZONE_COND_IMP_OPEN:
                dev->nr_zones_imp_open--;
                break;
        case BLK_ZONE_COND_EXP_OPEN:
                dev->nr_zones_exp_open--;
                break;
        case BLK_ZONE_COND_CLOSED:
                dev->nr_zones_closed--;
                break;
        case BLK_ZONE_COND_FULL:
                break;
        default:
                null_unlock_zone_res(dev);
                return BLK_STS_IOERR;
        }

        zone->cond = BLK_ZONE_COND_EMPTY;
        zone->wp = zone->start;

        null_unlock_zone_res(dev);

        if (dev->memory_backed)
                return null_handle_discard(dev, zone->start, zone->len);

        return BLK_STS_OK;
}

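/*
 * Execute a zone management operation. REQ_OP_ZONE_RESET_ALL is handled by
 * resetting every sequential zone that is not already empty.
 */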
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
                                   sector_t sector)
{
        struct nullb_device *dev = cmd->nq->dev;
        unsigned int zone_no;
        struct nullb_zone *zone;
        blk_status_t ret;
        size_t i;

        if (op == REQ_OP_ZONE_RESET_ALL) {
                for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                        zone = &dev->zones[i];
                        null_lock_zone(dev, zone);
                        if (zone->cond != BLK_ZONE_COND_EMPTY) {
                                null_reset_zone(dev, zone);
                                trace_nullb_zone_op(cmd, i, zone->cond);
                        }
                        null_unlock_zone(dev, zone);
                }
                return BLK_STS_OK;
        }

        zone_no = null_zone_no(dev, sector);
        zone = &dev->zones[zone_no];

        null_lock_zone(dev, zone);

        switch (op) {
        case REQ_OP_ZONE_RESET:
                ret = null_reset_zone(dev, zone);
                break;
        case REQ_OP_ZONE_OPEN:
                ret = null_open_zone(dev, zone);
                break;
        case REQ_OP_ZONE_CLOSE:
                ret = null_close_zone(dev, zone);
                break;
        case REQ_OP_ZONE_FINISH:
                ret = null_finish_zone(dev, zone);
                break;
        default:
                ret = BLK_STS_NOTSUPP;
                break;
        }

        if (ret == BLK_STS_OK)
                trace_nullb_zone_op(cmd, zone_no, zone->cond);

        null_unlock_zone(dev, zone);

        return ret;
}

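/*
 * Main entry point for command processing on a zoned null_blk device.
 * Writes and zone management operations get dedicated handling; any other
 * operation (e.g. a read) is processed under the target zone lock.
 */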
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
                                    sector_t sector, sector_t nr_sectors)
{
        struct nullb_device *dev;
        struct nullb_zone *zone;
        blk_status_t sts;

        switch (op) {
        case REQ_OP_WRITE:
                return null_zone_write(cmd, sector, nr_sectors, false);
        case REQ_OP_ZONE_APPEND:
                return null_zone_write(cmd, sector, nr_sectors, true);
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return null_zone_mgmt(cmd, op, sector);
        default:
                dev = cmd->nq->dev;
                zone = &dev->zones[null_zone_no(dev, sector)];

                null_lock_zone(dev, zone);
                sts = null_process_cmd(cmd, op, sector, nr_sectors);
                null_unlock_zone(dev, zone);
                return sts;
        }
}