linux/drivers/lightnvm/core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
        int ch_off;
        int num_lun;
        int *lun_offs;
};

struct nvm_dev_map {
        struct nvm_ch_map *chnls;
        int num_ch;
};
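
/*
 * Worked example (hypothetical numbers, not from a real device): on a
 * parent device with 4 luns per channel, a target built over device
 * luns 6..13 stores ch_off = 1 and lun_offs[0] = 2 for its first
 * channel, so target (ch 0, lun 0) maps to device (ch 0 + 1, lun 0 + 2)
 * = (ch 1, lun 2). The reverse map (dev->rmap) holds the same offsets
 * for translating device addresses back to target addresses.
 */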

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
        struct nvm_target *tgt;

        list_for_each_entry(tgt, &dev->targets, list)
                if (!strcmp(name, tgt->disk->disk_name))
                        return tgt;

        return NULL;
}

static bool nvm_target_exists(const char *name)
{
        struct nvm_dev *dev;
        struct nvm_target *tgt;
        bool ret = false;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                mutex_lock(&dev->mlock);
                list_for_each_entry(tgt, &dev->targets, list) {
                        if (!strcmp(name, tgt->disk->disk_name)) {
                                ret = true;
                                mutex_unlock(&dev->mlock);
                                goto out;
                        }
                }
                mutex_unlock(&dev->mlock);
        }

out:
        up_write(&nvm_lock);
        return ret;
}

static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++) {
                if (test_and_set_bit(i, dev->lun_map)) {
                        pr_err("lun %d already allocated\n", i);
                        goto err;
                }
        }

        return 0;
err:
        while (--i >= lun_begin)
                clear_bit(i, dev->lun_map);

        return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
                                 int lun_end)
{
        int i;

        for (i = lun_begin; i <= lun_end; i++)
                WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_map = tgt_dev->map;
        int i, j;

        for (i = 0; i < dev_map->num_ch; i++) {
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs = ch_map->lun_offs;
                int ch = i + ch_map->ch_off;

                if (clear) {
                        for (j = 0; j < ch_map->num_lun; j++) {
                                int lun = j + lun_offs[j];
                                int lunid = (ch * dev->geo.num_lun) + lun;

                                WARN_ON(!test_and_clear_bit(lunid,
                                                        dev->lun_map));
                        }
                }

                kfree(ch_map->lun_offs);
        }

        kfree(dev_map->chnls);
        kfree(dev_map);

        kfree(tgt_dev->luns);
        kfree(tgt_dev);
}

static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
                                              u16 lun_begin, u16 lun_end,
                                              u16 op)
{
        struct nvm_tgt_dev *tgt_dev = NULL;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_dev_map *dev_map;
        struct ppa_addr *luns;
        int num_lun = lun_end - lun_begin + 1;
        int luns_left = num_lun;
        int num_ch = num_lun / dev->geo.num_lun;
        int num_ch_mod = num_lun % dev->geo.num_lun;
        int bch = lun_begin / dev->geo.num_lun;
        int blun = lun_begin % dev->geo.num_lun;
        int lunid = 0;
        int lun_balanced = 1;
        int sec_per_lun, prev_num_lun;
        int i, j;

        num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;
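        /*
         * num_ch is now the ceiling of num_lun / geo.num_lun: e.g. 10
         * luns over channels of 4 luns each span 3 channels (4 + 4 + 2).
         */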

        dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!dev_map)
                goto err_dev;

        dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
        if (!dev_map->chnls)
                goto err_chnls;

        luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
        if (!luns)
                goto err_luns;

        prev_num_lun = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;
        for (i = 0; i < num_ch; i++) {
                struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
                int *lun_roffs = ch_rmap->lun_offs;
                struct nvm_ch_map *ch_map = &dev_map->chnls[i];
                int *lun_offs;
                int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
                                        dev->geo.num_lun : luns_left;

                if (lun_balanced && prev_num_lun != luns_in_chnl)
                        lun_balanced = 0;

                ch_map->ch_off = ch_rmap->ch_off = bch;
                ch_map->num_lun = luns_in_chnl;

                lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_offs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++) {
                        luns[lunid].ppa = 0;
                        luns[lunid].a.ch = i;
                        luns[lunid++].a.lun = j;

                        lun_offs[j] = blun;
                        lun_roffs[j + blun] = blun;
                }

                ch_map->lun_offs = lun_offs;

                /* when starting a new channel, lun offset is reset */
                blun = 0;
                luns_left -= luns_in_chnl;
        }

        dev_map->num_ch = num_ch;

        tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
        if (!tgt_dev)
                goto err_ch;

        /* Inherit device geometry from parent */
        memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

        /* Target device only owns a portion of the physical device */
        tgt_dev->geo.num_ch = num_ch;
        tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
        tgt_dev->geo.all_luns = num_lun;
        tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

        tgt_dev->geo.op = op;

        sec_per_lun = dev->geo.clba * dev->geo.num_chk;
        tgt_dev->geo.total_secs = num_lun * sec_per_lun;

        tgt_dev->q = dev->q;
        tgt_dev->map = dev_map;
        tgt_dev->luns = luns;
        tgt_dev->parent = dev;

        return tgt_dev;
err_ch:
        while (--i >= 0)
                kfree(dev_map->chnls[i].lun_offs);
        kfree(luns);
err_luns:
        kfree(dev_map->chnls);
err_chnls:
        kfree(dev_map);
err_dev:
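        /* tgt_dev is still NULL on every path that reaches this label */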
        return tgt_dev;
}

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        list_for_each_entry(tt, &nvm_tgt_types, list)
                if (!strcmp(name, tt->name))
                        return tt;

        return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
        struct nvm_tgt_type *tt;

        down_write(&nvm_tgtt_lock);
        tt = __nvm_find_target_type(name);
        up_write(&nvm_tgtt_lock);

        return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
                                 int lun_end)
{
        if (lun_begin > lun_end || lun_end >= geo->all_luns) {
                pr_err("lun out of bounds (%u:%u > %u)\n",
                        lun_begin, lun_end, geo->all_luns - 1);
                return -EINVAL;
        }

        return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
                               struct nvm_ioctl_create_simple *s)
{
        struct nvm_geo *geo = &dev->geo;

        if (s->lun_begin == -1 && s->lun_end == -1) {
                s->lun_begin = 0;
                s->lun_end = geo->all_luns - 1;
        }

        return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
                                 struct nvm_ioctl_create_extended *e)
{
        if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
                e->lun_begin = 0;
                e->lun_end = dev->geo.all_luns - 1;
        }

        /* If op is not set, fall back to the target's default */
        if (e->op == 0xFFFF) {
                e->op = NVM_TARGET_DEFAULT_OP;
        } else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
                pr_err("invalid over provisioning value\n");
                return -EINVAL;
        }

        return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
        struct nvm_ioctl_create_extended e;
        struct gendisk *tdisk;
        struct nvm_tgt_type *tt;
        struct nvm_target *t;
        struct nvm_tgt_dev *tgt_dev;
        void *targetdata;
        unsigned int mdts;
        int ret;

        switch (create->conf.type) {
        case NVM_CONFIG_TYPE_SIMPLE:
                ret = __nvm_config_simple(dev, &create->conf.s);
                if (ret)
                        return ret;

                e.lun_begin = create->conf.s.lun_begin;
                e.lun_end = create->conf.s.lun_end;
                e.op = NVM_TARGET_DEFAULT_OP;
                break;
        case NVM_CONFIG_TYPE_EXTENDED:
                ret = __nvm_config_extended(dev, &create->conf.e);
                if (ret)
                        return ret;

                e = create->conf.e;
                break;
        default:
                pr_err("config type not valid\n");
                return -EINVAL;
        }

        tt = nvm_find_target_type(create->tgttype);
        if (!tt) {
                pr_err("target type %s not found\n", create->tgttype);
                return -EINVAL;
        }

        if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
                pr_err("device is incompatible with target L2P type.\n");
                return -EINVAL;
        }

        if (nvm_target_exists(create->tgtname)) {
                pr_err("target name already exists (%s)\n",
                                                        create->tgtname);
                return -EINVAL;
        }

        ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
        if (ret)
                return ret;

        t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_reserve;
        }

        tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
        if (!tgt_dev) {
                pr_err("could not create target device\n");
                ret = -ENOMEM;
                goto err_t;
        }

        tdisk = blk_alloc_disk(dev->q->node);
        if (!tdisk) {
                ret = -ENOMEM;
                goto err_dev;
        }

        strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
        tdisk->major = 0;
        tdisk->first_minor = 0;
        tdisk->fops = tt->bops;

        targetdata = tt->init(tgt_dev, tdisk, create->flags);
        if (IS_ERR(targetdata)) {
                ret = PTR_ERR(targetdata);
                goto err_init;
        }

        tdisk->private_data = targetdata;
        tdisk->queue->queuedata = targetdata;

        mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
        if (dev->geo.mdts) {
                mdts = min_t(u32, dev->geo.mdts,
                                (dev->geo.csecs >> 9) * NVM_MAX_VLBA);
        }
        blk_queue_max_hw_sectors(tdisk->queue, mdts);

        set_capacity(tdisk, tt->capacity(targetdata));
        add_disk(tdisk);

        if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
                ret = -ENOMEM;
                goto err_sysfs;
        }

        t->type = tt;
        t->disk = tdisk;
        t->dev = tgt_dev;

        mutex_lock(&dev->mlock);
        list_add_tail(&t->list, &dev->targets);
        mutex_unlock(&dev->mlock);

        __module_get(tt->owner);

        return 0;
err_sysfs:
        if (tt->exit)
                tt->exit(targetdata, true);
err_init:
        blk_cleanup_disk(tdisk);
err_dev:
        nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
        kfree(t);
err_reserve:
        nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
        return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
        struct nvm_tgt_type *tt = t->type;
        struct gendisk *tdisk = t->disk;

        del_gendisk(tdisk);

        if (tt->sysfs_exit)
                tt->sysfs_exit(tdisk);

        if (tt->exit)
                tt->exit(tdisk->private_data, graceful);

        nvm_remove_tgt_dev(t->dev, 1);
        blk_cleanup_disk(tdisk);
        module_put(t->type->owner);

        list_del(&t->list);
        kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: if the target was not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
        struct nvm_target *t = NULL;
        struct nvm_dev *dev;

        down_read(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                mutex_lock(&dev->mlock);
                t = nvm_find_target(dev, remove->tgtname);
                if (t) {
                        mutex_unlock(&dev->mlock);
                        break;
                }
                mutex_unlock(&dev->mlock);
        }
        up_read(&nvm_lock);

        if (!t) {
                pr_err("failed to remove target %s\n",
                                remove->tgtname);
                return 1;
        }

        __nvm_remove_target(t, true);
        kref_put(&dev->ref, nvm_free);

        return 0;
}

static int nvm_register_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap;
        int i, j;

        rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
        if (!rmap)
                goto err_rmap;

        rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
                                                                GFP_KERNEL);
        if (!rmap->chnls)
                goto err_chnls;

        for (i = 0; i < dev->geo.num_ch; i++) {
                struct nvm_ch_map *ch_rmap;
                int *lun_roffs;
                int luns_in_chnl = dev->geo.num_lun;

                ch_rmap = &rmap->chnls[i];

                ch_rmap->ch_off = -1;
                ch_rmap->num_lun = luns_in_chnl;

                lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
                if (!lun_roffs)
                        goto err_ch;

                for (j = 0; j < luns_in_chnl; j++)
                        lun_roffs[j] = -1;

                ch_rmap->lun_offs = lun_roffs;
        }

        dev->rmap = rmap;

        return 0;
err_ch:
        while (--i >= 0)
                kfree(rmap->chnls[i].lun_offs);
err_chnls:
        kfree(rmap);
err_rmap:
        return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
        struct nvm_dev_map *rmap = dev->rmap;
        int i;

        for (i = 0; i < dev->geo.num_ch; i++)
                kfree(rmap->chnls[i].lun_offs);

        kfree(rmap->chnls);
        kfree(rmap);
}

static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev_map *dev_map = tgt_dev->map;
        struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
        int lun_off = ch_map->lun_offs[p->a.lun];

        p->a.ch += ch_map->ch_off;
        p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_dev_map *dev_rmap = dev->rmap;
        struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
        int lun_roff = ch_rmap->lun_offs[p->a.lun];

        p->a.ch -= ch_rmap->ch_off;
        p->a.lun -= lun_roff;
}
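
/*
 * Worked example (hypothetical numbers, continuing the sketch above the
 * nvm_ch_map definition): with ch_off = 1 and lun_offs[0] = 2,
 * nvm_map_to_dev() turns target (ch 0, lun 0) into device (ch 1, lun 2);
 * nvm_map_to_tgt() subtracts the same offsets, read back out of the
 * reverse map (dev->rmap), to undo the translation.
 */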

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
                                struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                nvm_map_to_dev(tgt_dev, &ppa_list[i]);
                ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
        }
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
                                struct ppa_addr *ppa_list, int nr_ppas)
{
        int i;

        for (i = 0; i < nr_ppas; i++) {
                ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
                nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
        }
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

        nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
        int ret = 0;

        down_write(&nvm_tgtt_lock);
        if (__nvm_find_target_type(tt->name))
                ret = -EEXIST;
        else
                list_add(&tt->list, &nvm_tgt_types);
        up_write(&nvm_tgtt_lock);

        return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
        if (!tt)
                return;

        down_write(&nvm_tgtt_lock);
        list_del(&tt->list);
        up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
                                                        dma_addr_t *dma_handler)
{
        return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
                                                                dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
        dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
        struct nvm_dev *dev;

        list_for_each_entry(dev, &nvm_devices, devices)
                if (!strcmp(name, dev->name))
                        return dev;

        return NULL;
}

static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                        const struct ppa_addr *ppas, int nr_ppas)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_geo *geo = &tgt_dev->geo;
        int i, plane_cnt, pl_idx;
        struct ppa_addr ppa;

        if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
                rqd->nr_ppas = nr_ppas;
                rqd->ppa_addr = ppas[0];

                return 0;
        }

        rqd->nr_ppas = nr_ppas;
        rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
        if (!rqd->ppa_list) {
                pr_err("failed to allocate dma memory\n");
                return -ENOMEM;
        }

        plane_cnt = geo->pln_mode;
        rqd->nr_ppas *= plane_cnt;

        for (i = 0; i < nr_ppas; i++) {
                for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
                        ppa = ppas[i];
                        ppa.g.pl = pl_idx;
                        rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
                }
        }

        return 0;
}
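
/*
 * Layout sketch for the plane expansion above (hypothetical numbers):
 * with plane_cnt = 2 and nr_ppas = 2, ppa_list ends up ordered as
 * [ppa0/pl0, ppa1/pl0, ppa0/pl1, ppa1/pl1], i.e. grouped by plane
 * first (index = pl_idx * nr_ppas + i), and rqd->nr_ppas becomes 4.
 */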

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
                        struct nvm_rq *rqd)
{
        if (!rqd->ppa_list)
                return;

        nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
        int flags = 0;

        if (geo->version == NVM_OCSSD_SPEC_20)
                return 0;

        if (rqd->is_seq)
                flags |= geo->pln_mode >> 1;

        if (rqd->opcode == NVM_OP_PREAD)
                flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
        else if (rqd->opcode == NVM_OP_PWRITE)
                flags |= NVM_IO_SCRAMBLE_ENABLE;

        return flags;
}
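
/*
 * Example (assuming the 1.2-spec flag encoding used here): a sequential
 * read on a 1.2 device gets (geo->pln_mode >> 1) | NVM_IO_SCRAMBLE_ENABLE
 * | NVM_IO_SUSPEND, while on a 2.0-spec device the flags field stays 0.
 */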

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        /*
         * On error, convert the addresses back so the caller sees
         * them in the target's address format.
         */
        ret = dev->ops->submit_io(dev, rqd, buf);
        if (ret)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);
        return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
        struct completion *waiting = rqd->private;

        complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
                              void *buf)
{
        DECLARE_COMPLETION_ONSTACK(wait);
        int ret = 0;

        rqd->end_io = nvm_sync_end_io;
        rqd->private = &wait;

        ret = dev->ops->submit_io(dev, rqd, buf);
        if (ret)
                return ret;

        wait_for_completion_io(&wait);

        return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
                       void *buf)
{
        struct nvm_dev *dev = tgt_dev->parent;
        int ret;

        if (!dev->ops->submit_io)
                return -ENODEV;

        nvm_rq_tgt_to_dev(tgt_dev, rqd);

        rqd->dev = tgt_dev;
        rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

        ret = nvm_submit_io_wait(dev, rqd, buf);

        return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *tgt_dev = rqd->dev;

        /* Convert address space */
        if (tgt_dev)
                nvm_rq_dev_to_tgt(tgt_dev, rqd);

        if (rqd->end_io)
                rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
        if (!dev->ops->submit_io)
                return -ENODEV;

        rqd->dev = NULL;
        rqd->flags = nvm_set_flags(&dev->geo, rqd);

        return nvm_submit_io_wait(dev, rqd, NULL);
}

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
        struct nvm_rq rqd = { NULL };
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
        int ret;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        bio_init(&bio, &bio_vec, 1);
        bio_add_page(&bio, page, PAGE_SIZE, 0);
        bio_set_op_attrs(&bio, REQ_OP_READ, 0);

        rqd.bio = &bio;
        rqd.opcode = NVM_OP_PREAD;
        rqd.is_seq = 1;
        rqd.nr_ppas = 1;
        rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

        ret = nvm_submit_io_sync_raw(dev, &rqd);
        __free_page(page);
        if (ret)
                return ret;

        return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
                             struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, pg, pl;

        /* sense first page */
        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) /* valid data */
                meta->state = NVM_CHK_ST_OPEN;
        else if (ret > 0) {
                /*
                 * An empty page means the chunk is free. CRC/ECC
                 * failures still indicate written data, so treat the
                 * chunk as open and scan it; any other error is
                 * propagated as an io error.
                 */
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                        meta->state = NVM_CHK_ST_FREE;
                        return 0;
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        goto scan;
                default:
                        return -ret; /* other io error */
                }
        }

        /* sense last page */
        ppa.g.pg = geo->num_pg - 1;
        ppa.g.pl = geo->num_pln - 1;

        ret = nvm_bb_chunk_sense(dev, ppa);
        if (ret < 0) /* io error */
                return ret;
        else if (ret == 0) { /* Chunk fully written */
                meta->state = NVM_CHK_ST_CLOSED;
                meta->wp = geo->clba;
                return 0;
        } else if (ret > 0) {
                switch (ret) {
                case NVM_RSP_ERR_EMPTYPAGE:
                case NVM_RSP_ERR_FAILCRC:
                case NVM_RSP_ERR_FAILECC:
                case NVM_RSP_WARN_HIGHECC:
                        meta->state = NVM_CHK_ST_OPEN;
                        break;
                default:
                        return -ret; /* other io error */
                }
        }

scan:
        /*
         * The chunk is open; scan it sequentially to find the write
         * pointer. We assume that targets write data across all planes
         * before moving to the next page.
         */
        for (pg = 0; pg < geo->num_pg; pg++) {
                for (pl = 0; pl < geo->num_pln; pl++) {
                        ppa.g.pg = pg;
                        ppa.g.pl = pl;

                        ret = nvm_bb_chunk_sense(dev, ppa);
                        if (ret < 0) /* io error */
                                return ret;
                        else if (ret == 0) {
                                meta->wp += geo->ws_min;
                        } else if (ret > 0) {
                                switch (ret) {
                                case NVM_RSP_ERR_EMPTYPAGE:
                                        return 0;
                                case NVM_RSP_ERR_FAILCRC:
                                case NVM_RSP_ERR_FAILECC:
                                case NVM_RSP_WARN_HIGHECC:
                                        meta->wp += geo->ws_min;
                                        break;
                                default:
                                        return -ret; /* other io error */
                                }
                        }
                }
        }

        return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any plane's state is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
                           u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        int ret, blk, pl, offset, blktype;

        for (blk = 0; blk < geo->num_chk; blk++) {
                offset = blk * geo->pln_mode;
                blktype = blks[offset];

                for (pl = 0; pl < geo->pln_mode; pl++) {
                        if (blks[offset + pl] &
                                        (NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
                                blktype = blks[offset + pl];
                                break;
                        }
                }

                ppa.g.blk = blk;

                meta->wp = 0;
                meta->type = NVM_CHK_TP_W_SEQ;
                meta->wi = 0;
                meta->slba = generic_to_dev_addr(dev, ppa).ppa;
                meta->cnlb = dev->geo.clba;

                if (blktype == NVM_BLK_T_FREE) {
                        ret = nvm_bb_chunk_scan(dev, ppa, meta);
                        if (ret)
                                return ret;
                } else {
                        meta->state = NVM_CHK_ST_OFFLINE;
                }

                meta++;
        }

        return 0;
}
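
/*
 * Worked example for the fold above (hypothetical values): with
 * pln_mode = NVM_PLANE_DOUBLE, chunk 0 looks at blks[0] and blks[1].
 * blks = { NVM_BLK_T_FREE, NVM_BLK_T_GRWN_BAD, ... } marks chunk 0
 * offline, while { NVM_BLK_T_FREE, NVM_BLK_T_FREE, ... } leaves it
 * free and triggers a write pointer scan via nvm_bb_chunk_scan().
 */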

static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
                           int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        u8 *blks;
        int ch, lun, nr_blks;
        int ret = 0;

        ppa.ppa = slba;
        ppa = dev_to_generic_addr(dev, ppa);

        if (ppa.g.blk != 0)
                return -EINVAL;

        if ((nchks % geo->num_chk) != 0)
                return -EINVAL;

        nr_blks = geo->num_chk * geo->pln_mode;

        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
                for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
                        struct ppa_addr ppa_gen, ppa_dev;

                        if (!nchks)
                                goto done;

                        ppa_gen.ppa = 0;
                        ppa_gen.g.ch = ch;
                        ppa_gen.g.lun = lun;
                        ppa_dev = generic_to_dev_addr(dev, ppa_gen);

                        ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
                        if (ret)
                                goto done;

                        ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
                                                                        meta);
                        if (ret)
                                goto done;

                        meta += geo->num_chk;
                        nchks -= geo->num_chk;
                }
        }
done:
        kfree(blks);
        return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
                       int nchks, struct nvm_chk_meta *meta)
{
        struct nvm_dev *dev = tgt_dev->parent;

        nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

        if (dev->geo.version == NVM_OCSSD_SPEC_12)
                return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

        return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
                       int nr_ppas, int type)
{
        struct nvm_dev *dev = tgt_dev->parent;
        struct nvm_rq rqd;
        int ret;

        if (dev->geo.version == NVM_OCSSD_SPEC_20)
                return 0;

        if (nr_ppas > NVM_MAX_VLBA) {
                pr_err("unable to update all blocks atomically\n");
                return -EINVAL;
        }

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
        if (ret)
                return ret;

        nvm_rq_tgt_to_dev(tgt_dev, &rqd);

        ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
        nvm_free_rqd_ppalist(tgt_dev, &rqd);
        if (ret)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret;

        dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
                                        sizeof(unsigned long), GFP_KERNEL);
        if (!dev->lun_map)
                return -ENOMEM;

        INIT_LIST_HEAD(&dev->area_list);
        INIT_LIST_HEAD(&dev->targets);
        mutex_init(&dev->mlock);
        spin_lock_init(&dev->lock);

        ret = nvm_register_map(dev);
        if (ret)
                goto err_fmtype;

        return 0;
err_fmtype:
        kfree(dev->lun_map);
        return ret;
}

static void nvm_free(struct kref *ref)
{
        struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

        if (dev->dma_pool)
                dev->ops->destroy_dma_pool(dev->dma_pool);

        if (dev->rmap)
                nvm_unregister_map(dev);

        kfree(dev->lun_map);
        kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
        struct nvm_geo *geo = &dev->geo;
        int ret = -EINVAL;

        if (dev->ops->identity(dev)) {
                pr_err("device could not be identified\n");
                goto err;
        }

        pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
                        geo->minor_ver_id, geo->vmnt);

        ret = nvm_core_init(dev);
        if (ret) {
                pr_err("could not initialize core structures.\n");
                goto err;
        }

        pr_info("registered %s [%u/%u/%u/%u/%u]\n",
                        dev->name, dev->geo.ws_min, dev->geo.ws_opt,
                        dev->geo.num_chk, dev->geo.all_luns,
                        dev->geo.num_ch);
        return 0;
err:
        pr_err("failed to initialize nvm\n");
        return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
        struct nvm_dev *dev;

        dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
        if (dev)
                kref_init(&dev->ref);

        return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
        int ret, exp_pool_size;

        pr_warn_once("lightnvm support is deprecated and will be removed in Linux 5.15.\n");

        if (!dev->q || !dev->ops) {
                kref_put(&dev->ref, nvm_free);
                return -EINVAL;
        }

        ret = nvm_init(dev);
        if (ret) {
                kref_put(&dev->ref, nvm_free);
                return ret;
        }

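        /*
         * The dma pool must hold the largest per-request metadata: a PPA
         * list entry (u64) plus out-of-band area (geo.sos bytes) for each
         * of up to NVM_MAX_VLBA sectors. E.g. (hypothetical geometry) with
         * NVM_MAX_VLBA = 64 and sos = 16 this is 64 * (8 + 16) = 1536
         * bytes, so the PAGE_SIZE floor below wins.
         */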
        exp_pool_size = max_t(int, PAGE_SIZE,
                              (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
        exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

        dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
                                                  exp_pool_size);
        if (!dev->dma_pool) {
                pr_err("could not create dma pool\n");
                kref_put(&dev->ref, nvm_free);
                return -ENOMEM;
        }

        /* register device with a supported media manager */
        down_write(&nvm_lock);
        list_add(&dev->devices, &nvm_devices);
        up_write(&nvm_lock);

        return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
        struct nvm_target *t, *tmp;

        mutex_lock(&dev->mlock);
        list_for_each_entry_safe(t, tmp, &dev->targets, list) {
                if (t->dev->parent != dev)
                        continue;
                __nvm_remove_target(t, false);
                kref_put(&dev->ref, nvm_free);
        }
        mutex_unlock(&dev->mlock);

        down_write(&nvm_lock);
        list_del(&dev->devices);
        up_write(&nvm_lock);

        kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
        struct nvm_dev *dev;
        int ret;

        down_write(&nvm_lock);
        dev = nvm_find_nvm_dev(create->dev);
        up_write(&nvm_lock);

        if (!dev) {
                pr_err("device not found\n");
                return -EINVAL;
        }

        kref_get(&dev->ref);
        ret = nvm_create_tgt(dev, create);
        if (ret)
                kref_put(&dev->ref, nvm_free);

        return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
        struct nvm_ioctl_info *info;
        struct nvm_tgt_type *tt;
        int tgt_iter = 0;

        info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
        if (IS_ERR(info))
                return PTR_ERR(info);

        info->version[0] = NVM_VERSION_MAJOR;
        info->version[1] = NVM_VERSION_MINOR;
        info->version[2] = NVM_VERSION_PATCH;

        down_write(&nvm_tgtt_lock);
        list_for_each_entry(tt, &nvm_tgt_types, list) {
                struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

                tgt->version[0] = tt->version[0];
                tgt->version[1] = tt->version[1];
                tgt->version[2] = tt->version[2];
                strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

                tgt_iter++;
        }

        info->tgtsize = tgt_iter;
        up_write(&nvm_tgtt_lock);

        if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
                kfree(info);
                return -EFAULT;
        }

        kfree(info);
        return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
        struct nvm_ioctl_get_devices *devices;
        struct nvm_dev *dev;
        int i = 0;

        devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
        if (!devices)
                return -ENOMEM;

        down_write(&nvm_lock);
        list_for_each_entry(dev, &nvm_devices, devices) {
                struct nvm_ioctl_device_info *info = &devices->info[i];

                strlcpy(info->devname, dev->name, sizeof(info->devname));

                /* kept for compatibility */
                info->bmversion[0] = 1;
                info->bmversion[1] = 0;
                info->bmversion[2] = 0;
                strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
                i++;

                if (i >= ARRAY_SIZE(devices->info)) {
                        pr_err("max %zd devices can be reported.\n",
                               ARRAY_SIZE(devices->info));
                        break;
                }
        }
        up_write(&nvm_lock);

        devices->nr_devices = i;

        if (copy_to_user(arg, devices,
                         sizeof(struct nvm_ioctl_get_devices))) {
                kfree(devices);
                return -EFAULT;
        }

        kfree(devices);
        return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
        struct nvm_ioctl_create create;

        if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
                return -EFAULT;

        if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
            create.conf.e.rsv != 0) {
                pr_err("reserved config field in use\n");
                return -EINVAL;
        }

        create.dev[DISK_NAME_LEN - 1] = '\0';
        create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
        create.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (create.flags != 0) {
                __u32 flags = create.flags;

                /* Check for valid flags */
                if (flags & NVM_TARGET_FACTORY)
                        flags &= ~NVM_TARGET_FACTORY;

                if (flags) {
                        pr_err("flag not supported\n");
                        return -EINVAL;
                }
        }

        return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
        struct nvm_ioctl_remove remove;

        if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
                return -EFAULT;

        remove.tgtname[DISK_NAME_LEN - 1] = '\0';

        if (remove.flags != 0) {
                pr_err("no flags supported\n");
                return -EINVAL;
        }

        return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_init init;

        if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
                return -EFAULT;

        if (init.flags != 0) {
                pr_err("no flags supported\n");
                return -EINVAL;
        }

        return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
        struct nvm_ioctl_dev_factory fact;

        if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
                return -EFAULT;

        fact.dev[DISK_NAME_LEN - 1] = '\0';

        if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
                return -EINVAL;

        return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case NVM_INFO:
                return nvm_ioctl_info(file, argp);
        case NVM_GET_DEVICES:
                return nvm_ioctl_get_devices(file, argp);
        case NVM_DEV_CREATE:
                return nvm_ioctl_dev_create(file, argp);
        case NVM_DEV_REMOVE:
                return nvm_ioctl_dev_remove(file, argp);
        case NVM_DEV_INIT:
                return nvm_ioctl_dev_init(file, argp);
        case NVM_DEV_FACTORY:
                return nvm_ioctl_dev_factory(file, argp);
        }
        return 0;
}

static const struct file_operations _ctl_fops = {
        .open = nonseekable_open,
        .unlocked_ioctl = nvm_ctl_ioctl,
        .owner = THIS_MODULE,
        .llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = "lightnvm",
        .nodename       = "lightnvm/control",
        .fops           = &_ctl_fops,
};
builtin_misc_device(_nvm_misc);
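
/*
 * Userspace usage sketch (not part of this file; assumes the uapi
 * definitions from <linux/lightnvm.h> and that a pblk-style target type
 * is available). Creating a target over luns 0..3 of nvme0n1:
 *
 *	struct nvm_ioctl_create c = { 0 };
 *	int fd = open("/dev/lightnvm/control", O_RDWR);
 *
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "mydev");
 *	strcpy(c.tgttype, "pblk");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = 0;
 *	c.conf.s.lun_end = 3;
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 *
 * NVM_DEV_REMOVE takes a struct nvm_ioctl_remove with tgtname filled in.
 */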