linux/drivers/lightnvm/pblk-core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-core.c - pblk's core functionality
 *
 */

#define CREATE_TRACE_POINTS

#include "pblk.h"
#include "pblk-trace.h"

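/*
 * Deferred work to persist a grown bad block on the device. Scheduled
 * on pblk->bb_wq by pblk_mark_bb() below, since the chunk metadata
 * update may sleep.
 */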
static void pblk_line_mark_bb(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct ppa_addr *ppa = line_ws->priv;
        int ret;

        ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
        if (ret) {
                struct pblk_line *line;
                int pos;

                line = pblk_ppa_to_line(pblk, *ppa);
                pos = pblk_ppa_to_pos(&dev->geo, *ppa);

                pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
                                line->id, pos);
        }

        kfree(ppa);
        mempool_free(line_ws, &pblk->gen_ws_pool);
}

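/*
 * Account an erase failure: update error counters and the line's
 * bad-block bitmap, and on pre-2.0 devices schedule the deferred
 * bad-block mark above.
 */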
static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
                         struct ppa_addr ppa_addr)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr *ppa;
        int pos = pblk_ppa_to_pos(geo, ppa_addr);

        pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos);
        atomic_long_inc(&pblk->erase_failed);

        atomic_dec(&line->blk_in_line);
        if (test_and_set_bit(pos, line->blk_bitmap))
                pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n",
                                                        line->id, pos);

        /* Not necessary to mark bad blocks on 2.0 spec. */
        if (geo->version == NVM_OCSSD_SPEC_20)
                return;

        ppa = kmalloc(sizeof(struct ppa_addr), GFP_ATOMIC);
        if (!ppa)
                return;

        *ppa = ppa_addr;
        pblk_gen_run_ws(pblk, NULL, ppa, pblk_line_mark_bb,
                                                GFP_ATOMIC, pblk->bb_wq);
}

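/*
 * Common erase completion: update the chunk state from the request
 * result and trace the transition. Called from both the sync and
 * async erase paths.
 */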
static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *chunk;
        struct pblk_line *line;
        int pos;

        line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
        pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
        chunk = &line->chks[pos];

        atomic_dec(&line->left_seblks);

        if (rqd->error) {
                trace_pblk_chunk_reset(pblk_disk_name(pblk),
                                &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);

                chunk->state = NVM_CHK_ST_OFFLINE;
                pblk_mark_bb(pblk, line, rqd->ppa_addr);
        } else {
                trace_pblk_chunk_reset(pblk_disk_name(pblk),
                                &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);

                chunk->state = NVM_CHK_ST_FREE;
        }

        trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
                                chunk->state);

        atomic_dec(&pblk->inflight_io);
}

/* Erase completion assumes that only one block is erased at a time */
static void pblk_end_io_erase(struct nvm_rq *rqd)
{
        struct pblk *pblk = rqd->private;

        __pblk_end_io_erase(pblk, rqd);
        mempool_free(rqd, &pblk->e_rq_pool);
}

/*
 * Get information for all chunks from the device.
 *
 * The caller is responsible for freeing (with vfree) the returned structure
 */
struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_chk_meta *meta;
        struct ppa_addr ppa;
        unsigned long len;
        int ret;

        ppa.ppa = 0;

        len = geo->all_chunks * sizeof(*meta);
        meta = vzalloc(len);
        if (!meta)
                return ERR_PTR(-ENOMEM);

        ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
        if (ret) {
                vfree(meta);
                return ERR_PTR(-EIO);
        }

        return meta;
}

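/*
 * Index into the flat chunk metadata array returned by
 * pblk_get_chunk_meta(): entries are laid out group-major, then
 * parallel unit, then chunk.
 */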
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
                                              struct nvm_chk_meta *meta,
                                              struct ppa_addr ppa)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int ch_off = ppa.m.grp * geo->num_chk * geo->num_lun;
        int lun_off = ppa.m.pu * geo->num_chk;
        int chk_off = ppa.m.chk;

        return meta + ch_off + lun_off + chk_off;
}

void __pblk_map_invalidate(struct pblk *pblk, struct pblk_line *line,
                           u64 paddr)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;

        /* Lines being reclaimed (GC'ed) cannot be invalidated. Before the L2P
         * table is modified with reclaimed sectors, a check is done to ensure
         * that newer updates are not overwritten.
         */
        spin_lock(&line->lock);
        WARN_ON(line->state == PBLK_LINESTATE_FREE);

        if (test_and_set_bit(paddr, line->invalid_bitmap)) {
                WARN_ONCE(1, "pblk: double invalidate\n");
                spin_unlock(&line->lock);
                return;
        }
        le32_add_cpu(line->vsc, -1);

        if (line->state == PBLK_LINESTATE_CLOSED)
                move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                spin_lock(&line->lock);
                /* Prevent moving a line that has just been chosen for GC */
                if (line->state == PBLK_LINESTATE_GC) {
                        spin_unlock(&line->lock);
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }
                spin_unlock(&line->lock);

                list_move_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;
        u64 paddr;

#ifdef CONFIG_NVM_PBLK_DEBUG
        /* Callers must ensure that the ppa points to a device address */
        BUG_ON(pblk_addr_in_cache(ppa));
        BUG_ON(pblk_ppa_empty(ppa));
#endif

        line = pblk_ppa_to_line(pblk, ppa);
        paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);

        __pblk_map_invalidate(pblk, line, paddr);
}

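/*
 * Discard helper: invalidate the mapped device sectors for an LBA
 * range and clear the corresponding L2P entries under trans_lock.
 */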
static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
                                  unsigned int nr_secs)
{
        sector_t lba;

        spin_lock(&pblk->trans_lock);
        for (lba = slba; lba < slba + nr_secs; lba++) {
                struct ppa_addr ppa;

                ppa = pblk_trans_map_get(pblk, lba);

                if (!pblk_addr_in_cache(ppa) && !pblk_ppa_empty(ppa))
                        pblk_map_invalidate(pblk, ppa);

                pblk_ppa_set_empty(&ppa);
                pblk_trans_map_set(pblk, lba, ppa);
        }
        spin_unlock(&pblk->trans_lock);
}

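/*
 * Allocate the DMA metadata buffer for a request. For multi-sector
 * requests the PPA list shares the same DMA allocation, placed right
 * after the OOB metadata, so one allocation serves both.
 */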
int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &rqd->dma_meta_list);
        if (!rqd->meta_list)
                return -ENOMEM;

        if (rqd->nr_ppas == 1)
                return 0;

        rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
        rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

        return 0;
}

void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        if (rqd->meta_list)
                nvm_dev_dma_free(dev->parent, rqd->meta_list,
                                rqd->dma_meta_list);
}

/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
        mempool_t *pool;
        struct nvm_rq *rqd;
        int rq_size;

        switch (type) {
        case PBLK_WRITE:
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                rq_size = pblk_w_rq_size;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                rq_size = pblk_g_rq_size;
                break;
        default:
                pool = &pblk->e_rq_pool;
                rq_size = pblk_g_rq_size;
        }

        rqd = mempool_alloc(pool, GFP_KERNEL);
        memset(rqd, 0, rq_size);

        return rqd;
}

/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
        mempool_t *pool;

        switch (type) {
        case PBLK_WRITE:
                kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
                fallthrough;
        case PBLK_WRITE_INT:
                pool = &pblk->w_rq_pool;
                break;
        case PBLK_READ:
                pool = &pblk->r_rq_pool;
                break;
        case PBLK_ERASE:
                pool = &pblk->e_rq_pool;
                break;
        default:
                pblk_err(pblk, "trying to free unknown rqd type\n");
                return;
        }

        pblk_free_rqd_meta(pblk, rqd);
        mempool_free(rqd, pool);
}

void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
                         int nr_pages)
{
        struct bio_vec *bv;
        struct page *page;
        int i, e, nbv = 0;

        for (i = 0; i < bio->bi_vcnt; i++) {
                bv = &bio->bi_io_vec[i];
                page = bv->bv_page;
                for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
                        if (nbv >= off)
                                mempool_free(page++, &pblk->page_bio_pool);
        }
}

int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
                       int nr_pages)
{
        struct request_queue *q = pblk->dev->q;
        struct page *page;
        int i, ret;

        for (i = 0; i < nr_pages; i++) {
                page = mempool_alloc(&pblk->page_bio_pool, flags);

                ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
                if (ret != PBLK_EXPOSED_PAGE_SIZE) {
                        pblk_err(pblk, "could not add page to bio\n");
                        mempool_free(page, &pblk->page_bio_pool);
                        goto err;
                }
        }

        return 0;
err:
        pblk_bio_free_pages(pblk, bio, (bio->bi_vcnt - i), i);
        return -1;
}

void pblk_write_kick(struct pblk *pblk)
{
        wake_up_process(pblk->writer_ts);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}

void pblk_write_timer_fn(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, wtimer);

        /* kick the write thread every tick to flush outstanding data */
        pblk_write_kick(pblk);
}

void pblk_write_should_kick(struct pblk *pblk)
{
        unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);

        if (secs_avail >= pblk->min_write_pgs_data)
                pblk_write_kick(pblk);
}

static void pblk_wait_for_meta(struct pblk *pblk)
{
        do {
                if (!atomic_read(&pblk->inflight_io))
                        break;

                schedule();
        } while (1);
}

static void pblk_flush_writer(struct pblk *pblk)
{
        pblk_rb_flush(&pblk->rwb);
        do {
                if (!pblk_rb_sync_count(&pblk->rwb))
                        break;

                pblk_write_kick(pblk);
                schedule();
        } while (1);
}

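/*
 * Pick the GC list a line belongs to based on its valid sector count
 * (vsc): write-error lines first, then the full/high/mid/low/empty
 * buckets, falling back to the corrupt list when vsc is inconsistent.
 * Caller holds line->lock.
 */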
struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list = NULL;
        int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
                        * (pblk->min_write_pgs - pblk->min_write_pgs_data);
        int vsc = le32_to_cpu(*line->vsc) + packed_meta;

        lockdep_assert_held(&line->lock);

        if (line->w_err_gc->has_write_err) {
                if (line->gc_group != PBLK_LINEGC_WERR) {
                        line->gc_group = PBLK_LINEGC_WERR;
                        move_list = &l_mg->gc_werr_list;
                        pblk_rl_werr_line_in(&pblk->rl);
                }
        } else if (!vsc) {
                if (line->gc_group != PBLK_LINEGC_FULL) {
                        line->gc_group = PBLK_LINEGC_FULL;
                        move_list = &l_mg->gc_full_list;
                }
        } else if (vsc < lm->high_thrs) {
                if (line->gc_group != PBLK_LINEGC_HIGH) {
                        line->gc_group = PBLK_LINEGC_HIGH;
                        move_list = &l_mg->gc_high_list;
                }
        } else if (vsc < lm->mid_thrs) {
                if (line->gc_group != PBLK_LINEGC_MID) {
                        line->gc_group = PBLK_LINEGC_MID;
                        move_list = &l_mg->gc_mid_list;
                }
        } else if (vsc < line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_LOW) {
                        line->gc_group = PBLK_LINEGC_LOW;
                        move_list = &l_mg->gc_low_list;
                }
        } else if (vsc == line->sec_in_line) {
                if (line->gc_group != PBLK_LINEGC_EMPTY) {
                        line->gc_group = PBLK_LINEGC_EMPTY;
                        move_list = &l_mg->gc_empty_list;
                }
        } else {
                line->state = PBLK_LINESTATE_CORRUPT;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);

                line->gc_group = PBLK_LINEGC_NONE;
                move_list = &l_mg->corrupt_list;
                pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
                                                line->id, vsc,
                                                line->sec_in_line,
                                                lm->high_thrs, lm->mid_thrs);
        }

        return move_list;
}

void pblk_discard(struct pblk *pblk, struct bio *bio)
{
        sector_t slba = pblk_get_lba(bio);
        sector_t nr_secs = pblk_get_secs(bio);

        pblk_invalidate_range(pblk, slba, nr_secs);
}

void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        atomic_long_inc(&pblk->write_failed);
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
{
        /* Empty page read is not necessarily an error (e.g., L2P recovery) */
        if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                atomic_long_inc(&pblk->read_empty);
                return;
        }

        switch (rqd->error) {
        case NVM_RSP_WARN_HIGHECC:
                atomic_long_inc(&pblk->read_high_ecc);
                break;
        case NVM_RSP_ERR_FAILECC:
        case NVM_RSP_ERR_FAILCRC:
                atomic_long_inc(&pblk->read_failed);
                break;
        default:
                pblk_err(pblk, "unknown read error:%d\n", rqd->error);
        }
#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_print_failed_rqd(pblk, rqd, rqd->error);
#endif
}

void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write)
{
        pblk->sec_per_write = sec_per_write;
}

int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
{
        struct nvm_tgt_dev *dev = pblk->dev;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        return nvm_submit_io(dev, rqd, buf);
}

void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int i;

        for (i = 0; i < rqd->nr_ppas; i++) {
                struct ppa_addr *ppa = &ppa_list[i];
                struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
                u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);

                if (caddr == 0)
                        trace_pblk_chunk_state(pblk_disk_name(pblk),
                                                        ppa, NVM_CHK_ST_OPEN);
                else if (caddr == (chunk->cnlb - 1))
                        trace_pblk_chunk_state(pblk_disk_name(pblk),
                                                        ppa, NVM_CHK_ST_CLOSED);
        }
}

int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        int ret;

        atomic_inc(&pblk->inflight_io);

#ifdef CONFIG_NVM_PBLK_DEBUG
        if (pblk_check_io(pblk, rqd))
                return NVM_IO_ERR;
#endif

        ret = nvm_submit_io_sync(dev, rqd, buf);

        if (trace_pblk_chunk_state_enabled() && !ret &&
            rqd->opcode == NVM_OP_PWRITE)
                pblk_check_chunk_state_update(pblk, rqd);

        return ret;
}

static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
                                   void *buf)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int ret;

        pblk_down_chunk(pblk, ppa_list[0]);
        ret = pblk_submit_io_sync(pblk, rqd, buf);
        pblk_up_chunk(pblk, ppa_list[0]);

        return ret;
}

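/*
 * Compute how many sectors to submit in one write. The result is
 * aligned down to the minimum write size: e.g. with min = 8 and
 * max = 16, 20 available sectors yield 16, 11 yield 8, and 5 yield 0
 * unless a flush is pending, in which case one full minimum unit is
 * submitted so the flush can make progress.
 */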
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
                   unsigned long secs_to_flush, bool skip_meta)
{
        int max = pblk->sec_per_write;
        int min = pblk->min_write_pgs;
        int secs_to_sync = 0;

        if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
                min = max = pblk->min_write_pgs_data;

        if (secs_avail >= max)
                secs_to_sync = max;
        else if (secs_avail >= min)
                secs_to_sync = min * (secs_avail / min);
        else if (secs_to_flush)
                secs_to_sync = min;

        return secs_to_sync;
}

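/*
 * Undo the most recent nr_secs sector allocations on a line, clearing
 * them from the map bitmap and rewinding cur_sec.
 */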
void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        spin_lock(&line->lock);
        addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        line->cur_sec = addr - nr_secs;

        for (i = 0; i < nr_secs; i++, line->cur_sec--)
                WARN_ON(!test_and_clear_bit(line->cur_sec, line->map_bitmap));
        spin_unlock(&line->lock);
}

u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;
        int i;

        lockdep_assert_held(&line->lock);

        /* Logic error: ppa out of bounds. Prevent generating a bad address */
        if (line->cur_sec + nr_secs > pblk->lm.sec_per_line) {
                WARN(1, "pblk: page allocation out of bounds\n");
                nr_secs = pblk->lm.sec_per_line - line->cur_sec;
        }

        line->cur_sec = addr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        for (i = 0; i < nr_secs; i++, line->cur_sec++)
                WARN_ON(test_and_set_bit(line->cur_sec, line->map_bitmap));

        return addr;
}

u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs)
{
        u64 addr;

        /* Lock needed in case a write fails and a recovery needs to remap
         * failed write buffer entries
         */
        spin_lock(&line->lock);
        addr = __pblk_alloc_page(pblk, line, nr_secs);
        line->left_msecs -= nr_secs;
        WARN(line->left_msecs < 0, "pblk: page allocation out of bounds\n");
        spin_unlock(&line->lock);

        return addr;
}

u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
{
        u64 paddr;

        spin_lock(&line->lock);
        paddr = find_next_zero_bit(line->map_bitmap,
                                        pblk->lm.sec_per_line, line->cur_sec);
        spin_unlock(&line->lock);

        return paddr;
}

u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        int bit;

        /* This usually only happens on bad lines */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (bit >= lm->blk_per_line)
                return -1;

        return bit * geo->ws_opt;
}

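/*
 * Read a line's start metadata (smeta) synchronously into line->smeta.
 * A high-ECC warning from the device is not treated as fatal here.
 */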
int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr *ppa_list;
        struct nvm_rq rqd;
        u64 paddr = pblk_line_smeta_start(pblk, line);
        int i, ret;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.is_seq = 1;
        ppa_list = nvm_rq_to_ppa_list(&rqd);

        for (i = 0; i < lm->smeta_sec; i++, paddr++)
                ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

        ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                goto clear_rqd;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                pblk_log_read_err(pblk, &rqd);
                ret = -EIO;
        }

clear_rqd:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}

static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
                                 u64 paddr)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr *ppa_list;
        struct nvm_rq rqd;
        __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
        __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
        int i, ret;

        memset(&rqd, 0, sizeof(struct nvm_rq));

        ret = pblk_alloc_rqd_meta(pblk, &rqd);
        if (ret)
                return ret;

        rqd.opcode = NVM_OP_PWRITE;
        rqd.nr_ppas = lm->smeta_sec;
        rqd.is_seq = 1;
        ppa_list = nvm_rq_to_ppa_list(&rqd);

        for (i = 0; i < lm->smeta_sec; i++, paddr++) {
                struct pblk_sec_meta *meta = pblk_get_meta(pblk,
                                                           rqd.meta_list, i);

                ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
                meta->lba = lba_list[paddr] = addr_empty;
        }

        ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta);
        if (ret) {
                pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
                goto clear_rqd;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error) {
                pblk_log_write_err(pblk, &rqd);
                ret = -EIO;
        }

clear_rqd:
        pblk_free_rqd_meta(pblk, &rqd);
        return ret;
}

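/*
 * Read a line's end metadata (emeta) synchronously, splitting the
 * transfer into device-aligned chunks and skipping sectors that fall
 * in blocks marked bad in the line's block bitmap.
 */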
int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
                         void *emeta_buf)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        void *ppa_list_buf, *meta_list;
        struct ppa_addr *ppa_list;
        struct nvm_rq rqd;
        u64 paddr = line->emeta_ssec;
        dma_addr_t dma_ppa_list, dma_meta_list;
        int min = pblk->min_write_pgs;
        int left_ppas = lm->emeta_sec[0];
        int line_id = line->id;
        int rq_ppas, rq_len;
        int i, j;
        int ret;

        meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                                        &dma_meta_list);
        if (!meta_list)
                return -ENOMEM;

        ppa_list_buf = meta_list + pblk_dma_meta_size(pblk);
        dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);

next_rq:
        memset(&rqd, 0, sizeof(struct nvm_rq));

        rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
        rq_len = rq_ppas * geo->csecs;

        rqd.meta_list = meta_list;
        rqd.ppa_list = ppa_list_buf;
        rqd.dma_meta_list = dma_meta_list;
        rqd.dma_ppa_list = dma_ppa_list;
        rqd.opcode = NVM_OP_PREAD;
        rqd.nr_ppas = rq_ppas;
        ppa_list = nvm_rq_to_ppa_list(&rqd);

        for (i = 0; i < rqd.nr_ppas; ) {
                struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
                int pos = pblk_ppa_to_pos(geo, ppa);

                if (pblk_io_aligned(pblk, rq_ppas))
                        rqd.is_seq = 1;

                while (test_bit(pos, line->blk_bitmap)) {
                        paddr += min;
                        if (pblk_boundary_paddr_checks(pblk, paddr)) {
                                ret = -EINTR;
                                goto free_rqd_dma;
                        }

                        ppa = addr_to_gen_ppa(pblk, paddr, line_id);
                        pos = pblk_ppa_to_pos(geo, ppa);
                }

                if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
                        ret = -EINTR;
                        goto free_rqd_dma;
                }

                for (j = 0; j < min; j++, i++, paddr++)
                        ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
        }

        ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
        if (ret) {
                pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
                goto free_rqd_dma;
        }

        atomic_dec(&pblk->inflight_io);

        if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                pblk_log_read_err(pblk, &rqd);
                ret = -EIO;
                goto free_rqd_dma;
        }

        emeta_buf += rq_len;
        left_ppas -= rq_ppas;
        if (left_ppas)
                goto next_rq;

free_rqd_dma:
        nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
        return ret;
}

static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
                            struct ppa_addr ppa)
{
        rqd->opcode = NVM_OP_ERASE;
        rqd->ppa_addr = ppa;
        rqd->nr_ppas = 1;
        rqd->is_seq = 1;
        rqd->bio = NULL;
}

static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
        struct nvm_rq rqd = {NULL};
        int ret;

        trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
                                PBLK_CHUNK_RESET_START);

        pblk_setup_e_rq(pblk, &rqd, ppa);

        /* The write thread schedules erases so that it minimizes interference
         * with writes. Thus, there is no need to take the LUN semaphore.
         */
        ret = pblk_submit_io_sync(pblk, &rqd, NULL);
        rqd.private = pblk;
        __pblk_end_io_erase(pblk, &rqd);

        return ret;
}

int pblk_line_erase(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct ppa_addr ppa;
        int ret, bit = -1;

        /* Erase only good blocks, one at a time */
        do {
                spin_lock(&line->lock);
                bit = find_next_zero_bit(line->erase_bitmap, lm->blk_per_line,
                                                                bit + 1);
                if (bit >= lm->blk_per_line) {
                        spin_unlock(&line->lock);
                        break;
                }

                ppa = pblk->luns[bit].bppa; /* set ch and lun */
                ppa.a.blk = line->id;

                atomic_dec(&line->left_eblks);
                WARN_ON(test_and_set_bit(bit, line->erase_bitmap));
                spin_unlock(&line->lock);

                ret = pblk_blk_erase_sync(pblk, ppa);
                if (ret) {
                        pblk_err(pblk, "failed to erase line %d\n", line->id);
                        return ret;
                }
        } while (1);

        return 0;
}

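/*
 * Claim a free metadata slot for the line and reset its smeta/emeta
 * buffers. May drop and retake free_lock while waiting for a slot.
 */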
static void pblk_line_setup_metadata(struct pblk_line *line,
                                     struct pblk_line_mgmt *l_mg,
                                     struct pblk_line_meta *lm)
{
        int meta_line;

        lockdep_assert_held(&l_mg->free_lock);

retry_meta:
        meta_line = find_first_zero_bit(&l_mg->meta_bitmap, PBLK_DATA_LINES);
        if (meta_line == PBLK_DATA_LINES) {
                spin_unlock(&l_mg->free_lock);
                io_schedule();
                spin_lock(&l_mg->free_lock);
                goto retry_meta;
        }

        set_bit(meta_line, &l_mg->meta_bitmap);
        line->meta_line = meta_line;

        line->smeta = l_mg->sline_meta[meta_line];
        line->emeta = l_mg->eline_meta[meta_line];

        memset(line->smeta, 0, lm->smeta_len);
        memset(line->emeta->buf, 0, lm->emeta_len[0]);

        line->emeta->mem = 0;
        atomic_set(&line->emeta->sync, 0);
}

/* For now, lines are always assumed to be full; thus, smeta's former and
 * current LUN bitmaps are omitted.
 */
static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
                                  struct pblk_line *cur)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_emeta *emeta = line->emeta;
        struct line_emeta *emeta_buf = emeta->buf;
        struct line_smeta *smeta_buf = (struct line_smeta *)line->smeta;
        int nr_blk_line;

        /* After erasing the line, new bad blocks might appear and we risk
         * having an invalid line
         */
        nr_blk_line = lm->blk_per_line -
                        bitmap_weight(line->blk_bitmap, lm->blk_per_line);
        if (nr_blk_line < lm->min_blk_line) {
                spin_lock(&l_mg->free_lock);
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                spin_unlock(&l_mg->free_lock);

                pblk_debug(pblk, "line %d is bad\n", line->id);

                return 0;
        }

        /* Run-time metadata */
        line->lun_bitmap = ((void *)(smeta_buf)) + sizeof(struct line_smeta);

        /* Mark LUNs allocated in this line (all for now) */
        bitmap_set(line->lun_bitmap, 0, lm->lun_bitmap_len);

        smeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
        export_guid(smeta_buf->header.uuid, &pblk->instance_uuid);
        smeta_buf->header.id = cpu_to_le32(line->id);
        smeta_buf->header.type = cpu_to_le16(line->type);
        smeta_buf->header.version_major = SMETA_VERSION_MAJOR;
        smeta_buf->header.version_minor = SMETA_VERSION_MINOR;

        /* Start metadata */
        smeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        smeta_buf->window_wr_lun = cpu_to_le32(geo->all_luns);

        /* Fill metadata among lines */
        if (cur) {
                memcpy(line->lun_bitmap, cur->lun_bitmap, lm->lun_bitmap_len);
                smeta_buf->prev_id = cpu_to_le32(cur->id);
                cur->emeta->buf->next_id = cpu_to_le32(line->id);
        } else {
                smeta_buf->prev_id = cpu_to_le32(PBLK_LINE_EMPTY);
        }

        /* All smeta must be set at this point */
        smeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &smeta_buf->header));
        smeta_buf->crc = cpu_to_le32(pblk_calc_smeta_crc(pblk, smeta_buf));

        /* End metadata */
        memcpy(&emeta_buf->header, &smeta_buf->header,
                                                sizeof(struct line_header));

        emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
        emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
        emeta_buf->header.crc = cpu_to_le32(
                        pblk_calc_meta_header_crc(pblk, &emeta_buf->header));

        emeta_buf->seq_nr = cpu_to_le64(line->seq_nr);
        emeta_buf->nr_lbas = cpu_to_le64(line->sec_in_line);
        emeta_buf->nr_valid_lbas = cpu_to_le64(0);
        emeta_buf->next_id = cpu_to_le32(PBLK_LINE_EMPTY);
        emeta_buf->crc = cpu_to_le32(0);
        emeta_buf->prev_id = smeta_buf->prev_id;

        return 1;
}

static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->map_bitmap)
                return -ENOMEM;

        memset(line->map_bitmap, 0, lm->sec_bitmap_len);

        /* will be initialized using bb info from map_bitmap */
        line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
        if (!line->invalid_bitmap) {
                mempool_free(line->map_bitmap, l_mg->bitmap_pool);
                line->map_bitmap = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* For now, lines are always assumed to be full; thus, smeta's former and
 * current LUN bitmaps are omitted.
 */
static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
                             int init)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        u64 off;
        int bit = -1;
        int emeta_secs;

        line->sec_in_line = lm->sec_per_line;

        /* Capture bad block information on line mapping bitmaps */
        while ((bit = find_next_bit(line->blk_bitmap, lm->blk_per_line,
                                        bit + 1)) < lm->blk_per_line) {
                off = bit * geo->ws_opt;
                bitmap_shift_left(l_mg->bb_aux, l_mg->bb_template, off,
                                                        lm->sec_per_line);
                bitmap_or(line->map_bitmap, line->map_bitmap, l_mg->bb_aux,
                                                        lm->sec_per_line);
                line->sec_in_line -= geo->clba;
        }

        /* Mark smeta metadata sectors as bad sectors */
        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        off = bit * geo->ws_opt;
        bitmap_set(line->map_bitmap, off, lm->smeta_sec);
        line->sec_in_line -= lm->smeta_sec;
        line->cur_sec = off + lm->smeta_sec;

        if (init && pblk_line_smeta_write(pblk, line, off)) {
                pblk_debug(pblk, "line smeta I/O failed. Retry\n");
                return 0;
        }

        bitmap_copy(line->invalid_bitmap, line->map_bitmap, lm->sec_per_line);

        /* Mark emeta metadata sectors as bad sectors. We need to consider bad
         * blocks to make sure that there are enough sectors to store emeta
         */
        emeta_secs = lm->emeta_sec[0];
        off = lm->sec_per_line;
        while (emeta_secs) {
                off -= geo->ws_opt;
                if (!test_bit(off, line->invalid_bitmap)) {
                        bitmap_set(line->invalid_bitmap, off, geo->ws_opt);
                        emeta_secs -= geo->ws_opt;
                }
        }

        line->emeta_ssec = off;
        line->sec_in_line -= lm->emeta_sec[0];
        line->nr_valid_lbas = 0;
        line->left_msecs = line->sec_in_line;
        *line->vsc = cpu_to_le32(line->sec_in_line);

        if (lm->sec_per_line - line->sec_in_line !=
                bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);
                pblk_err(pblk, "unexpected line %d is bad\n", line->id);

                return 0;
        }

        return 1;
}

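/*
 * Count the chunks that actually need an erase for a line that has
 * never been written: chunks already in the free state are marked as
 * erased in the erase bitmap and excluded from the count.
 */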
static int pblk_prepare_new_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        int blk_to_erase = atomic_read(&line->blk_in_line);
        int i;

        for (i = 0; i < lm->blk_per_line; i++) {
                struct pblk_lun *rlun = &pblk->luns[i];
                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
                int state = line->chks[pos].state;

                /* Free chunks should not be erased */
                if (state & NVM_CHK_ST_FREE) {
                        set_bit(pblk_ppa_to_pos(geo, rlun->bppa),
                                                        line->erase_bitmap);
                        blk_to_erase--;
                }
        }

        return blk_to_erase;
}

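/*
 * Transition a line from FREE (or NEW) to OPEN, setting up erase
 * bitmaps and per-line counters. Returns -EAGAIN if too few good
 * blocks remain and -EINTR on an inconsistent line state.
 */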
static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;
        int blk_in_line = atomic_read(&line->blk_in_line);
        int blk_to_erase;

        /* Bad blocks do not need to be erased */
        bitmap_copy(line->erase_bitmap, line->blk_bitmap, lm->blk_per_line);

        spin_lock(&line->lock);

        /* If we have not written to this line, we need to mark free chunks
         * as already erased
         */
        if (line->state == PBLK_LINESTATE_NEW) {
                blk_to_erase = pblk_prepare_new_line(pblk, line);
                line->state = PBLK_LINESTATE_FREE;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
        } else {
                blk_to_erase = blk_in_line;
        }

        if (blk_in_line < lm->min_blk_line) {
                spin_unlock(&line->lock);
                return -EAGAIN;
        }

        if (line->state != PBLK_LINESTATE_FREE) {
                WARN(1, "pblk: corrupted line %d, state %d\n",
                                                        line->id, line->state);
                spin_unlock(&line->lock);
                return -EINTR;
        }

        line->state = PBLK_LINESTATE_OPEN;
        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                line->state);

        atomic_set(&line->left_eblks, blk_to_erase);
        atomic_set(&line->left_seblks, blk_to_erase);

        line->meta_distance = lm->meta_distance;
        spin_unlock(&line->lock);

        kref_init(&line->ref);
        atomic_set(&line->sec_to_update, 0);

        return 0;
}

/* Line allocations in the recovery path are always single threaded */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        l_mg->data_line = line;
        list_del(&line->list);

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                list_add(&line->list, &l_mg->free_list);
                spin_unlock(&l_mg->free_lock);
                return ret;
        }
        spin_unlock(&l_mg->free_lock);

        ret = pblk_line_alloc_bitmaps(pblk, line);
        if (ret)
                goto fail;

        if (!pblk_line_init_bb(pblk, line, 0)) {
                ret = -EINTR;
                goto fail;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line, true);
        return 0;

fail:
        spin_lock(&l_mg->free_lock);
        list_add(&line->list, &l_mg->free_list);
        spin_unlock(&l_mg->free_lock);

        return ret;
}

void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        line->map_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

static void pblk_line_reinit(struct pblk_line *line)
{
        *line->vsc = cpu_to_le32(EMPTY_ENTRY);

        line->map_bitmap = NULL;
        line->invalid_bitmap = NULL;
        line->smeta = NULL;
        line->emeta = NULL;
}

void pblk_line_free(struct pblk_line *line)
{
        struct pblk *pblk = line->pblk;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
        mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);

        pblk_line_reinit(line);
}

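/*
 * Take the first line off the free list and prepare it for use.
 * Lines that turn out bad or corrupt are parked on the corresponding
 * lists and the next free line is tried. Caller holds free_lock.
 */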
struct pblk_line *pblk_line_get(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        int ret, bit;

        lockdep_assert_held(&l_mg->free_lock);

retry:
        if (list_empty(&l_mg->free_list)) {
                pblk_err(pblk, "no free lines\n");
                return NULL;
        }

        line = list_first_entry(&l_mg->free_list, struct pblk_line, list);
        list_del(&line->list);
        l_mg->nr_free_lines--;

        bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
        if (unlikely(bit >= lm->blk_per_line)) {
                spin_lock(&line->lock);
                line->state = PBLK_LINESTATE_BAD;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_add_tail(&line->list, &l_mg->bad_list);

                pblk_debug(pblk, "line %d is bad\n", line->id);
                goto retry;
        }

        ret = pblk_line_prepare(pblk, line);
        if (ret) {
                switch (ret) {
                case -EAGAIN:
                        list_add(&line->list, &l_mg->bad_list);
                        goto retry;
                case -EINTR:
                        list_add(&line->list, &l_mg->corrupt_list);
                        goto retry;
                default:
                        pblk_err(pblk, "failed to prepare line %d\n", line->id);
                        list_add(&line->list, &l_mg->free_list);
                        l_mg->nr_free_lines++;
                        return NULL;
                }
        }

        return line;
}

static struct pblk_line *pblk_line_retry(struct pblk *pblk,
                                         struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *retry_line;

retry:
        spin_lock(&l_mg->free_lock);
        retry_line = pblk_line_get(pblk);
        if (!retry_line) {
                l_mg->data_line = NULL;
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        retry_line->map_bitmap = line->map_bitmap;
        retry_line->invalid_bitmap = line->invalid_bitmap;
        retry_line->smeta = line->smeta;
        retry_line->emeta = line->emeta;
        retry_line->meta_line = line->meta_line;

        pblk_line_reinit(line);

        l_mg->data_line = retry_line;
        spin_unlock(&l_mg->free_lock);

        pblk_rl_free_lines_dec(&pblk->rl, line, false);

        if (pblk_line_erase(pblk, retry_line))
                goto retry;

        return retry_line;
}

static void pblk_set_space_limit(struct pblk *pblk)
{
        struct pblk_rl *rl = &pblk->rl;

        atomic_set(&rl->rb_space, 0);
}

struct pblk_line *pblk_line_get_first_data(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;

        spin_lock(&l_mg->free_lock);
        line = pblk_line_get(pblk);
        if (!line) {
                spin_unlock(&l_mg->free_lock);
                return NULL;
        }

        line->seq_nr = l_mg->d_seq_nr++;
        line->type = PBLK_LINETYPE_DATA;
        l_mg->data_line = line;

        pblk_line_setup_metadata(line, l_mg, &pblk->lm);

        /* Allocate next line for preparation */
        l_mg->data_next = pblk_line_get(pblk);
        if (!l_mg->data_next) {
                /* If we cannot get a new line, we need to stop the pipeline.
                 * Only allow as many writes in as we can store safely and then
                 * fail gracefully
                 */
                pblk_set_space_limit(pblk);

                l_mg->data_next = NULL;
        } else {
                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
                l_mg->data_next->type = PBLK_LINETYPE_DATA;
        }
        spin_unlock(&l_mg->free_lock);

        if (pblk_line_alloc_bitmaps(pblk, line))
                return NULL;

        if (pblk_line_erase(pblk, line)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;
        }

retry_setup:
        if (!pblk_line_init_metadata(pblk, line, NULL)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        if (!pblk_line_init_bb(pblk, line, 1)) {
                line = pblk_line_retry(pblk, line);
                if (!line)
                        return NULL;

                goto retry_setup;
        }

        pblk_rl_free_lines_dec(&pblk->rl, line, true);

        return line;
}

void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
{
        struct pblk_line *line;

        line = pblk_ppa_to_line(pblk, ppa);
        kref_put(&line->ref, pblk_line_put_wq);
}

void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
{
        struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
        int i;

        for (i = 0; i < rqd->nr_ppas; i++)
                pblk_ppa_to_line_put(pblk, ppa_list[i]);
}

static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
        lockdep_assert_held(&pblk->l_mg.free_lock);

        pblk_set_space_limit(pblk);
        pblk->state = PBLK_STATE_STOPPING;
        trace_pblk_state(pblk_disk_name(pblk), pblk->state);
}

static void pblk_line_close_meta_sync(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line, *tline;
        LIST_HEAD(list);

        spin_lock(&l_mg->close_lock);
        if (list_empty(&l_mg->emeta_list)) {
                spin_unlock(&l_mg->close_lock);
                return;
        }

        list_cut_position(&list, &l_mg->emeta_list, l_mg->emeta_list.prev);
        spin_unlock(&l_mg->close_lock);

        list_for_each_entry_safe(line, tline, &list, list) {
                struct pblk_emeta *emeta = line->emeta;

                while (emeta->mem < lm->emeta_len[0]) {
                        int ret;

                        ret = pblk_submit_meta_io(pblk, line);
                        if (ret) {
                                pblk_err(pblk, "sync meta line %d failed (%d)\n",
                                                        line->id, ret);
                                return;
                        }
                }
        }

        pblk_wait_for_meta(pblk);
        flush_workqueue(pblk->close_wq);
}

void __pblk_pipeline_flush(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int ret;

        spin_lock(&l_mg->free_lock);
        if (pblk->state == PBLK_STATE_RECOVERING ||
                                        pblk->state == PBLK_STATE_STOPPED) {
                spin_unlock(&l_mg->free_lock);
                return;
        }
        pblk->state = PBLK_STATE_RECOVERING;
        trace_pblk_state(pblk_disk_name(pblk), pblk->state);
        spin_unlock(&l_mg->free_lock);

        pblk_flush_writer(pblk);
        pblk_wait_for_meta(pblk);

        ret = pblk_recov_pad(pblk);
        if (ret) {
                pblk_err(pblk, "could not close data on teardown (%d)\n", ret);
                return;
        }

        flush_workqueue(pblk->bb_wq);
        pblk_line_close_meta_sync(pblk);
}

void __pblk_pipeline_stop(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;

        spin_lock(&l_mg->free_lock);
        pblk->state = PBLK_STATE_STOPPED;
        trace_pblk_state(pblk_disk_name(pblk), pblk->state);
        l_mg->data_line = NULL;
        l_mg->data_next = NULL;
        spin_unlock(&l_mg->free_lock);
}

void pblk_pipeline_stop(struct pblk *pblk)
{
        __pblk_pipeline_flush(pblk);
        __pblk_pipeline_stop(pblk);
}

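/*
 * Swap the current data line for the pre-allocated next line, finish
 * any outstanding erases on it, initialize its metadata and bitmaps,
 * and allocate a new "next" line. Stops the pipeline gracefully when
 * no free line is available.
 */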
1543struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
1544{
1545        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1546        struct pblk_line *cur, *new = NULL;
1547        unsigned int left_seblks;
1548
1549        new = l_mg->data_next;
1550        if (!new)
1551                goto out;
1552
1553        spin_lock(&l_mg->free_lock);
1554        cur = l_mg->data_line;
1555        l_mg->data_line = new;
1556
1557        pblk_line_setup_metadata(new, l_mg, &pblk->lm);
1558        spin_unlock(&l_mg->free_lock);
1559
1560retry_erase:
1561        left_seblks = atomic_read(&new->left_seblks);
1562        if (left_seblks) {
1563                /* If line is not fully erased, erase it */
1564                if (atomic_read(&new->left_eblks)) {
1565                        if (pblk_line_erase(pblk, new))
1566                                goto out;
1567                } else {
1568                        io_schedule();
1569                }
1570                goto retry_erase;
1571        }
1572
1573        if (pblk_line_alloc_bitmaps(pblk, new))
1574                return NULL;
1575
1576retry_setup:
1577        if (!pblk_line_init_metadata(pblk, new, cur)) {
1578                new = pblk_line_retry(pblk, new);
1579                if (!new)
1580                        goto out;
1581
1582                goto retry_setup;
1583        }
1584
1585        if (!pblk_line_init_bb(pblk, new, 1)) {
1586                new = pblk_line_retry(pblk, new);
1587                if (!new)
1588                        goto out;
1589
1590                goto retry_setup;
1591        }
1592
1593        pblk_rl_free_lines_dec(&pblk->rl, new, true);
1594
1595        /* Allocate next line for preparation */
1596        spin_lock(&l_mg->free_lock);
1597        l_mg->data_next = pblk_line_get(pblk);
1598        if (!l_mg->data_next) {
1599                /* If we cannot get a new line, we need to stop the pipeline.
1600                 * Only allow as many writes in as we can store safely and then
1601                 * fail gracefully
1602                 */
1603                pblk_stop_writes(pblk, new);
1604                l_mg->data_next = NULL;
1605        } else {
1606                l_mg->data_next->seq_nr = l_mg->d_seq_nr++;
1607                l_mg->data_next->type = PBLK_LINETYPE_DATA;
1608        }
1609        spin_unlock(&l_mg->free_lock);
1610
1611out:
1612        return new;
1613}
1614
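    /*
     * Return a line that has been garbage collected to the free list. A line
     * that saw errors during GC is instead put back on its GC list so it can
     * be collected again.
     */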
1615static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
1616{
1617        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1618        struct pblk_gc *gc = &pblk->gc;
1619
1620        spin_lock(&line->lock);
1621        WARN_ON(line->state != PBLK_LINESTATE_GC);
1622        if (line->w_err_gc->has_gc_err) {
1623                spin_unlock(&line->lock);
1624                pblk_err(pblk, "line %d had errors during GC\n", line->id);
1625                pblk_put_line_back(pblk, line);
1626                line->w_err_gc->has_gc_err = 0;
1627                return;
1628        }
1629
1630        line->state = PBLK_LINESTATE_FREE;
1631        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1632                                        line->state);
1633        line->gc_group = PBLK_LINEGC_NONE;
1634        pblk_line_free(line);
1635
1636        if (line->w_err_gc->has_write_err) {
1637                pblk_rl_werr_line_out(&pblk->rl);
1638                line->w_err_gc->has_write_err = 0;
1639        }
1640
1641        spin_unlock(&line->lock);
1642        atomic_dec(&gc->pipeline_gc);
1643
1644        spin_lock(&l_mg->free_lock);
1645        list_add_tail(&line->list, &l_mg->free_list);
1646        l_mg->nr_free_lines++;
1647        spin_unlock(&l_mg->free_lock);
1648
1649        pblk_rl_free_lines_inc(&pblk->rl, line);
1650}
1651
1652static void pblk_line_put_ws(struct work_struct *work)
1653{
1654        struct pblk_line_ws *line_put_ws = container_of(work,
1655                                                struct pblk_line_ws, ws);
1656        struct pblk *pblk = line_put_ws->pblk;
1657        struct pblk_line *line = line_put_ws->line;
1658
1659        __pblk_line_put(pblk, line);
1660        mempool_free(line_put_ws, &pblk->gen_ws_pool);
1661}
1662
1663void pblk_line_put(struct kref *ref)
1664{
1665        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1666        struct pblk *pblk = line->pblk;
1667
1668        __pblk_line_put(pblk, line);
1669}
1670
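    /*
     * Kref release callback that defers the actual line put to the r_end_wq
     * workqueue (see pblk_line_put() for the synchronous variant).
     */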
1671void pblk_line_put_wq(struct kref *ref)
1672{
1673        struct pblk_line *line = container_of(ref, struct pblk_line, ref);
1674        struct pblk *pblk = line->pblk;
1675        struct pblk_line_ws *line_put_ws;
1676
1677        line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
1678        if (!line_put_ws)
1679                return;
1680
1681        line_put_ws->pblk = pblk;
1682        line_put_ws->line = line;
1683        line_put_ws->priv = NULL;
1684
1685        INIT_WORK(&line_put_ws->ws, pblk_line_put_ws);
1686        queue_work(pblk->r_end_wq, &line_put_ws->ws);
1687}
1688
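    /*
     * Issue an asynchronous chunk reset for @ppa; completion is handled by
     * pblk_end_io_erase(). Returns 0 if the erase was submitted successfully.
     */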
1689int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
1690{
1691        struct nvm_rq *rqd;
1692        int err;
1693
1694        rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);
1695
1696        pblk_setup_e_rq(pblk, rqd, ppa);
1697
1698        rqd->end_io = pblk_end_io_erase;
1699        rqd->private = pblk;
1700
1701        trace_pblk_chunk_reset(pblk_disk_name(pblk),
1702                                &ppa, PBLK_CHUNK_RESET_START);
1703
1704        /* The write thread schedules erases so that it minimizes disturbances
1705         * with writes. Thus, there is no need to take the LUN semaphore.
1706         */
1707        err = pblk_submit_io(pblk, rqd, NULL);
1708        if (err) {
1709                struct nvm_tgt_dev *dev = pblk->dev;
1710                struct nvm_geo *geo = &dev->geo;
1711
1712                pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
1713                                        pblk_ppa_to_line_id(ppa),
1714                                        pblk_ppa_to_pos(geo, ppa));
1715        }
1716
1717        return err;
1718}
1719
1720struct pblk_line *pblk_line_get_data(struct pblk *pblk)
1721{
1722        return pblk->l_mg.data_line;
1723}
1724
1725/* For now, always erase next line */
1726struct pblk_line *pblk_line_get_erase(struct pblk *pblk)
1727{
1728        return pblk->l_mg.data_next;
1729}
1730
1731int pblk_line_is_full(struct pblk_line *line)
1732{
1733        return (line->left_msecs == 0);
1734}
1735
1736static void pblk_line_should_sync_meta(struct pblk *pblk)
1737{
1738        if (pblk_rl_is_limit(&pblk->rl))
1739                pblk_line_close_meta_sync(pblk);
1740}
1741
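    /*
     * Transition a fully-mapped line to the closed state: release its meta
     * line slot and map bitmap, and queue it on the GC list selected by
     * pblk_line_gc_list().
     */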
1742void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
1743{
1744        struct nvm_tgt_dev *dev = pblk->dev;
1745        struct nvm_geo *geo = &dev->geo;
1746        struct pblk_line_meta *lm = &pblk->lm;
1747        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1748        struct list_head *move_list;
1749        int i;
1750
1751#ifdef CONFIG_NVM_PBLK_DEBUG
1752        WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line),
1753                                "pblk: corrupt closed line %d\n", line->id);
1754#endif
1755
1756        spin_lock(&l_mg->free_lock);
1757        WARN_ON(!test_and_clear_bit(line->meta_line, &l_mg->meta_bitmap));
1758        spin_unlock(&l_mg->free_lock);
1759
1760        spin_lock(&l_mg->gc_lock);
1761        spin_lock(&line->lock);
1762        WARN_ON(line->state != PBLK_LINESTATE_OPEN);
1763        line->state = PBLK_LINESTATE_CLOSED;
1764        move_list = pblk_line_gc_list(pblk, line);
1765        list_add_tail(&line->list, move_list);
1766
1767        mempool_free(line->map_bitmap, l_mg->bitmap_pool);
1768        line->map_bitmap = NULL;
1769        line->smeta = NULL;
1770        line->emeta = NULL;
1771
1772        for (i = 0; i < lm->blk_per_line; i++) {
1773                struct pblk_lun *rlun = &pblk->luns[i];
1774                int pos = pblk_ppa_to_pos(geo, rlun->bppa);
1775                int state = line->chks[pos].state;
1776
1777                if (!(state & NVM_CHK_ST_OFFLINE))
1778                        state = NVM_CHK_ST_CLOSED;
1779        }
1780
1781        spin_unlock(&line->lock);
1782        spin_unlock(&l_mg->gc_lock);
1783
1784        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
1785                                        line->state);
1786}
1787
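    /*
     * Fill in the end-of-line metadata (valid sector counts, bad-block
     * bitmap, write-amplification counters and CRCs) and queue the line on
     * l_mg->emeta_list for emeta write-out.
     */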
1788void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
1789{
1790        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
1791        struct pblk_line_meta *lm = &pblk->lm;
1792        struct pblk_emeta *emeta = line->emeta;
1793        struct line_emeta *emeta_buf = emeta->buf;
1794        struct wa_counters *wa = emeta_to_wa(lm, emeta_buf);
1795
1796        /* No need for exact vsc value; avoid a big line lock and take approx. */
1797        memcpy(emeta_to_vsc(pblk, emeta_buf), l_mg->vsc_list, lm->vsc_list_len);
1798        memcpy(emeta_to_bb(emeta_buf), line->blk_bitmap, lm->blk_bitmap_len);
1799
1800        wa->user = cpu_to_le64(atomic64_read(&pblk->user_wa));
1801        wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
1802        wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
1803
1804        if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
1805                emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
1806                export_guid(emeta_buf->header.uuid, &pblk->instance_uuid);
1807                emeta_buf->header.id = cpu_to_le32(line->id);
1808                emeta_buf->header.type = cpu_to_le16(line->type);
1809                emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
1810                emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
1811                emeta_buf->header.crc = cpu_to_le32(
1812                        pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
1813        }
1814
1815        emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
1816        emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
1817
1818        spin_lock(&l_mg->close_lock);
1819        spin_lock(&line->lock);
1820
1821        /* Update the in-memory start address for emeta, in case it has
1822         * shifted due to write errors
1823         */
1824        if (line->emeta_ssec != line->cur_sec)
1825                line->emeta_ssec = line->cur_sec;
1826
1827        list_add_tail(&line->list, &l_mg->emeta_list);
1828        spin_unlock(&line->lock);
1829        spin_unlock(&l_mg->close_lock);
1830
1831        pblk_line_should_sync_meta(pblk);
1832}
1833
1834static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
1835{
1836        struct pblk_line_meta *lm = &pblk->lm;
1837        unsigned int lba_list_size = lm->emeta_len[2];
1838        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1839        struct pblk_emeta *emeta = line->emeta;
1840
1841        w_err_gc->lba_list = kvmalloc(lba_list_size, GFP_KERNEL);
            if (!w_err_gc->lba_list)
                    return;
1842        memcpy(w_err_gc->lba_list, emeta_to_lbas(pblk, emeta->buf),
1843                                lba_list_size);
1844}
1845
1846void pblk_line_close_ws(struct work_struct *work)
1847{
1848        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
1849                                                                        ws);
1850        struct pblk *pblk = line_ws->pblk;
1851        struct pblk_line *line = line_ws->line;
1852        struct pblk_w_err_gc *w_err_gc = line->w_err_gc;
1853
1854        /* Write errors make the emeta start address stored in smeta invalid,
1855         * so keep a copy of the lba list until we've gc'd the line
1856         */
1857        if (w_err_gc->has_write_err)
1858                pblk_save_lba_list(pblk, line);
1859
1860        pblk_line_close(pblk, line);
1861        mempool_free(line_ws, &pblk->gen_ws_pool);
1862}
1863
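    /*
     * Generic helper to run @work on @wq: a pblk_line_ws context is taken
     * from the shared mempool and queued; the work function is responsible
     * for freeing it (see e.g. pblk_line_put_ws()).
     */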
1864void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
1865                      void (*work)(struct work_struct *), gfp_t gfp_mask,
1866                      struct workqueue_struct *wq)
1867{
1868        struct pblk_line_ws *line_ws;
1869
1870        line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
1871        if (!line_ws) {
1872                pblk_err(pblk, "could not allocate memory\n");
1873                return;
1874        }
1875
1876        line_ws->pblk = pblk;
1877        line_ws->line = line;
1878        line_ws->priv = priv;
1879
1880        INIT_WORK(&line_ws->ws, work);
1881        queue_work(wq, &line_ws->ws);
1882}
1883
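    /*
     * Take the per-LUN write semaphore, logging an error if it cannot be
     * acquired within 30 seconds or the wait is interrupted.
     */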
1884static void __pblk_down_chunk(struct pblk *pblk, int pos)
1885{
1886        struct pblk_lun *rlun = &pblk->luns[pos];
1887        int ret;
1888
1889        /*
1890         * Only send one inflight I/O per LUN. Since we map at a page
1891         * granularity, all ppas in the I/O will map to the same LUN
1892         */
1893
1894        ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
1895        if (ret == -ETIME || ret == -EINTR)
1896                pblk_err(pblk, "could not take lun semaphore: err %d\n",
1897                                -ret);
1898}
1899
1900void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
1901{
1902        struct nvm_tgt_dev *dev = pblk->dev;
1903        struct nvm_geo *geo = &dev->geo;
1904        int pos = pblk_ppa_to_pos(geo, ppa);
1905
1906        __pblk_down_chunk(pblk, pos);
1907}
1908
1909void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
1910                  unsigned long *lun_bitmap)
1911{
1912        struct nvm_tgt_dev *dev = pblk->dev;
1913        struct nvm_geo *geo = &dev->geo;
1914        int pos = pblk_ppa_to_pos(geo, ppa);
1915
1916        /* If the LUN has been locked for this same request, do not attempt to
1917         * lock it again
1918         */
1919        if (test_and_set_bit(pos, lun_bitmap))
1920                return;
1921
1922        __pblk_down_chunk(pblk, pos);
1923}
1924
1925void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
1926{
1927        struct nvm_tgt_dev *dev = pblk->dev;
1928        struct nvm_geo *geo = &dev->geo;
1929        struct pblk_lun *rlun;
1930        int pos = pblk_ppa_to_pos(geo, ppa);
1931
1932        rlun = &pblk->luns[pos];
1933        up(&rlun->wr_sem);
1934}
1935
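    /*
     * Release the write semaphore of every LUN marked in @lun_bitmap, i.e.
     * all LUNs taken via pblk_down_rq() for this request.
     */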
1936void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
1937{
1938        struct nvm_tgt_dev *dev = pblk->dev;
1939        struct nvm_geo *geo = &dev->geo;
1940        struct pblk_lun *rlun;
1941        int num_lun = geo->all_luns;
1942        int bit = -1;
1943
1944        while ((bit = find_next_bit(lun_bitmap, num_lun, bit + 1)) < num_lun) {
1945                rlun = &pblk->luns[bit];
1946                up(&rlun->wr_sem);
1947        }
1948}
1949
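    /*
     * Set the L2P entry for @lba to @ppa, invalidating the previous mapping
     * if it pointed to a device address.
     */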
1950void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1951{
1952        struct ppa_addr ppa_l2p;
1953
1954        /* logic error: lba out-of-bounds. Ignore update */
1955        if (!(lba < pblk->capacity)) {
1956                WARN(1, "pblk: corrupted L2P map request\n");
1957                return;
1958        }
1959
1960        spin_lock(&pblk->trans_lock);
1961        ppa_l2p = pblk_trans_map_get(pblk, lba);
1962
1963        if (!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p))
1964                pblk_map_invalidate(pblk, ppa_l2p);
1965
1966        pblk_trans_map_set(pblk, lba, ppa);
1967        spin_unlock(&pblk->trans_lock);
1968}
1969
1970void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa)
1971{
1972
1973#ifdef CONFIG_NVM_PBLK_DEBUG
1974        /* Callers must ensure that the ppa points to a cache address */
1975        BUG_ON(!pblk_addr_in_cache(ppa));
1976        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa)));
1977#endif
1978
1979        pblk_update_map(pblk, lba, ppa);
1980}
1981
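    /*
     * Update the L2P entry for a sector moved by GC. The update is only
     * applied if the map still points to the GC'd device address; if a user
     * write raced ahead, the map is left untouched and 0 is returned.
     */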
1982int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new,
1983                       struct pblk_line *gc_line, u64 paddr_gc)
1984{
1985        struct ppa_addr ppa_l2p, ppa_gc;
1986        int ret = 1;
1987
1988#ifdef CONFIG_NVM_PBLK_DEBUG
1989        /* Callers must ensure that the ppa points to a cache address */
1990        BUG_ON(!pblk_addr_in_cache(ppa_new));
1991        BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new)));
1992#endif
1993
1994        /* logic error: lba out-of-bounds. Ignore update */
1995        if (!(lba < pblk->capacity)) {
1996                WARN(1, "pblk: corrupted L2P map request\n");
1997                return 0;
1998        }
1999
2000        spin_lock(&pblk->trans_lock);
2001        ppa_l2p = pblk_trans_map_get(pblk, lba);
2002        ppa_gc = addr_to_gen_ppa(pblk, paddr_gc, gc_line->id);
2003
2004        if (!pblk_ppa_comp(ppa_l2p, ppa_gc)) {
2005                spin_lock(&gc_line->lock);
2006                WARN(!test_bit(paddr_gc, gc_line->invalid_bitmap),
2007                                                "pblk: corrupted GC update\n");
2008                spin_unlock(&gc_line->lock);
2009
2010                ret = 0;
2011                goto out;
2012        }
2013
2014        pblk_trans_map_set(pblk, lba, ppa_new);
2015out:
2016        spin_unlock(&pblk->trans_lock);
2017        return ret;
2018}
2019
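    /*
     * Move the L2P entry for @lba from its write-cache address to the device
     * address it has been mapped to. If the cacheline was updated in the
     * meantime, the device-side mapping is invalidated instead.
     */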
2020void pblk_update_map_dev(struct pblk *pblk, sector_t lba,
2021                         struct ppa_addr ppa_mapped, struct ppa_addr ppa_cache)
2022{
2023        struct ppa_addr ppa_l2p;
2024
2025#ifdef CONFIG_NVM_PBLK_DEBUG
2026        /* Callers must ensure that the ppa points to a device address */
2027        BUG_ON(pblk_addr_in_cache(ppa_mapped));
2028#endif
2029        /* Invalidate and discard padded entries */
2030        if (lba == ADDR_EMPTY) {
2031                atomic64_inc(&pblk->pad_wa);
2032#ifdef CONFIG_NVM_PBLK_DEBUG
2033                atomic_long_inc(&pblk->padded_wb);
2034#endif
2035                if (!pblk_ppa_empty(ppa_mapped))
2036                        pblk_map_invalidate(pblk, ppa_mapped);
2037                return;
2038        }
2039
2040        /* logic error: lba out-of-bounds. Ignore update */
2041        if (!(lba < pblk->capacity)) {
2042                WARN(1, "pblk: corrupted L2P map request\n");
2043                return;
2044        }
2045
2046        spin_lock(&pblk->trans_lock);
2047        ppa_l2p = pblk_trans_map_get(pblk, lba);
2048
2049        /* Do not update L2P if the cacheline has been updated. In this case,
2050         * the mapped ppa must be invalidated
2051         */
2052        if (!pblk_ppa_comp(ppa_l2p, ppa_cache)) {
2053                if (!pblk_ppa_empty(ppa_mapped))
2054                        pblk_map_invalidate(pblk, ppa_mapped);
2055                goto out;
2056        }
2057
2058#ifdef CONFIG_NVM_PBLK_DEBUG
2059        WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p));
2060#endif
2061
2062        pblk_trans_map_set(pblk, lba, ppa_mapped);
2063out:
2064        spin_unlock(&pblk->trans_lock);
2065}
2066
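    /*
     * Look up a sequential range of lbas starting at @blba. The lookup stops
     * at the first transition between cached and device-resident sectors;
     * @from_cache reports which side the returned range lives on, and a line
     * reference is taken for every device-resident sector. Returns the
     * number of sectors resolved.
     */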
2067int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
2068                         sector_t blba, int nr_secs, bool *from_cache)
2069{
2070        int i;
2071
2072        spin_lock(&pblk->trans_lock);
2073        for (i = 0; i < nr_secs; i++) {
2074                struct ppa_addr ppa;
2075
2076                ppa = ppas[i] = pblk_trans_map_get(pblk, blba + i);
2077
2078                /* If the L2P entry maps to a line, the reference is valid */
2079                if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
2080                        struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
2081
2082                        if (i > 0 && *from_cache)
2083                                break;
2084                        *from_cache = false;
2085
2086                        kref_get(&line->ref);
2087                } else {
2088                        if (i > 0 && !*from_cache)
2089                                break;
2090                        *from_cache = true;
2091                }
2092        }
2093        spin_unlock(&pblk->trans_lock);
2094        return i;
2095}
2096
2097void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
2098                          u64 *lba_list, int nr_secs)
2099{
2100        u64 lba;
2101        int i;
2102
2103        spin_lock(&pblk->trans_lock);
2104        for (i = 0; i < nr_secs; i++) {
2105                lba = lba_list[i];
2106                if (lba != ADDR_EMPTY) {
2107                        /* logic error: lba out-of-bounds. Ignore lookup */
2108                        if (!(lba < pblk->capacity)) {
2109                                WARN(1, "pblk: corrupted L2P map request\n");
2110                                continue;
2111                        }
2112                        ppas[i] = pblk_trans_map_get(pblk, lba);
2113                }
2114        }
2115        spin_unlock(&pblk->trans_lock);
2116}
2117
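    /*
     * Return the buffer holding per-sector metadata for a write: the OOB
     * meta list when the device supports OOB metadata, otherwise the last
     * page of the bio, which carries the packed metadata.
     */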
2118void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
2119{
2120        void *buffer;
2121
2122        if (pblk_is_oob_meta_supported(pblk)) {
2123                /* Just use OOB metadata buffer as always */
2124                buffer = rqd->meta_list;
2125        } else {
2126                /* We need to reuse the last page of the request (packed
2127                 * metadata) in a similar way to traditional OOB metadata
2128                 */
2129                buffer = page_to_virt(
2130                        rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2131        }
2132
2133        return buffer;
2134}
2135
2136void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
2137{
2138        void *meta_list = rqd->meta_list;
2139        void *page;
2140        int i;
2141
2142        if (pblk_is_oob_meta_supported(pblk))
2143                return;
2144
2145        page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
2146        /* We need to fill the OOB meta buffer with data from packed metadata */
2147        for (i = 0; i < rqd->nr_ppas; i++)
2148                memcpy(pblk_get_meta(pblk, meta_list, i),
2149                        page + (i * sizeof(struct pblk_sec_meta)),
2150                        sizeof(struct pblk_sec_meta));
2151}
2152