linux/drivers/scsi/lpfc/lpfc_nvme.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_io_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                  int idx, int expedite);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);

static struct nvme_fc_port_template lpfc_nvme_template;

/**
 * lpfc_nvme_create_queue - Create and initialize a driver queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @qsize: Size of the queue in bytes
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx, u16 qsize,
                       void **handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nvme_qhandle *qhandle;
        char *str;

        if (!pnvme_lport->private)
                return -ENOMEM;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
        if (qhandle == NULL)
                return -ENOMEM;

        qhandle->cpu_id = raw_smp_processor_id();
        qhandle->qidx = qidx;
        /*
         * NVME qidx == 0 is the admin queue, so both admin queue
         * and first IO queue will use MSI-X vector and associated
         * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
         */
        if (qidx) {
                str = "IO ";  /* IO queue */
                qhandle->index = ((qidx - 1) %
                        lpfc_nvme_template.max_hw_queues);
        } else {
                str = "ADM";  /* Admin queue */
                qhandle->index = qidx;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6073 Binding %s HdwQueue %d  (cpu %d) to "
                         "hdw_queue %d qhandle x%px\n", str,
                         qidx, qhandle->cpu_id, qhandle->index, qhandle);
        *handle = (void *)qhandle;
        return 0;
}
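
/*
 * Worked example of the qidx mapping above (illustrative only, assuming
 * lpfc_nvme_template.max_hw_queues == 4):
 *
 *   qidx 0 (admin queue) -> index 0
 *   qidx 1..4 (IO)       -> index (qidx - 1) % 4 = 0..3
 *   qidx 5 (IO)          -> index 0  (wraps back onto the first hdwq)
 *
 * so the admin queue and the first IO queue share MSI-X vector/EQ/CQ/WQ 0.
 */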

/**
 * lpfc_nvme_delete_queue - Delete a driver queue handle
 * @pnvme_lport: Transport localport that LS is to be issued from
 * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free any internal data structures
 * that bound the @qidx to its internal IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx,
                       void *handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;

        if (!pnvme_lport->private)
                return;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                        "6001 ENTER.  lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
                        lport, qidx, handle);
        kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
        struct lpfc_nvme_lport *lport = localport->private;

        lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
                         "6173 localport x%px delete complete\n",
                         lport);

        /* release any threads waiting for the unreg to complete */
        if (lport->vport->localport)
                complete(lport->lport_unreg_cmp);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
static void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct lpfc_nvme_rport *rport = remoteport->private;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        u32 fc4_xpt_flags;

        ndlp = rport->ndlp;
        if (!ndlp) {
                pr_err("**** %s: NULL ndlp on rport x%px remoteport x%px\n",
                       __func__, rport, remoteport);
                goto rport_err;
        }

        vport = ndlp->vport;
        if (!vport) {
                pr_err("**** %s: Null vport on ndlp x%px, ste x%x rport x%px\n",
                       __func__, ndlp, ndlp->nlp_state, rport);
                goto rport_err;
        }

        fc4_xpt_flags = NVME_XPT_REGD | SCSI_XPT_REGD;

        /* Remove this rport from the lport's list - memory is owned by the
         * transport. Remove the ndlp reference for the NVME transport before
         * calling state machine to remove the node.
         */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6146 remoteport delete of remoteport x%px\n",
                        remoteport);
        spin_lock_irq(&ndlp->lock);

        /* The register rebind might have occurred before the delete
         * downcall.  Guard against this race.
         */
        if (ndlp->fc4_xpt_flags & NLP_WAIT_FOR_UNREG)
                ndlp->fc4_xpt_flags &= ~(NLP_WAIT_FOR_UNREG | NVME_XPT_REGD);

        spin_unlock_irq(&ndlp->lock);

        /* On a devloss timeout event, one more put is executed provided the
         * NVME and SCSI rport unregister requests are complete.  If the vport
         * is unloading, this extra put is executed by lpfc_drop_node.
         */
        if (!(ndlp->fc4_xpt_flags & fc4_xpt_flags))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

 rport_err:
        return;
}

/**
 * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request
 *
 * This routine is used for processing an asynchronously received NVME LS
 * request. Any remaining validation is done and the LS is then forwarded
 * to the nvme-fc transport via nvme_fc_rcv_ls_req().
 *
 * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
 * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
 * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
 *
 * Returns 0 if LS was handled and delivered to the transport
 * Returns 1 if LS failed to be handled and should be dropped
 */
int
lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
                        struct lpfc_async_xchg_ctx *axchg)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
        struct lpfc_vport *vport;
        struct lpfc_nvme_rport *lpfc_rport;
        struct nvme_fc_remote_port *remoteport;
        struct lpfc_nvme_lport *lport;
        uint32_t *payload = axchg->payload;
        int rc;

        vport = axchg->ndlp->vport;
        lpfc_rport = axchg->ndlp->nrport;
        if (!lpfc_rport)
                return -EINVAL;

        remoteport = lpfc_rport->remoteport;
        if (!vport->localport)
                return -EINVAL;

        lport = vport->localport->private;
        if (!lport)
                return -EINVAL;

        rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
                                axchg->size);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
                        "%08x %08x %08x\n",
                        axchg->size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));

        if (!rc)
                return 0;
#endif
        return 1;
}

/**
 * __lpfc_nvme_ls_req_cmp - Generic completion handler for an NVME
 *        LS request.
 * @phba: Pointer to HBA context object
 * @vport: The local port that issued the LS
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * This function is the generic completion handler for NVME LS requests.
 * The function updates any states and statistics, calls the transport
 * ls_req done() routine, then tears down the command and buffers used
 * for the LS request.
 **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
                        struct lpfc_iocbq *cmdwqe,
                        struct lpfc_wcqe_complete *wcqe)
{
        struct nvmefc_ls_req *pnvme_lsreq;
        struct lpfc_dmabuf *buf_ptr;
        struct lpfc_nodelist *ndlp;
        uint32_t status;

        pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
        ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6047 NVMEx LS REQ x%px cmpl DID %x Xri: %x "
                         "status %x reason x%x cmd:x%px lsreq:x%px bmp:x%px "
                         "ndlp:x%px\n",
                         pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                         cmdwqe->sli4_xritag, status,
                         (wcqe->parameter & 0xffff),
                         cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

        lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
                         cmdwqe->sli4_xritag, status, wcqe->parameter);

        if (cmdwqe->context3) {
                buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
                cmdwqe->context3 = NULL;
        }
        if (pnvme_lsreq->done)
                pnvme_lsreq->done(pnvme_lsreq, status);
        else
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6046 NVMEx cmpl without done call back? "
                                 "Data x%px DID %x Xri: %x status %x\n",
                                 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                                 cmdwqe->sli4_xritag, status);
        if (ndlp) {
                lpfc_nlp_put(ndlp);
                cmdwqe->context1 = NULL;
        }
        lpfc_sli_release_iocbq(phba, cmdwqe);
}

static void
lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                       struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_vport *vport = cmdwqe->vport;
        struct lpfc_nvme_lport *lport;
        uint32_t status;

        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

        if (vport->localport) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        atomic_inc(&lport->fc4NvmeLsCmpls);
                        if (status) {
                                if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                        atomic_inc(&lport->cmpl_ls_xb);
                                atomic_inc(&lport->cmpl_ls_err);
                        }
                }
        }

        __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
                  struct lpfc_dmabuf *inp,
                  struct nvmefc_ls_req *pnvme_lsreq,
                  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
                               struct lpfc_wcqe_complete *),
                  struct lpfc_nodelist *ndlp, uint32_t num_entry,
                  uint32_t tmo, uint8_t retry)
{
        struct lpfc_hba *phba = vport->phba;
        union lpfc_wqe128 *wqe;
        struct lpfc_iocbq *genwqe;
        struct ulp_bde64 *bpl;
        struct ulp_bde64 bde;
        int i, rc, xmit_len, first_len;

        /* Allocate buffer for command WQE */
        genwqe = lpfc_sli_get_iocbq(phba);
        if (genwqe == NULL)
                return 1;

        wqe = &genwqe->wqe;
        /* Initialize only 64 bytes */
        memset(wqe, 0, sizeof(union lpfc_wqe));

        genwqe->context3 = (uint8_t *)bmp;
        genwqe->iocb_flag |= LPFC_IO_NVME_LS;

        /* Save for completion so we can release these resources */
        genwqe->context1 = lpfc_nlp_get(ndlp);
        if (!genwqe->context1) {
                dev_warn(&phba->pcidev->dev,
                         "Warning: Failed node ref, not sending LS_REQ\n");
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }

        genwqe->context2 = (uint8_t *)pnvme_lsreq;
        /* Fill in payload, bp points to frame payload */

        if (!tmo)
                /* FC spec states we need 3 * ratov for CT requests */
                tmo = (3 * phba->fc_ratov);

        /* For this command calculate the xmit length of the request bde. */
        xmit_len = 0;
        first_len = 0;
        bpl = (struct ulp_bde64 *)bmp->virt;
        for (i = 0; i < num_entry; i++) {
                bde.tus.w = bpl[i].tus.w;
                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
                        break;
                xmit_len += bde.tus.f.bdeSize;
                if (i == 0)
                        first_len = xmit_len;
        }
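        /*
         * Example of the length calculation above (illustrative): for the
         * two-entry BPL built by __lpfc_nvme_ls_req(), entry 0 (the LS
         * request, BUFF_TYPE_BDE_64) is counted and the loop then breaks
         * on entry 1 (the LS response, BUFF_TYPE_BDE_64I), so both
         * xmit_len and first_len end up equal to pnvme_lsreq->rqstlen.
         */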

        genwqe->rsvd2 = num_entry;
        genwqe->hba_wqidx = 0;

        /* Words 0 - 2 */
        wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->generic.bde.tus.f.bdeSize = first_len;
        wqe->generic.bde.addrLow = bpl[0].addrLow;
        wqe->generic.bde.addrHigh = bpl[0].addrHigh;

        /* Word 3 */
        wqe->gen_req.request_payload_len = first_len;

        /* Word 4 */

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
        bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
        bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
        bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
        bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
        bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

        /* Word 8 */
        wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

        /* Issue GEN REQ WQE for NPORT <did> */
        genwqe->wqe_cmpl = cmpl;
        genwqe->iocb_cmpl = NULL;
        genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
        genwqe->vport = vport;
        genwqe->retry = retry;

        lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
                         genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

        rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
        if (rc) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6045 Issue GEN REQ WQE to NPORT x%x "
                                 "Data: x%x x%x  rc x%x\n",
                                 ndlp->nlp_DID, genwqe->iotag,
                                 vport->port_state, rc);
                lpfc_nlp_put(ndlp);
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
                         "6050 Issue GEN REQ WQE to NPORT x%x "
                         "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
                         "bmp:x%px xmit:%d 1st:%d\n",
                         ndlp->nlp_DID, genwqe->sli4_xritag,
                         vport->port_state,
                         genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
        return 0;
}

/**
 * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
 * @vport: The local port issuing the LS
 * @ndlp: The remote port to send the LS to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 * @gen_req_cmp: Completion call-back
 *
 * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
 * WQE to perform the LS operation.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                      struct nvmefc_ls_req *pnvme_lsreq,
                      void (*gen_req_cmp)(struct lpfc_hba *phba,
                                struct lpfc_iocbq *cmdwqe,
                                struct lpfc_wcqe_complete *wcqe))
{
        struct lpfc_dmabuf *bmp;
        struct ulp_bde64 *bpl;
        int ret;
        uint16_t ntype, nstate;

        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
                                 "LS Req\n",
                                 ndlp);
                return -ENODEV;
        }

        ntype = ndlp->nlp_type;
        nstate = ndlp->nlp_state;
        if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
            (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6088 NVMEx LS REQ: Fail DID x%06x not "
                                 "ready for IO. Type x%x, State x%x\n",
                                 ndlp->nlp_DID, ntype, nstate);
                return -ENODEV;
        }

        if (!vport->phba->sli4_hba.nvmels_wq)
                return -ENOMEM;

        /*
         * There are two DMA buffers in the request; in reality there is
         * one allocation, and the second buffer is just the start address
         * plus the command size.
         * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
         * in a lpfc_dmabuf struct. When freeing we just free the wrapper
         * because the nvme layer owns the data buffers.
         * We do not have to break these packets open, we don't care what is
         * in them. And we do not have to look at the response data, we only
         * care that we got a response. All of the caring is going to happen
         * in the nvme-fc layer.
         */

        bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
        if (!bmp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6044 NVMEx LS REQ: Could not alloc LS buf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                return -ENOMEM;
        }

        bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
        if (!bmp->virt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6042 NVMEx LS REQ: Could not alloc mbuf "
                                 "for DID %x\n",
                                 ndlp->nlp_DID);
                kfree(bmp);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&bmp->list);

        bpl = (struct ulp_bde64 *)bmp->virt;
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
        bpl->tus.f.bdeFlags = 0;
        bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
        bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
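        /*
         * Resulting BPL (sketch): entry 0 describes the LS request
         * (rqstdma/rqstlen, bdeFlags 0 == BUFF_TYPE_BDE_64) and entry 1
         * describes the LS response (rspdma/rsplen, BUFF_TYPE_BDE_64I),
         * which is why lpfc_nvme_gen_req() below is called with
         * num_entry == 2.
         */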

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
                        "rqstlen:%d rsplen:%d %pad %pad\n",
                        ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
                        pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                        &pnvme_lsreq->rspdma);

        ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
                                pnvme_lsreq, gen_req_cmp, ndlp, 2,
                                pnvme_lsreq->timeout, 0);
        if (ret != WQE_SUCCESS) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
                                 "lsreq x%px Status %x DID %x\n",
                                 pnvme_lsreq, ret, ndlp->nlp_DID);
                lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
                kfree(bmp);
                return -EIO;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_req - Issue an NVME Link Service request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero: various error codes, in form of -Exxx
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                 struct nvme_fc_remote_port *pnvme_rport,
                 struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport;
        struct lpfc_vport *vport;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
        if (unlikely(!lport) || unlikely(!rport))
                return -EINVAL;

        vport = lport->vport;
        if (vport->load_flag & FC_UNLOADING)
                return -ENODEV;

        atomic_inc(&lport->fc4NvmeLsRequests);

        ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
                                 lpfc_nvme_ls_req_cmp);
        if (ret)
                atomic_inc(&lport->xmt_ls_err);

        return ret;
}

/**
 * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
 *         NVME LS request
 * @vport: The local port that issued the LS
 * @ndlp: The remote port the LS was sent to
 * @pnvme_lsreq: Pointer to LS request structure from the transport
 *
 * The driver validates the ndlp, looks for the LS, and aborts the
 * LS if found.
 *
 * Returns:
 * 0 : if LS found and aborted
 * non-zero: various error conditions in form -Exxx
 **/
int
__lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                        struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *wqe, *next_wqe;
        bool foundit = false;

        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
                                "x%06x, Failing LS Req\n",
                                ndlp, ndlp ? ndlp->nlp_DID : 0);
                return -EINVAL;
        }

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
                         "x%px rqstlen:%d rsplen:%d %pad %pad\n",
                         pnvme_lsreq, pnvme_lsreq->rqstlen,
                         pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                         &pnvme_lsreq->rspdma);

        /*
         * Lock the ELS ring txcmplq and look for the wqe that matches
         * this ELS. If found, issue an abort on the wqe.
         */
        pring = phba->sli4_hba.nvmels_wq->pring;
        spin_lock_irq(&phba->hbalock);
        spin_lock(&pring->ring_lock);
        list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
                if (wqe->context2 == pnvme_lsreq) {
                        wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
                        foundit = true;
                        break;
                }
        }
        spin_unlock(&pring->ring_lock);

        if (foundit)
                lpfc_sli_issue_abort_iotag(phba, pring, wqe, NULL);
        spin_unlock_irq(&phba->hbalock);

        if (foundit)
                return 0;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
                         "6213 NVMEx LS REQ Abort: Unable to locate req x%px\n",
                         pnvme_lsreq);
        return -EINVAL;
}

static int
lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
                     struct nvme_fc_remote_port *remoteport,
                     struct nvmefc_ls_rsp *ls_rsp)
{
        struct lpfc_async_xchg_ctx *axchg =
                container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
        struct lpfc_nvme_lport *lport;
        int rc;

        if (axchg->phba->pport->load_flag & FC_UNLOADING)
                return -ENODEV;

        lport = (struct lpfc_nvme_lport *)localport->private;

        rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);

        if (rc) {
                /*
                 * unless the failure is due to having already sent
                 * the response, an abort will be generated for the
                 * exchange if the rsp can't be sent.
                 */
                if (rc != -EALREADY)
                        atomic_inc(&lport->xmt_ls_abort);
                return rc;
        }

        return 0;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior NVME LS request
 * @pnvme_lport: Transport localport that LS is to be issued from.
 * @pnvme_rport: Transport remoteport that LS is to be sent to.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's perspective).
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
                   struct nvme_fc_remote_port *pnvme_rport,
                   struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        int ret;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        if (unlikely(!lport))
                return;
        vport = lport->vport;

        if (vport->load_flag & FC_UNLOADING)
                return;

        ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);

        ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
        if (!ret)
                atomic_inc(&lport->xmt_ls_abort);
}

/* Fix up the existing sgls for NVME IO. */
static inline void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
                       struct lpfc_io_buf *lpfc_ncmd,
                       struct nvmefc_fcp_req *nCmd)
{
        struct lpfc_hba  *phba = vport->phba;
        struct sli4_sge *sgl;
        union lpfc_wqe128 *wqe;
        uint32_t *wptr, *dptr;

        /*
         * Get a local pointer to the built-in wqe and correct
         * the cmd size to match NVME's 96 bytes and fix
         * the dma address.
         */

        wqe = &lpfc_ncmd->cur_iocbq.wqe;

        /*
         * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
         * match NVME.  NVME sends 96 bytes. Also, use the
         * NVME command's command and response DMA addresses
         * rather than the virtual memory to ease the restore
         * operation.
         */
        sgl = lpfc_ncmd->dma_sgl;
        sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
        if (phba->cfg_nvme_embed_cmd) {
                sgl->addr_hi = 0;
                sgl->addr_lo = 0;

                /* Word 0-2 - NVME CMND IU (embedded payload) */
                wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
                wqe->generic.bde.tus.f.bdeSize = 56;
                wqe->generic.bde.addrHigh = 0;
                wqe->generic.bde.addrLow =  64;  /* Word 16 */

                /* Word 10  - dbde is 0, wqes is 1 in template */

                /*
                 * Embed the payload in the last half of the WQE
                 * WQE words 16-30 get the NVME CMD IU payload
                 *
                 * WQE words 16-19 get payload Words 1-4
                 * WQE words 20-21 get payload Words 6-7
                 * WQE words 22-29 get payload Words 16-23
                 */
                wptr = &wqe->words[16];  /* WQE ptr */
                dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
                dptr++;                 /* Skip Word 0 in payload */

                *wptr++ = *dptr++;      /* Word 1 */
                *wptr++ = *dptr++;      /* Word 2 */
                *wptr++ = *dptr++;      /* Word 3 */
                *wptr++ = *dptr++;      /* Word 4 */
                dptr++;                 /* Skip Word 5 in payload */
                *wptr++ = *dptr++;      /* Word 6 */
                *wptr++ = *dptr++;      /* Word 7 */
                dptr += 8;              /* Skip Words 8-15 in payload */
                *wptr++ = *dptr++;      /* Word 16 */
                *wptr++ = *dptr++;      /* Word 17 */
                *wptr++ = *dptr++;      /* Word 18 */
                *wptr++ = *dptr++;      /* Word 19 */
                *wptr++ = *dptr++;      /* Word 20 */
                *wptr++ = *dptr++;      /* Word 21 */
                *wptr++ = *dptr++;      /* Word 22 */
                *wptr   = *dptr;        /* Word 23 */
        } else {
                sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
                sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));

                /* Word 0-2 - NVME CMND IU Inline BDE */
                wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
                wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
                wqe->generic.bde.addrHigh = sgl->addr_hi;
                wqe->generic.bde.addrLow =  sgl->addr_lo;

                /* Word 10 */
                bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
                bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
        }

        sgl++;

        /* Setup the physical region for the FCP RSP */
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
        sgl->word2 = le32_to_cpu(sgl->word2);
        if (nCmd->sg_cnt)
                bf_set(lpfc_sli4_sge_last, sgl, 0);
        else
                bf_set(lpfc_sli4_sge_last, sgl, 1);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
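
/*
 * SGL layout after the fix-up above (sketch): sge[0] describes the NVME
 * CMD IU (sge_len == nCmd->cmdlen; its address is zeroed when the IU is
 * embedded in WQE words 16-30), sge[1] describes the response IU at
 * nCmd->rspdma (sge_len == nCmd->rsplen) and is marked as the last SGE
 * only when the command carries no data (nCmd->sg_cnt == 0); otherwise
 * lpfc_nvme_prep_io_dma() appends the data SGEs after these two.
 */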

/*
 * lpfc_nvme_io_cmd_wqe_cmpl - Completion handler for an NVME-over-FCP IO
 *
 * Driver registers this routine as the WQE completion handler for NVME
 * FCP IO.  This routine derives the final status and transfer lengths
 * from @wcqe, rebuilds the ERSP IU when needed, and calls the
 * nvmefc_fcp_req done() callback before releasing the IO buffer.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_io_buf *lpfc_ncmd =
                (struct lpfc_io_buf *)pwqeIn->context1;
        struct lpfc_vport *vport = pwqeIn->vport;
        struct nvmefc_fcp_req *nCmd;
        struct nvme_fc_ersp_iu *ep;
        struct nvme_fc_cmd_iu *cp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_nvme_fcpreq_priv *freqpriv;
        struct lpfc_nvme_lport *lport;
        uint32_t code, status, idx;
        uint16_t cid, sqhd, data;
        uint32_t *ptr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        int cpu;
#endif

        /* Sanity check on return of outstanding command */
        if (!lpfc_ncmd) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6071 Null lpfc_ncmd pointer. No "
                                 "release, skip completion\n");
                return;
        }

        /* Guard against abort handler being called at same time */
        spin_lock(&lpfc_ncmd->buf_lock);

        if (!lpfc_ncmd->nvmeCmd) {
                spin_unlock(&lpfc_ncmd->buf_lock);
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
                                 "nvmeCmd x%px\n",
                                 lpfc_ncmd, lpfc_ncmd->nvmeCmd);

                /* Release the lpfc_ncmd regardless of the missing elements. */
                lpfc_release_nvme_buf(phba, lpfc_ncmd);
                return;
        }
        nCmd = lpfc_ncmd->nvmeCmd;
        status = bf_get(lpfc_wcqe_c_status, wcqe);

        idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
        phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;

        if (unlikely(status && vport->localport)) {
                lport = (struct lpfc_nvme_lport *)vport->localport->private;
                if (lport) {
                        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                                atomic_inc(&lport->cmpl_fcp_xb);
                        atomic_inc(&lport->cmpl_fcp_err);
                }
        }

        lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                         status, wcqe->parameter);
        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        ndlp = lpfc_ncmd->ndlp;
        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "6062 Ignoring NVME cmpl.  No ndlp\n");
                goto out_err;
        }

        code = bf_get(lpfc_wcqe_c_code, wcqe);
        if (code == CQE_CODE_NVME_ERSP) {
                /* For this type of CQE, we need to rebuild the rsp */
                ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

                /*
                 * Get Command Id from cmd to plug into response. This
                 * code is not needed in the next NVME Transport drop.
                 */
                cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
                cid = cp->sqe.common.command_id;

                /*
                 * RSN is in CQE word 2
                 * SQHD is in CQE Word 3 bits 15:0
                 * Cmd Specific info is in CQE Word 1
                 * and in CQE Word 0 bits 15:0
                 */
                sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

                /* Now let's build the NVME ERSP IU */
                ep->iu_len = cpu_to_be16(8);
                ep->rsn = wcqe->parameter;
                ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
                ep->rsvd12 = 0;
                ptr = (uint32_t *)&ep->cqe.result.u64;
                *ptr++ = wcqe->total_data_placed;
                data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
                *ptr = (uint32_t)data;
                ep->cqe.sq_head = sqhd;
                ep->cqe.sq_id =  nCmd->sqid;
                ep->cqe.command_id = cid;
                ep->cqe.status = 0;

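                /*
                 * Note (sketch): iu_len is expressed in 32-bit words, so
                 * the value 8 above describes a 32-byte ERSP, matching
                 * the LPFC_NVME_ERSP_LEN reported back to the transport
                 * below (assuming the 0x20 definition in lpfc_nvme.h).
                 */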
                lpfc_ncmd->status = IOSTAT_SUCCESS;
                lpfc_ncmd->result = 0;
                nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
                nCmd->transferred_length = nCmd->payload_length;
        } else {
                lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
                lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);

                /* For NVME, the only failure path that results in an
                 * IO error is when the adapter rejects it.  All other
                 * conditions are a success case and resolved by the
                 * transport.
                 * IOSTAT_FCP_RSP_ERROR means:
                 * 1. Length of data received doesn't match total
                 *    transfer length in WQE
                 * 2. If the RSP payload does NOT match these cases:
                 *    a. RSP length 12/24 bytes and all zeros
                 *    b. NVME ERSP
                 */
                switch (lpfc_ncmd->status) {
                case IOSTAT_SUCCESS:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = 0;
                        break;
                case IOSTAT_FCP_RSP_ERROR:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = wcqe->parameter;
                        nCmd->status = 0;

                        /* Check if this is really an ERSP */
                        if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) {
                                lpfc_ncmd->status = IOSTAT_SUCCESS;
                                lpfc_ncmd->result = 0;

                                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                                         "6084 NVME Completion ERSP: "
                                         "xri %x placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         wcqe->total_data_placed);
                                break;
                        }
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                         "6081 NVME Completion Protocol Error: "
                                         "xri %x status x%x result x%x "
                                         "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->total_data_placed);
                        break;
                case IOSTAT_LOCAL_REJECT:
                        /* Let fall through to set command final state. */
                        if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
                                lpfc_printf_vlog(vport, KERN_INFO,
                                         LOG_NVME_IOERR,
                                         "6032 Delay Aborted cmd x%px "
                                         "nvme cmd x%px, xri x%x, "
                                         "xb %d\n",
                                         lpfc_ncmd, nCmd,
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         bf_get(lpfc_wcqe_c_xb, wcqe));
                        fallthrough;
                default:
out_err:
                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                         "6072 NVME Completion Error: xri %x "
                                         "status x%x result x%x [x%x] "
                                         "placed x%x\n",
                                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->parameter,
                                         wcqe->total_data_placed);
                        nCmd->transferred_length = 0;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = NVME_SC_INTERNAL;
                }
        }

        /* pick up SLI4 exchange busy condition */
        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
        else
                lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

        /* Update stats and complete the IO.  There is
         * no need for dma unprep because the nvme_transport
         * owns the dma address.
         */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (lpfc_ncmd->ts_cmd_start) {
                lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
                lpfc_ncmd->ts_data_io = ktime_get_ns();
                phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
                lpfc_io_ktime(phba, lpfc_ncmd);
        }
        if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
                cpu = raw_smp_processor_id();
                this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
                if (lpfc_ncmd->cpu != cpu)
                        lpfc_printf_vlog(vport,
                                         KERN_INFO, LOG_NVME_IOERR,
                                         "6701 CPU Check cmpl: "
                                         "cpu %d expect %d\n",
                                         cpu, lpfc_ncmd->cpu);
        }
#endif

        /* NVME targets need completion held off until the abort exchange
         * completes unless the NVME Rport is getting unregistered.
         */

        if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
                freqpriv = nCmd->private;
                freqpriv->nvme_buf = NULL;
                lpfc_ncmd->nvmeCmd = NULL;
                spin_unlock(&lpfc_ncmd->buf_lock);
                nCmd->done(nCmd);
        } else
                spin_unlock(&lpfc_ncmd->buf_lock);

        /* Call release with XB=1 to queue the IO into the abort list. */
        lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 * @pnode: pointer to a node-list data structure
 * @cstat: pointer to the control status structure
 *
 * This routine initializes the IO's WQE from the appropriate
 * iread/iwrite/icmnd template, then fills in the remaining WQE
 * fields (tags, transfer lengths, first-burst size) from the
 * nvmefc_fcp_req attached to @lpfc_ncmd.
 *
 * Return value :
 *   0 - Success
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                      struct lpfc_io_buf *lpfc_ncmd,
                      struct lpfc_nodelist *pnode,
                      struct lpfc_fc4_ctrl_stat *cstat)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
        union lpfc_wqe128 *wqe = &pwqeq->wqe;
        uint32_t req_len;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        if (nCmd->sg_cnt) {
                if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
                        /* From the iwrite template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iwrite_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
                        if ((phba->cfg_nvme_enable_fb) &&
                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
                                req_len = lpfc_ncmd->nvmeCmd->payload_length;
                                if (req_len < pnode->nvme_fb_size)
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                req_len;
                                else
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                pnode->nvme_fb_size;
                        } else {
                                wqe->fcp_iwrite.initial_xfer_len = 0;
                        }
                        cstat->output_requests++;
                } else {
                        /* From the iread template, initialize words 7 - 11 */
                        memcpy(&wqe->words[7],
                               &lpfc_iread_cmd_template.words[7],
                               sizeof(uint32_t) * 5);

                        /* Word 4 */
                        wqe->fcp_iread.total_xfer_len = nCmd->payload_length;

                        /* Word 5 */
                        wqe->fcp_iread.rsrvd5 = 0;

                        cstat->input_requests++;
                }
        } else {
                /* From the icmnd template, initialize words 4 - 11 */
                memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
                       sizeof(uint32_t) * 8);
                cstat->control_requests++;
        }

        if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
                bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
        /*
         * Finish initializing those WQE fields that are independent
         * of the nvme_cmnd request_buffer
         */

        /* Word 3 */
        bf_set(payload_offset_len, &wqe->fcp_icmd,
               (nCmd->rsplen + nCmd->cmdlen));

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
               phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

        /* Word 8 */
        wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

        /* Word 10 */
        bf_set(wqe_xchg, &wqe->fcp_iwrite.wqe_com, LPFC_NVME_XCHG);

        /* Words 13 14 15 are for PBDE support */

        pwqeq->vport = vport;
        return 0;
}
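
/*
 * First-burst sizing example for the write path above (hypothetical
 * values): with cfg_nvme_enable_fb set and NLP_FIRSTBURST on the node,
 * a 4 KB write against a pnode->nvme_fb_size of 64 KB gets
 * initial_xfer_len = 4096 (the full payload), while a 128 KB write is
 * capped at initial_xfer_len = 65536; without first burst it is 0.
 */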

/**
 * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
 * @vport: pointer to a host virtual N_Port data structure
 * @lpfc_ncmd: Pointer to lpfc scsi command
 *
 * This routine fixes up the command and response SGEs via
 * lpfc_nvme_adj_fcp_sgls() and then walks the scatter-gather list of
 * the attached nvmefc_fcp_req, building the data SGEs and chaining
 * additional SGL pages as needed.
 *
 * Return value :
 *   0 - Success
 *   1 - Failure
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
                      struct lpfc_io_buf *lpfc_ncmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
        struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
        struct sli4_hybrid_sgl *sgl_xtra = NULL;
        struct scatterlist *data_sg;
        struct sli4_sge *first_data_sgl;
        struct ulp_bde64 *bde;
        dma_addr_t physaddr = 0;
        uint32_t num_bde = 0;
        uint32_t dma_len = 0;
        uint32_t dma_offset = 0;
        int nseg, i, j;
        bool lsp_just_set = false;

        /* Fix up the command and response DMA stuff. */
        lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        if (nCmd->sg_cnt) {
                /*
                 * Jump over the cmd and rsp SGEs.  The fix routine
                 * has already adjusted for this.
                 */
                sgl += 2;

                first_data_sgl = sgl;
                lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
                if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                        "6058 Too many sg segments from "
                                        "NVME Transport.  Max %d, "
                                        "nvmeIO sg_cnt %d\n",
                                        phba->cfg_nvme_seg_cnt + 1,
                                        lpfc_ncmd->seg_cnt);
                        lpfc_ncmd->seg_cnt = 0;
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment count
                 * during probe that limits the number of sg elements in any
                 * single nvme command.  Just run through the seg_cnt and format
                 * the sge's.
                 */
                nseg = nCmd->sg_cnt;
                data_sg = nCmd->first_sgl;

                /* for tracking the segment boundaries */
                j = 2;
                for (i = 0; i < nseg; i++) {
                        if (data_sg == NULL) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                                "6059 dptr err %d, nseg %d\n",
                                                i, nseg);
                                lpfc_ncmd->seg_cnt = 0;
                                return 1;
                        }

                        sgl->word2 = 0;
                        if ((num_bde + 1) == nseg) {
                                bf_set(lpfc_sli4_sge_last, sgl, 1);
                                bf_set(lpfc_sli4_sge_type, sgl,
                                       LPFC_SGE_TYPE_DATA);
                        } else {
                                bf_set(lpfc_sli4_sge_last, sgl, 0);

                                /* expand the segment */
                                if (!lsp_just_set &&
                                    !((j + 1) % phba->border_sge_num) &&
                                    ((nseg - 1) != i)) {
                                        /* set LSP type */
                                        bf_set(lpfc_sli4_sge_type, sgl,
                                               LPFC_SGE_TYPE_LSP);

                                        sgl_xtra = lpfc_get_sgl_per_hdwq(
                                                        phba, lpfc_ncmd);

                                        if (unlikely(!sgl_xtra)) {
                                                lpfc_ncmd->seg_cnt = 0;
                                                return 1;
                                        }
                                        sgl->addr_lo = cpu_to_le32(putPaddrLow(
                                                       sgl_xtra->dma_phys_sgl));
                                        sgl->addr_hi = cpu_to_le32(putPaddrHigh(
                                                       sgl_xtra->dma_phys_sgl));

                                } else {
                                        bf_set(lpfc_sli4_sge_type, sgl,
                                               LPFC_SGE_TYPE_DATA);
                                }
                        }

                        if (!(bf_get(lpfc_sli4_sge_type, sgl) &
                                     LPFC_SGE_TYPE_LSP)) {
                                if ((nseg - 1) == i)
                                        bf_set(lpfc_sli4_sge_last, sgl, 1);

                                physaddr = data_sg->dma_address;
                                dma_len = data_sg->length;
                                sgl->addr_lo = cpu_to_le32(
                                                         putPaddrLow(physaddr));
                                sgl->addr_hi = cpu_to_le32(
                                                        putPaddrHigh(physaddr));

                                bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
                                sgl->word2 = cpu_to_le32(sgl->word2);
                                sgl->sge_len = cpu_to_le32(dma_len);

                                dma_offset += dma_len;
                                data_sg = sg_next(data_sg);

                                sgl++;

                                lsp_just_set = false;
                        } else {
                                sgl->word2 = cpu_to_le32(sgl->word2);

                                sgl->sge_len = cpu_to_le32(
                                                     phba->cfg_sg_dma_buf_size);

                                sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
1398                                i = i - 1;
1399
1400                                lsp_just_set = true;
1401                        }
1402
1403                        j++;
1404                }
1405                if (phba->cfg_enable_pbde) {
1406                        /* Use PBDE support for first SGL only, offset == 0 */
1407                        /* Words 13-15 */
1408                        bde = (struct ulp_bde64 *)
1409                                &wqe->words[13];
1410                        bde->addrLow = first_data_sgl->addr_lo;
1411                        bde->addrHigh = first_data_sgl->addr_hi;
1412                        bde->tus.f.bdeSize =
1413                                le32_to_cpu(first_data_sgl->sge_len);
1414                        bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1415                        bde->tus.w = cpu_to_le32(bde->tus.w);
1416
1417                        /* Word 11 */
1418                        bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);
1419                } else {
1420                        memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1421                        bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1422                }
1423
1424        } else {
1425                lpfc_ncmd->seg_cnt = 0;
1426
1427        /* For this clause to be valid, the payload_length
1428         * and sg_cnt must be zero.
1429                 */
1430                if (nCmd->payload_length != 0) {
1431                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1432                                        "6063 NVME DMA Prep Err: sg_cnt %d "
1433                                        "payload_length x%x\n",
1434                                        nCmd->sg_cnt, nCmd->payload_length);
1435                        return 1;
1436                }
1437        }
1438        return 0;
1439}
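
/* Editor's note: a minimal sketch (not compiled) of the chained-SGL walk
 * implemented in lpfc_nvme_prep_io_dma() above.  Whenever the next slot in
 * the current SGL page would fall on the page boundary, an LSP (Link
 * Segment Pointer) SGE is emitted to chain to a fresh page and the same
 * data segment is retried.  Names such as emit_data_sge() and
 * chain_to_new_page() are hypothetical, not driver symbols.
 */
#if 0
static void sketch_sgl_walk(int nseg, int border_sge_num)
{
        bool lsp_just_set = false;
        int i, j = 2;                   /* slots 0/1 hold the cmd/rsp SGEs */

        for (i = 0; i < nseg; i++) {
                if (!lsp_just_set && !((j + 1) % border_sge_num) &&
                    i != nseg - 1) {
                        chain_to_new_page();    /* LSP SGE, carries no data */
                        i--;                    /* retry this data segment */
                        lsp_just_set = true;
                } else {
                        emit_data_sge(i, /* last = */ i == nseg - 1);
                        lsp_just_set = false;
                }
                j++;
        }
}
#endif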
1440
1441/**
1442 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1443 * @pnvme_lport: Pointer to the driver's local port data
1444 * @pnvme_rport: Pointer to the rport receiving the IO
1445 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1446 * @pnvme_fcreq: IO request from nvme fc to driver.
1447 *
1448 * Driver registers this routine as its io request handler.  This
1449 * routine issues an fcp WQE with data from the @pnvme_fcreq
1450 * data structure to the rport indicated in @pnvme_rport.
1451 *
1452 * Return value :
1453 *   0 - Success
1454 *   -EINVAL, -ENODEV, -EBUSY, or -ENOMEM - Failure
1455 **/
1456static int
1457lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1458                        struct nvme_fc_remote_port *pnvme_rport,
1459                        void *hw_queue_handle,
1460                        struct nvmefc_fcp_req *pnvme_fcreq)
1461{
1462        int ret = 0;
1463        int expedite = 0;
1464        int idx, cpu;
1465        struct lpfc_nvme_lport *lport;
1466        struct lpfc_fc4_ctrl_stat *cstat;
1467        struct lpfc_vport *vport;
1468        struct lpfc_hba *phba;
1469        struct lpfc_nodelist *ndlp;
1470        struct lpfc_io_buf *lpfc_ncmd;
1471        struct lpfc_nvme_rport *rport;
1472        struct lpfc_nvme_qhandle *lpfc_queue_info;
1473        struct lpfc_nvme_fcpreq_priv *freqpriv;
1474        struct nvme_common_command *sqe;
1475#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1476        uint64_t start = 0;
1477#endif
1478
1479        /* Validate pointers. LLDD fault handling with transport does
1480         * have timing races.
1481         */
1482        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1483        if (unlikely(!lport)) {
1484                ret = -EINVAL;
1485                goto out_fail;
1486        }
1487
1488        vport = lport->vport;
1489
1490        if (unlikely(!hw_queue_handle)) {
1491                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1492                                 "6117 Fail IO, NULL hw_queue_handle\n");
1493                atomic_inc(&lport->xmt_fcp_err);
1494                ret = -EBUSY;
1495                goto out_fail;
1496        }
1497
1498        phba = vport->phba;
1499
1500        if (unlikely(vport->load_flag & FC_UNLOADING)) {
1501                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1502                                 "6124 Fail IO, Driver unload\n");
1503                atomic_inc(&lport->xmt_fcp_err);
1504                ret = -ENODEV;
1505                goto out_fail;
1506        }
1507
1508        freqpriv = pnvme_fcreq->private;
1509        if (unlikely(!freqpriv)) {
1510                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1511                                 "6158 Fail IO, NULL request data\n");
1512                atomic_inc(&lport->xmt_fcp_err);
1513                ret = -EINVAL;
1514                goto out_fail;
1515        }
1516
1517#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1518        if (phba->ktime_on)
1519                start = ktime_get_ns();
1520#endif
1521        rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1522        lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1523
1524        /*
1525         * Catch race where our node has transitioned, but the
1526         * transport is still transitioning.
1527         */
1528        ndlp = rport->ndlp;
1529        if (!ndlp) {
1530                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1531                                 "6053 Busy IO, ndlp not ready: rport x%px "
1532                                  "ndlp x%px, DID x%06x\n",
1533                                 rport, ndlp, pnvme_rport->port_id);
1534                atomic_inc(&lport->xmt_fcp_err);
1535                ret = -EBUSY;
1536                goto out_fail;
1537        }
1538
1539        /* The remote node has to be a mapped target or it's an error. */
1540        if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1541            (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1542                lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1543                                 "6036 Fail IO, DID x%06x not ready for "
1544                                 "IO. State x%x, Type x%x Flg x%x\n",
1545                                 pnvme_rport->port_id,
1546                                 ndlp->nlp_state, ndlp->nlp_type,
1547                                 ndlp->fc4_xpt_flags);
1548                atomic_inc(&lport->xmt_fcp_bad_ndlp);
1549                ret = -EBUSY;
1550                goto out_fail;
1551
1552        }
1553
1554        /* Currently only NVME keep-alive commands are expedited
1555         * when the driver runs low on resources. These are only
1556         * issued on the admin queue, qidx 0.
1557         */
1558        if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1559                sqe = &((struct nvme_fc_cmd_iu *)
1560                        pnvme_fcreq->cmdaddr)->sqe.common;
1561                if (sqe->opcode == nvme_admin_keep_alive)
1562                        expedite = 1;
1563        }
1564
1565        /* The node is shared with FCP IO, make sure the IO pending count does
1566         * not exceed the programmed depth.
1567         */
1568        if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1569                if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1570                    !expedite) {
1571                        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1572                                         "6174 Fail IO, ndlp qdepth exceeded: "
1573                                         "idx %d DID %x pend %d qdepth %d\n",
1574                                         lpfc_queue_info->index, ndlp->nlp_DID,
1575                                         atomic_read(&ndlp->cmd_pending),
1576                                         ndlp->cmd_qdepth);
1577                        atomic_inc(&lport->xmt_fcp_qdepth);
1578                        ret = -EBUSY;
1579                        goto out_fail;
1580                }
1581        }
1582
1583        /* Lookup Hardware Queue index based on fcp_io_sched module parameter */
1584        if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1585                idx = lpfc_queue_info->index;
1586        } else {
1587                cpu = raw_smp_processor_id();
1588                idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1589        }
1590
1591        lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1592        if (lpfc_ncmd == NULL) {
1593                atomic_inc(&lport->xmt_fcp_noxri);
1594                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1595                                 "6065 Fail IO, driver buffer pool is empty: "
1596                                 "idx %d DID %x\n",
1597                                 lpfc_queue_info->index, ndlp->nlp_DID);
1598                ret = -EBUSY;
1599                goto out_fail;
1600        }
1601#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1602        if (start) {
1603                lpfc_ncmd->ts_cmd_start = start;
1604                lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1605        } else {
1606                lpfc_ncmd->ts_cmd_start = 0;
1607        }
1608#endif
1609
1610        /*
1611         * Store the data needed by the driver to issue, abort, and complete
1612         * an IO.
1613         * Do not let the IO hang out forever.  There is no midlayer issuing
1614         * an abort so inform the FW of the maximum IO pending time.
1615         */
1616        freqpriv->nvme_buf = lpfc_ncmd;
1617        lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1618        lpfc_ncmd->ndlp = ndlp;
1619        lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1620
1621        /*
1622         * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1623         * This identifier was created in our hardware queue create callback
1624         * routine. The driver is now dependent on the IO queue steering from
1625         * the transport.  We trust that the upper NVME layers know which
1626         * index to use and that they have affinitized a CPU to this hardware
1627         * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1628         */
1629        lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1630        cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1631
1632        lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1633        ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1634        if (ret) {
1635                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1636                                 "6175 Fail IO, Prep DMA: "
1637                                 "idx %d DID %x\n",
1638                                 lpfc_queue_info->index, ndlp->nlp_DID);
1639                atomic_inc(&lport->xmt_fcp_err);
1640                ret = -ENOMEM;
1641                goto out_free_nvme_buf;
1642        }
1643
1644        lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1645                         lpfc_ncmd->cur_iocbq.sli4_xritag,
1646                         lpfc_queue_info->index, ndlp->nlp_DID);
1647
1648        ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1649        if (ret) {
1650                atomic_inc(&lport->xmt_fcp_wqerr);
1651                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1652                                 "6113 Fail IO, Could not issue WQE err %x "
1653                                 "sid: x%x did: x%x oxid: x%x\n",
1654                                 ret, vport->fc_myDID, ndlp->nlp_DID,
1655                                 lpfc_ncmd->cur_iocbq.sli4_xritag);
1656                goto out_free_nvme_buf;
1657        }
1658
1659        if (phba->cfg_xri_rebalancing)
1660                lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1661
1662#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1663        if (lpfc_ncmd->ts_cmd_start)
1664                lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1665
1666        if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
1667                cpu = raw_smp_processor_id();
1668                this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1669                lpfc_ncmd->cpu = cpu;
1670                if (idx != cpu)
1671                        lpfc_printf_vlog(vport,
1672                                         KERN_INFO, LOG_NVME_IOERR,
1673                                        "6702 CPU Check cmd: "
1674                                        "cpu %d wq %d\n",
1675                                        lpfc_ncmd->cpu,
1676                                        lpfc_queue_info->index);
1677        }
1678#endif
1679        return 0;
1680
1681 out_free_nvme_buf:
1682        if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1683                if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1684                        cstat->output_requests--;
1685                else
1686                        cstat->input_requests--;
1687        } else
1688                cstat->control_requests--;
1689        lpfc_release_nvme_buf(phba, lpfc_ncmd);
1690 out_fail:
1691        return ret;
1692}
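
/* Editor's note: a minimal sketch (not compiled) of the two hardware-queue
 * selection policies used by the submit path above.  With
 * LPFC_FCP_SCHED_BY_HDWQ the index supplied in the transport's queue handle
 * is trusted; otherwise the IO is steered to the hardware queue affinitized
 * to the submitting CPU.  sketch_pick_hdwq() is hypothetical, not a driver
 * symbol.
 */
#if 0
static int sketch_pick_hdwq(struct lpfc_hba *phba,
                            struct lpfc_nvme_qhandle *qh)
{
        if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
                return qh->index;
        return phba->sli4_hba.cpu_map[raw_smp_processor_id()].hdwq;
}
#endif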
1693
1694/**
1695 * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1696 * @phba: Pointer to HBA context object
1697 * @cmdiocb: Pointer to command iocb object.
1698 * @abts_cmpl: Pointer to wcqe complete object.
1699 *
1700 * This is the callback function for any NVME FCP IO that was aborted.
1701 *
1702 * Return value:
1703 *   None
1704 **/
1705void
1706lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1707                           struct lpfc_wcqe_complete *abts_cmpl)
1708{
1709        lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1710                        "6145 ABORT_XRI_CN completing on rpi x%x "
1711                        "original iotag x%x, abort cmd iotag x%x "
1712                        "req_tag x%x, status x%x, hwstatus x%x\n",
1713                        cmdiocb->iocb.un.acxri.abortContextTag,
1714                        cmdiocb->iocb.un.acxri.abortIoTag,
1715                        cmdiocb->iotag,
1716                        bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1717                        bf_get(lpfc_wcqe_c_status, abts_cmpl),
1718                        bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1719        lpfc_sli_release_iocbq(phba, cmdiocb);
1720}
1721
1722/**
1723 * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1724 * @pnvme_lport: Pointer to the driver's local port data
1725 * @pnvme_rport: Pointer to the rport whose IO is being aborted
1726 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1727 * @pnvme_fcreq: IO request from nvme fc to driver.
1728 *
1729 * Driver registers this routine as its nvme request io abort handler.  This
1730 * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
1731 * data structure to the rport indicated in @pnvme_rport.  This routine
1732 * is executed asynchronously - once the target is validated as "MAPPED" and
1733 * ready for IO, the driver issues the abort request and returns.
1734 *
1735 * Return value:
1736 *   None
1737 **/
1738static void
1739lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1740                    struct nvme_fc_remote_port *pnvme_rport,
1741                    void *hw_queue_handle,
1742                    struct nvmefc_fcp_req *pnvme_fcreq)
1743{
1744        struct lpfc_nvme_lport *lport;
1745        struct lpfc_vport *vport;
1746        struct lpfc_hba *phba;
1747        struct lpfc_io_buf *lpfc_nbuf;
1748        struct lpfc_iocbq *nvmereq_wqe;
1749        struct lpfc_nvme_fcpreq_priv *freqpriv;
1750        unsigned long flags;
1751        int ret_val;
1752
1753        /* Validate pointers. LLDD fault handling with transport does
1754         * have timing races.
1755         */
1756        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1757        if (unlikely(!lport))
1758                return;
1759
1760        vport = lport->vport;
1761
1762        if (unlikely(!hw_queue_handle)) {
1763                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1764                                 "6129 Fail Abort, HW Queue Handle NULL.\n");
1765                return;
1766        }
1767
1768        phba = vport->phba;
1769        freqpriv = pnvme_fcreq->private;
1770
1771        if (unlikely(!freqpriv))
1772                return;
1773        if (vport->load_flag & FC_UNLOADING)
1774                return;
1775
1776        /* Announce entry to the abort handler. */
1777        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1778                         "6002 Abort Request to rport DID x%06x "
1779                         "for nvme_fc_req x%px\n",
1780                         pnvme_rport->port_id,
1781                         pnvme_fcreq);
1782
1783        /* If the hba is getting reset, this flag is set.  It is
1784         * cleared when the reset is complete and rings reestablished.
1785         */
1786        spin_lock_irqsave(&phba->hbalock, flags);
1787        /* driver queued commands are in process of being flushed */
1788        if (phba->hba_flag & HBA_IOQ_FLUSH) {
1789                spin_unlock_irqrestore(&phba->hbalock, flags);
1790                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1791                                 "6139 Driver in reset cleanup - flushing "
1792                                 "NVME Req now.  hba_flag x%x\n",
1793                                 phba->hba_flag);
1794                return;
1795        }
1796
1797        lpfc_nbuf = freqpriv->nvme_buf;
1798        if (!lpfc_nbuf) {
1799                spin_unlock_irqrestore(&phba->hbalock, flags);
1800                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1801                                 "6140 NVME IO req has no matching lpfc nvme "
1802                                 "io buffer.  Skipping abort req.\n");
1803                return;
1804        } else if (!lpfc_nbuf->nvmeCmd) {
1805                spin_unlock_irqrestore(&phba->hbalock, flags);
1806                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1807                                 "6141 lpfc NVME IO req has no nvme_fcreq "
1808                                 "io buffer.  Skipping abort req.\n");
1809                return;
1810        }
1811        nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1812
1813        /* Guard against IO completion being called at same time */
1814        spin_lock(&lpfc_nbuf->buf_lock);
1815
1816        /*
1817         * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1818         * state must match the nvme_fcreq passed by the nvme
1819         * transport.  If they don't match, it is likely the driver
1820         * has already completed the NVME IO and the nvme transport
1821         * has not seen it yet.
1822         */
1823        if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1824                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1825                                 "6143 NVME req mismatch: "
1826                                 "lpfc_nbuf x%px nvmeCmd x%px, "
1827                                 "pnvme_fcreq x%px.  Skipping Abort xri x%x\n",
1828                                 lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1829                                 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1830                goto out_unlock;
1831        }
1832
1833        /* Don't abort IOs no longer on the pending queue. */
1834        if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1835                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1836                                 "6142 NVME IO req x%px not queued - skipping "
1837                                 "abort req xri x%x\n",
1838                                 pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1839                goto out_unlock;
1840        }
1841
1842        atomic_inc(&lport->xmt_fcp_abort);
1843        lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1844                         nvmereq_wqe->sli4_xritag,
1845                         nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1846
1847        /* Outstanding abort is in progress */
1848        if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1849                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1850                                 "6144 Outstanding NVME I/O Abort Request "
1851                                 "still pending on nvme_fcreq x%px, "
1852                                 "lpfc_ncmd x%px xri x%x\n",
1853                                 pnvme_fcreq, lpfc_nbuf,
1854                                 nvmereq_wqe->sli4_xritag);
1855                goto out_unlock;
1856        }
1857
1858        ret_val = lpfc_sli4_issue_abort_iotag(phba, nvmereq_wqe,
1859                                              lpfc_nvme_abort_fcreq_cmpl);
1860
1861        spin_unlock(&lpfc_nbuf->buf_lock);
1862        spin_unlock_irqrestore(&phba->hbalock, flags);
1863
1864        /* Make sure HBA is alive */
1865        lpfc_issue_hb_tmo(phba);
1866
1867        if (ret_val != WQE_SUCCESS) {
1868                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1869                                 "6137 Failed abts issue_wqe with status x%x "
1870                                 "for nvme_fcreq x%px.\n",
1871                                 ret_val, pnvme_fcreq);
1872                return;
1873        }
1874
1875        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1876                         "6138 Transport Abort NVME Request Issued for "
1877                         "ox_id x%x\n",
1878                         nvmereq_wqe->sli4_xritag);
1879        return;
1880
1881out_unlock:
1882        spin_unlock(&lpfc_nbuf->buf_lock);
1883        spin_unlock_irqrestore(&phba->hbalock, flags);
1884        return;
1885}
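
/* Editor's note: the abort path above relies on a fixed lock order, shown
 * here as a minimal sketch (not compiled): phba->hbalock is taken first
 * (IRQ-safe), then the per-IO buf_lock, so an abort cannot race the
 * completion handler for the same IO.
 */
#if 0
        spin_lock_irqsave(&phba->hbalock, flags);
        spin_lock(&lpfc_nbuf->buf_lock);
        /* validate that lpfc_nbuf->nvmeCmd still matches the transport's
         * request, then issue the abort WQE
         */
        spin_unlock(&lpfc_nbuf->buf_lock);
        spin_unlock_irqrestore(&phba->hbalock, flags);
#endif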
1886
1887/* Declare and initialize an instance of the FC NVME template. */
1888static struct nvme_fc_port_template lpfc_nvme_template = {
1889        /* initiator-based functions */
1890        .localport_delete  = lpfc_nvme_localport_delete,
1891        .remoteport_delete = lpfc_nvme_remoteport_delete,
1892        .create_queue = lpfc_nvme_create_queue,
1893        .delete_queue = lpfc_nvme_delete_queue,
1894        .ls_req       = lpfc_nvme_ls_req,
1895        .fcp_io       = lpfc_nvme_fcp_io_submit,
1896        .ls_abort     = lpfc_nvme_ls_abort,
1897        .fcp_abort    = lpfc_nvme_fcp_abort,
1898        .xmt_ls_rsp   = lpfc_nvme_xmt_ls_rsp,
1899
1900        .max_hw_queues = 1,
1901        .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1902        .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1903        .dma_boundary = 0xFFFFFFFF,
1904
1905        /* Sizes of additional private data for data structures.
1906         * The lsrqst private data is not used at this time.
1907         */
1908        .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1909        .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1910        .lsrqst_priv_sz = 0,
1911        .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
1912};
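
/* Editor's note: the *_priv_sz fields above tell the nvme-fc transport how
 * much driver-private memory to allocate with each transport object.  A
 * minimal sketch (not compiled) of how the driver recovers its state
 * through the object's ->private pointer, as done throughout this file:
 */
#if 0
static struct lpfc_nvme_lport *
sketch_lport_of(struct nvme_fc_local_port *localport)
{
        /* local_priv_sz bytes are reserved for the driver's lport state */
        return (struct lpfc_nvme_lport *)localport->private;
}
#endif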
1913
1914/*
1915 * lpfc_get_nvme_buf - Get an nvme buffer from the io_buf_list of the HBA
1916 *
1917 * This routine removes an nvme buffer from the head of the @idx hdwq
1918 * io_buf_list and returns it to the caller.
1919 *
1920 * Return codes:
1921 *   NULL - Error
1922 *   Pointer to lpfc_io_buf - Success
1923 **/
1924static struct lpfc_io_buf *
1925lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1926                  int idx, int expedite)
1927{
1928        struct lpfc_io_buf *lpfc_ncmd;
1929        struct lpfc_sli4_hdw_queue *qp;
1930        struct sli4_sge *sgl;
1931        struct lpfc_iocbq *pwqeq;
1932        union lpfc_wqe128 *wqe;
1933
1934        lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
1935
1936        if (lpfc_ncmd) {
1937                pwqeq = &(lpfc_ncmd->cur_iocbq);
1938                wqe = &pwqeq->wqe;
1939
1940                /* Setup key fields in buffer that may have been changed
1941                 * if other protocols used this buffer.
1942                 */
1943                pwqeq->iocb_flag = LPFC_IO_NVME;
1944                pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
1945                lpfc_ncmd->start_time = jiffies;
1946                lpfc_ncmd->flags = 0;
1947
1948                /* Rsp SGE will be filled in when we rcv an IO
1949                 * from the NVME Layer to be sent.
1950                 * The cmd is going to be embedded so we need a SKIP SGE.
1951                 */
1952                sgl = lpfc_ncmd->dma_sgl;
1953                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1954                bf_set(lpfc_sli4_sge_last, sgl, 0);
1955                sgl->word2 = cpu_to_le32(sgl->word2);
1956                /* Fill in word 3 / sgl_len during cmd submission */
1957
1958                /* Initialize 64 bytes only */
1959                memset(wqe, 0, sizeof(union lpfc_wqe));
1960
1961                if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1962                        atomic_inc(&ndlp->cmd_pending);
1963                        lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
1964                }
1965
1966        } else {
1967                qp = &phba->sli4_hba.hdwq[idx];
1968                qp->empty_io_bufs++;
1969        }
1970
1971        return  lpfc_ncmd;
1972}
1973
1974/**
1975 * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list
1976 * @phba: The Hba for which this call is being executed.
1977 * @lpfc_ncmd: The nvme buffer which is being released.
1978 *
1979 * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
1980 * of the @phba lpfc_io_buf_list. For SLI4, XRIs are tied to the nvme buffer
1981 * and cannot be reused for at least RA_TOV amount of time if the exchange
1982 * was aborted.
1983 **/
1984static void
1985lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
1986{
1987        struct lpfc_sli4_hdw_queue *qp;
1988        unsigned long iflag = 0;
1989
1990        if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
1991                atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
1992
1993        lpfc_ncmd->ndlp = NULL;
1994        lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
1995
1996        qp = lpfc_ncmd->hdwq;
1997        if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
1998                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1999                                "6310 XB release deferred for "
2000                                "ox_id x%x on reqtag x%x\n",
2001                                lpfc_ncmd->cur_iocbq.sli4_xritag,
2002                                lpfc_ncmd->cur_iocbq.iotag);
2003
2004                spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
2005                list_add_tail(&lpfc_ncmd->list,
2006                        &qp->lpfc_abts_io_buf_list);
2007                qp->abts_nvme_io_bufs++;
2008                spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
2009        } else
2010                lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2011}
2012
2013/**
2014 * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2015 * @vport: the lpfc_vport instance requesting a localport.
2016 *
2017 * This routine is invoked to create an nvme localport instance to bind
2018 * to the nvme_fc_transport.  It is called once during driver load
2019 * like lpfc_create_shost after all other services are initialized.
2020 * It requires a vport, vpi, and wwns at call time.  Other localport
2021 * parameters are modified as the driver's FCID and the Fabric WWN
2022 * are established.
2023 *
2024 * Return codes
2025 *      0 - successful
2026 *      -ENOMEM - no heap memory available
2027 *      other values - from nvme registration upcall
2028 **/
2029int
2030lpfc_nvme_create_localport(struct lpfc_vport *vport)
2031{
2032        int ret = 0;
2033        struct lpfc_hba  *phba = vport->phba;
2034        struct nvme_fc_port_info nfcp_info;
2035        struct nvme_fc_local_port *localport;
2036        struct lpfc_nvme_lport *lport;
2037
2038        /* Initialize this localport instance.  The vport wwn usage ensures
2039         * that NPIV is accounted for.
2040         */
2041        memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2042        nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2043        nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2044        nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2045
2046        /* Advertise cfg_nvme_seg_cnt + 1 to the transport because it takes
2047         * page alignment into account. When space for the SGL is allocated
2048         * we allocate + 3: one for cmd, one for rsp, and one for alignment.
2049         */
2050        lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2051
2052        /* Advertise how many hw queues we support based on cfg_hdw_queue,
2053         * which will not exceed cpu count.
2054         */
2055        lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2056
2057        if (!IS_ENABLED(CONFIG_NVME_FC))
2058                return ret;
2059
2060        /* localport is allocated from the stack, but the registration
2061         * call allocates heap memory as well as the private area.
2062         */
2063
2064        ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2065                                         &vport->phba->pcidev->dev, &localport);
2066        if (!ret) {
2067                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2068                                 "6005 Successfully registered local "
2069                                 "NVME port num %d, localP x%px, private "
2070                                 "x%px, sg_seg %d\n",
2071                                 localport->port_num, localport,
2072                                 localport->private,
2073                                 lpfc_nvme_template.max_sgl_segments);
2074
2075                /* Private is our lport size declared in the template. */
2076                lport = (struct lpfc_nvme_lport *)localport->private;
2077                vport->localport = localport;
2078                lport->vport = vport;
2079                vport->nvmei_support = 1;
2080
2081                atomic_set(&lport->xmt_fcp_noxri, 0);
2082                atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2083                atomic_set(&lport->xmt_fcp_qdepth, 0);
2084                atomic_set(&lport->xmt_fcp_err, 0);
2085                atomic_set(&lport->xmt_fcp_wqerr, 0);
2086                atomic_set(&lport->xmt_fcp_abort, 0);
2087                atomic_set(&lport->xmt_ls_abort, 0);
2088                atomic_set(&lport->xmt_ls_err, 0);
2089                atomic_set(&lport->cmpl_fcp_xb, 0);
2090                atomic_set(&lport->cmpl_fcp_err, 0);
2091                atomic_set(&lport->cmpl_ls_xb, 0);
2092                atomic_set(&lport->cmpl_ls_err, 0);
2093
2094                atomic_set(&lport->fc4NvmeLsRequests, 0);
2095                atomic_set(&lport->fc4NvmeLsCmpls, 0);
2096        }
2097
2098        return ret;
2099}
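
/* Editor's note: a minimal sketch (not compiled) of the SGL sizing
 * arithmetic used above.  The driver allocates cfg_nvme_seg_cnt + 3 SGEs
 * per IO (cmd + rsp + alignment) but advertises cfg_nvme_seg_cnt + 1 to
 * the transport, which accounts for page alignment itself.
 * sketch_advertised_segs() is hypothetical, not a driver symbol.
 */
#if 0
static u32 sketch_advertised_segs(struct lpfc_hba *phba)
{
        return phba->cfg_nvme_seg_cnt + 1;
}
#endif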
2100
2101#if (IS_ENABLED(CONFIG_NVME_FC))
2102/* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2103 *
2104 * The driver has to wait for the host nvme transport to callback
2105 * indicating the localport has successfully unregistered all
2106 * resources.  Since this is an uninterruptible wait, loop every ten
2107 * seconds and print a message indicating no progress.
2108 *
2109 * An uninterruptible wait is used because of the risk of transport-to-
2110 * driver state mismatch.
2111 */
2112static void
2113lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2114                           struct lpfc_nvme_lport *lport,
2115                           struct completion *lport_unreg_cmp)
2116{
2117        u32 wait_tmo;
2118        int ret, i, pending = 0;
2119        struct lpfc_sli_ring  *pring;
2120        struct lpfc_hba  *phba = vport->phba;
2121        struct lpfc_sli4_hdw_queue *qp;
2122        int abts_scsi, abts_nvme;
2123
2124        /* The host transport has to clean up and confirm, requiring an
2125         * indefinite wait. Print a message if a 10 second wait expires and
2126         * renew the wait. This is unexpected.
2127         */
2128        wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2129        while (true) {
2130                ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2131                if (unlikely(!ret)) {
2132                        pending = 0;
2133                        abts_scsi = 0;
2134                        abts_nvme = 0;
2135                        for (i = 0; i < phba->cfg_hdw_queue; i++) {
2136                                qp = &phba->sli4_hba.hdwq[i];
2137                                pring = qp->io_wq->pring;
2138                                if (!pring)
2139                                        continue;
2140                                pending += pring->txcmplq_cnt;
2141                                abts_scsi += qp->abts_scsi_io_bufs;
2142                                abts_nvme += qp->abts_nvme_io_bufs;
2143                        }
2144                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2145                                         "6176 Lport x%px Localport x%px wait "
2146                                         "timed out. Pending %d [%d:%d]. "
2147                                         "Renewing.\n",
2148                                         lport, vport->localport, pending,
2149                                         abts_scsi, abts_nvme);
2150                        continue;
2151                }
2152                break;
2153        }
2154        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2155                         "6177 Lport x%px Localport x%px Complete Success\n",
2156                         lport, vport->localport);
2157}
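
/* Editor's note: a minimal sketch (not compiled) of the renewing,
 * uninterruptible wait pattern used above.  The completion is signalled by
 * the transport's localport_delete callback; sketch_renewing_wait() is
 * hypothetical, not a driver symbol.
 */
#if 0
static void sketch_renewing_wait(struct completion *done, unsigned long tmo)
{
        while (!wait_for_completion_timeout(done, tmo))
                pr_warn("unreg still pending, renewing %lu jiffy wait\n", tmo);
}
#endif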
2158#endif
2159
2160/**
2161 * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2162 * @vport: pointer to a host virtual N_Port data structure
2163 *
2164 * This routine is invoked to destroy all lports bound to the phba.
2165 * The lport memory was allocated by the nvme fc transport and is
2166 * released there.  This routine ensures all rports bound to the
2167 * lport have been disconnected.
2168 *
2169 **/
2170void
2171lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2172{
2173#if (IS_ENABLED(CONFIG_NVME_FC))
2174        struct nvme_fc_local_port *localport;
2175        struct lpfc_nvme_lport *lport;
2176        int ret;
2177        DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2178
2179        if (vport->nvmei_support == 0)
2180                return;
2181
2182        localport = vport->localport;
2183        lport = (struct lpfc_nvme_lport *)localport->private;
2184
2185        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2186                         "6011 Destroying NVME localport x%px\n",
2187                         localport);
2188
2189        /* The lport's rport list is clear.  Unregister the
2190         * lport and release resources.
2191         */
2192        lport->lport_unreg_cmp = &lport_unreg_cmp;
2193        ret = nvme_fc_unregister_localport(localport);
2194
2195        /* Wait for completion.  This either blocks
2196         * indefinitely or succeeds
2197         */
2198        lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2199        vport->localport = NULL;
2200
2201        /* Regardless of the unregister upcall response, clear
2202         * nvmei_support.  All rports are unregistered and the
2203         * driver will clean up.
2204         */
2205        vport->nvmei_support = 0;
2206        if (ret == 0) {
2207                lpfc_printf_vlog(vport,
2208                                 KERN_INFO, LOG_NVME_DISC,
2209                                 "6009 Unregistered lport Success\n");
2210        } else {
2211                lpfc_printf_vlog(vport,
2212                                 KERN_INFO, LOG_NVME_DISC,
2213                                 "6010 Unregistered lport "
2214                                 "Failed, status x%x\n",
2215                                 ret);
2216        }
2217#endif
2218}
2219
2220void
2221lpfc_nvme_update_localport(struct lpfc_vport *vport)
2222{
2223#if (IS_ENABLED(CONFIG_NVME_FC))
2224        struct nvme_fc_local_port *localport;
2225        struct lpfc_nvme_lport *lport;
2226
2227        localport = vport->localport;
2228        if (!localport) {
2229                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2230                                 "6710 Update NVME fail. No localport\n");
2231                return;
2232        }
2233        lport = (struct lpfc_nvme_lport *)localport->private;
2234        if (!lport) {
2235                lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2236                                 "6171 Update NVME fail. localP x%px, No lport\n",
2237                                 localport);
2238                return;
2239        }
2240        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2241                         "6012 Update NVME lport x%px did x%x\n",
2242                         localport, vport->fc_myDID);
2243
2244        localport->port_id = vport->fc_myDID;
2245        if (localport->port_id == 0)
2246                localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2247        else
2248                localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2249
2250        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2251                         "6030 bound lport x%px to DID x%06x\n",
2252                         lport, localport->port_id);
2253#endif
2254}
2255
2256int
2257lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2258{
2259#if (IS_ENABLED(CONFIG_NVME_FC))
2260        int ret = 0;
2261        struct nvme_fc_local_port *localport;
2262        struct lpfc_nvme_lport *lport;
2263        struct lpfc_nvme_rport *rport;
2264        struct lpfc_nvme_rport *oldrport;
2265        struct nvme_fc_remote_port *remote_port;
2266        struct nvme_fc_port_info rpinfo;
2267        struct lpfc_nodelist *prev_ndlp = NULL;
2268        struct fc_rport *srport = ndlp->rport;
2269
2270        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2271                         "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2272                         ndlp->nlp_DID, ndlp->nlp_type);
2273
2274        localport = vport->localport;
2275        if (!localport)
2276                return 0;
2277
2278        lport = (struct lpfc_nvme_lport *)localport->private;
2279
2280        /* NVME rports are not preserved across devloss.
2281         * Just register this instance.  rpinfo->dev_loss_tmo is
2282         * taken from the SCSI rport when available, else from the
2283         * vport default.  The driver communicates port role
2284         * capabilities consistent with the PRLI response data.
2285         */
2286        memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2287        rpinfo.port_id = ndlp->nlp_DID;
2288        if (ndlp->nlp_type & NLP_NVME_TARGET)
2289                rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2290        if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2291                rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2292
2293        if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2294                rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2295
2296        rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2297        rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2298        if (srport)
2299                rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
2300        else
2301                rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
2302
2303        spin_lock_irq(&ndlp->lock);
2304        oldrport = lpfc_ndlp_get_nrport(ndlp);
2305        if (oldrport) {
2306                prev_ndlp = oldrport->ndlp;
2307                spin_unlock_irq(&ndlp->lock);
2308        } else {
2309                spin_unlock_irq(&ndlp->lock);
2310                if (!lpfc_nlp_get(ndlp)) {
2311                        dev_warn(&vport->phba->pcidev->dev,
2312                                 "Warning - No node ref - exit register\n");
2313                        return 0;
2314                }
2315        }
2316
2317        ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2318        if (!ret) {
2319                /* If the ndlp already has an nrport, this is just
2320                 * a resume of the existing rport.  Else this is a
2321                 * new rport.
2322                 */
2323                /* Guard against an unregister/reregister
2324                 * race that leaves the WAIT flag set.
2325                 */
2326                spin_lock_irq(&ndlp->lock);
2327                ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
2328                ndlp->fc4_xpt_flags |= NVME_XPT_REGD;
2329                spin_unlock_irq(&ndlp->lock);
2330                rport = remote_port->private;
2331                if (oldrport) {
2332
2333                        /* Sever the ndlp<->rport association
2334                         * before dropping the ndlp ref from
2335                         * register.
2336                         */
2337                        spin_lock_irq(&ndlp->lock);
2338                        ndlp->nrport = NULL;
2339                        ndlp->fc4_xpt_flags &= ~NLP_WAIT_FOR_UNREG;
2340                        spin_unlock_irq(&ndlp->lock);
2341                        rport->ndlp = NULL;
2342                        rport->remoteport = NULL;
2343
2344                        /* Reference only removed if previous NDLP is no longer
2345                         * active. It might be just a swap and removing the
2346                         * reference would cause a premature cleanup.
2347                         */
2348                        if (prev_ndlp && prev_ndlp != ndlp) {
2349                                if (!prev_ndlp->nrport)
2350                                        lpfc_nlp_put(prev_ndlp);
2351                        }
2352                }
2353
2354                /* Clean bind the rport to the ndlp. */
2355                rport->remoteport = remote_port;
2356                rport->lport = lport;
2357                rport->ndlp = ndlp;
2358                spin_lock_irq(&ndlp->lock);
2359                ndlp->nrport = rport;
2360                spin_unlock_irq(&ndlp->lock);
2361                lpfc_printf_vlog(vport, KERN_INFO,
2362                                 LOG_NVME_DISC | LOG_NODE,
2363                                 "6022 Bind lport x%px to remoteport x%px "
2364                                 "rport x%px WWNN 0x%llx, "
2365                                 "Rport WWPN 0x%llx DID "
2366                                 "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
2367                                 lport, remote_port, rport,
2368                                 rpinfo.node_name, rpinfo.port_name,
2369                                 rpinfo.port_id, rpinfo.port_role,
2370                                 ndlp, prev_ndlp);
2371        } else {
2372                lpfc_printf_vlog(vport, KERN_ERR,
2373                                 LOG_TRACE_EVENT,
2374                                 "6031 RemotePort Registration failed "
2375                                 "err: %d, DID x%06x\n",
2376                                 ret, ndlp->nlp_DID);
2377        }
2378
2379        return ret;
2380#else
2381        return 0;
2382#endif
2383}
2384
2385/*
2386 * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
2387 *
2388 * If the ndlp represents an NVME Target that we are logged into,
2389 * ping the NVME FC Transport layer to initiate a device rescan
2390 * on this remote NPort.
2391 */
2392void
2393lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2394{
2395#if (IS_ENABLED(CONFIG_NVME_FC))
2396        struct lpfc_nvme_rport *nrport;
2397        struct nvme_fc_remote_port *remoteport = NULL;
2398
2399        spin_lock_irq(&ndlp->lock);
2400        nrport = lpfc_ndlp_get_nrport(ndlp);
2401        if (nrport)
2402                remoteport = nrport->remoteport;
2403        spin_unlock_irq(&ndlp->lock);
2404
2405        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2406                         "6170 Rescan NPort DID x%06x type x%x "
2407                         "state x%x nrport x%px remoteport x%px\n",
2408                         ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
2409                         nrport, remoteport);
2410
2411        if (!nrport || !remoteport)
2412                goto rescan_exit;
2413
2414        /* Only rescan if the rport role has DISCOVERY and node is MAPPED */
2415        if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2416            ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2417                nvme_fc_rescan_remoteport(remoteport);
2418
2419                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2420                                 "6172 NVME rescanned DID x%06x "
2421                                 "port_state x%x\n",
2422                                 ndlp->nlp_DID, remoteport->port_state);
2423        }
2424        return;
2425 rescan_exit:
2426        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2427                         "6169 Skip NVME Rport Rescan, NVME remoteport "
2428                         "unregistered\n");
2429#endif
2430}
2431
2432/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2433 *
2434 * There is no notion of Devloss or rport recovery from the current
2435 * nvme_transport perspective.  Loss of an rport just means IO cannot
2436 * be sent and recovery is completely up to the initiator.
2437 * For now, the driver just unbinds the DID and port_role so that
2438 * no further IO can be issued.  Changes are planned for later.
2439 *
2440 * Notes - the ndlp reference count is not decremented here since
2441 * there is no nvme_transport api for devloss.  Node ref count
2442 * is only adjusted in driver unload.
2443 */
2444void
2445lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2446{
2447#if (IS_ENABLED(CONFIG_NVME_FC))
2448        int ret;
2449        struct nvme_fc_local_port *localport;
2450        struct lpfc_nvme_lport *lport;
2451        struct lpfc_nvme_rport *rport;
2452        struct nvme_fc_remote_port *remoteport = NULL;
2453
2454        localport = vport->localport;
2455
2456        /* This is a fundamental error.  The localport is always
2457         * available until driver unload.  Just exit.
2458         */
2459        if (!localport)
2460                return;
2461
2462        lport = (struct lpfc_nvme_lport *)localport->private;
2463        if (!lport)
2464                goto input_err;
2465
2466        spin_lock_irq(&ndlp->lock);
2467        rport = lpfc_ndlp_get_nrport(ndlp);
2468        if (rport)
2469                remoteport = rport->remoteport;
2470        spin_unlock_irq(&ndlp->lock);
2471        if (!remoteport)
2472                goto input_err;
2473
2474        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2475                         "6033 Unreg nvme remoteport x%px, portname x%llx, "
2476                         "port_id x%06x, portstate x%x port type x%x "
2477                         "refcnt %d\n",
2478                         remoteport, remoteport->port_name,
2479                         remoteport->port_id, remoteport->port_state,
2480                         ndlp->nlp_type, kref_read(&ndlp->kref));
2481
2482        /* Sanity check ndlp type.  Only call for NVME ports. Don't
2483         * clear any rport state until the transport calls back.
2484         */
2485
2486        if (ndlp->nlp_type & NLP_NVME_TARGET) {
2487                /* No concern about the role change on the nvme remoteport.
2488                 * The transport will update it.
2489                 */
2490                spin_lock_irq(&vport->phba->hbalock);
2491                ndlp->fc4_xpt_flags |= NLP_WAIT_FOR_UNREG;
2492                spin_unlock_irq(&vport->phba->hbalock);
2493
2494                /* Don't let the host nvme transport keep sending keep-alives
2495                 * on this remoteport. Vport is unloading, no recovery. The
2496                 * return value is ignored.  The upcall is a courtesy to the
2497                 * transport.
2498                 */
2499                if (vport->load_flag & FC_UNLOADING)
2500                        (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2501
2502                ret = nvme_fc_unregister_remoteport(remoteport);
2503
2504                /* The driver no longer knows if the nrport memory is valid
2505                 * because the controller teardown process has begun and
2506                 * is asynchronous.  Break the binding in the ndlp. Also
2507                 * remove the register ndlp reference to setup node release.
2508                 */
2509                ndlp->nrport = NULL;
2510                lpfc_nlp_put(ndlp);
2511                if (ret != 0) {
2512                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2513                                         "6167 NVME unregister failed %d "
2514                                         "port_state x%x\n",
2515                                         ret, remoteport->port_state);
2516                }
2517        }
2518        return;
2519
2520 input_err:
2521#endif
2522        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2523                         "6168 State error: lport x%px, rport x%px FCID x%06x\n",
2524                         vport->localport, ndlp->rport, ndlp->nlp_DID);
2525}
2526
2527/**
2528 * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2529 * @phba: pointer to lpfc hba data structure.
2530 * @axri: pointer to the fcp xri abort wcqe structure.
2531 * @lpfc_ncmd: The nvme job structure for the request being aborted.
2532 *
2533 * This routine is invoked by the worker thread to process a SLI4 fast-path
2534 * NVME aborted xri.  Aborted NVME IO commands are completed to the transport
2535 * here.
2536 **/
2537void
2538lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2539                           struct sli4_wcqe_xri_aborted *axri,
2540                           struct lpfc_io_buf *lpfc_ncmd)
2541{
2542        uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2543        struct nvmefc_fcp_req *nvme_cmd = NULL;
2544        struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
2545
2546
2547        if (ndlp)
2548                lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2549
2550        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2551                        "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
2552                        "xri released\n",
2553                        lpfc_ncmd->nvmeCmd, xri,
2554                        lpfc_ncmd->cur_iocbq.iotag);
2555
2556        /* Aborted NVME commands are required to not complete
2557         * before the abort exchange command fully completes.
2558         * Once the abort completes, the buffer goes to the put list.
2559         */
2560        if (lpfc_ncmd->nvmeCmd) {
2561                nvme_cmd = lpfc_ncmd->nvmeCmd;
2562                nvme_cmd->done(nvme_cmd);
2563                lpfc_ncmd->nvmeCmd = NULL;
2564        }
2565        lpfc_release_nvme_buf(phba, lpfc_ncmd);
2566}
2567
2568/**
2569 * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2570 * @phba: Pointer to HBA context object.
2571 *
2572 * This function flushes all wqes in the nvme rings and frees all resources
2573 * in the txcmplq. This function does not issue abort wqes for the IO
2574 * commands in txcmplq, they will just be returned with
2575 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
2576 * slot has been permanently disabled.
2577 **/
2578void
2579lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2580{
2581        struct lpfc_sli_ring  *pring;
2582        u32 i, wait_cnt = 0;
2583
2584        if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2585                return;
2586
2587        /* Cycle through all IO rings and make sure all outstanding
2588         * WQEs have been removed from the txcmplqs.
2589         */
2590        for (i = 0; i < phba->cfg_hdw_queue; i++) {
2591                if (!phba->sli4_hba.hdwq[i].io_wq)
2592                        continue;
2593                pring = phba->sli4_hba.hdwq[i].io_wq->pring;
2594
2595                if (!pring)
2596                        continue;
2597
2598                /* Retrieve everything on the txcmplq */
2599                while (!list_empty(&pring->txcmplq)) {
2600                        msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2601                        wait_cnt++;
2602
2603                        /* The sleep is 10 ms.  Every ten seconds,
2604                         * dump a message.  Something is wrong.
2605                         */
2606                        if ((wait_cnt % 1000) == 0) {
2607                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2608                                                "6178 NVME IO not empty, "
2609                                                "cnt %d\n", wait_cnt);
2610                        }
2611                }
2612        }
2613
2614        /* Make sure HBA is alive */
2615        lpfc_issue_hb_tmo(phba);
2616
2617}
2618
2619void
2620lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
2621                      uint32_t stat, uint32_t param)
2622{
2623#if (IS_ENABLED(CONFIG_NVME_FC))
2624        struct lpfc_io_buf *lpfc_ncmd;
2625        struct nvmefc_fcp_req *nCmd;
2626        struct lpfc_wcqe_complete wcqe;
2627        struct lpfc_wcqe_complete *wcqep = &wcqe;
2628
2629        lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
2630        if (!lpfc_ncmd) {
2631                lpfc_sli_release_iocbq(phba, pwqeIn);
2632                return;
2633        }
2634        /* For abort iocb just return, IO iocb will do a done call */
2635        if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
2636            CMD_ABORT_XRI_CX) {
2637                lpfc_sli_release_iocbq(phba, pwqeIn);
2638                return;
2639        }
2640
2641        spin_lock(&lpfc_ncmd->buf_lock);
2642        nCmd = lpfc_ncmd->nvmeCmd;
2643        if (!nCmd) {
2644                spin_unlock(&lpfc_ncmd->buf_lock);
2645                lpfc_release_nvme_buf(phba, lpfc_ncmd);
2646                return;
2647        }
2648        spin_unlock(&lpfc_ncmd->buf_lock);
2649
2650        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2651                        "6194 NVME Cancel xri %x\n",
2652                        lpfc_ncmd->cur_iocbq.sli4_xritag);
2653
2654        wcqep->word0 = 0;
2655        bf_set(lpfc_wcqe_c_status, wcqep, stat);
2656        wcqep->parameter = param;
2657        wcqep->word3 = 0; /* xb is 0 */
2658
2659        /* Call release with XB=1 to queue the IO into the abort list. */
2660        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
2661                bf_set(lpfc_wcqe_c_xb, wcqep, 1);
2662
2663        (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
2664#endif
2665}
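
/* Editor's note: cancellation above completes the IO through its normal
 * wqe_cmpl handler by synthesizing a WCQE with the caller-supplied status;
 * XB is set while the SLI port is active so the buffer takes the deferred
 * (abort-list) release path rather than being reused immediately.
 */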
2666