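/*
 * NVME target-mode (NVMET) support for the Emulex lpfc Fibre Channel
 * driver: implements the nvmet_fc_target_template callbacks and the
 * SLI4 WQE handling for NVME LS and FCP exchanges.
 */
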
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <asm/unaligned.h>
28#include <linux/crc-t10dif.h>
29#include <net/checksum.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_device.h>
33#include <scsi/scsi_eh.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_tcq.h>
36#include <scsi/scsi_transport_fc.h>
37#include <scsi/fc/fc_fs.h>
38
39#include "lpfc_version.h"
40#include "lpfc_hw4.h"
41#include "lpfc_hw.h"
42#include "lpfc_sli.h"
43#include "lpfc_sli4.h"
44#include "lpfc_nl.h"
45#include "lpfc_disc.h"
46#include "lpfc.h"
47#include "lpfc_scsi.h"
48#include "lpfc_nvme.h"
49#include "lpfc_logmsg.h"
50#include "lpfc_crtn.h"
51#include "lpfc_vport.h"
52#include "lpfc_debugfs.h"
53
54static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
55 struct lpfc_async_xchg_ctx *,
56 dma_addr_t rspbuf,
57 uint16_t rspsize);
58static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
59 struct lpfc_async_xchg_ctx *);
60static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
61 struct lpfc_async_xchg_ctx *,
62 uint32_t, uint16_t);
63static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
64 struct lpfc_async_xchg_ctx *,
65 uint32_t, uint16_t);
66static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
67 struct lpfc_async_xchg_ctx *);
68static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
69
70static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
71
72static union lpfc_wqe128 lpfc_tsend_cmd_template;
73static union lpfc_wqe128 lpfc_treceive_cmd_template;
74static union lpfc_wqe128 lpfc_trsp_cmd_template;
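
/* Set up the reusable WQE command templates for NVMET FCP
 * TSEND / TRECEIVE / TRSP work queue entries.
 */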
77void
78lpfc_nvmet_cmd_template(void)
79{
80 union lpfc_wqe128 *wqe;
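
	/* TSEND template */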
83 wqe = &lpfc_tsend_cmd_template;
84 memset(wqe, 0, sizeof(union lpfc_wqe128));
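
	/* Words 0-6 (BDE, payload length/offset, tags) are left zero
	 * here and filled in per-I/O.
	 */

	/* Word 7 */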
97 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
98 bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
99 bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
100 bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
101 bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
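
	/* Words 8-9 (abort tag, request tag / remote XID) are per-I/O */

	/* Word 10 */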
108 bf_set(wqe_xchg, &wqe->fcp_tsend.wqe_com, LPFC_NVME_XCHG);
109 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
110 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
111 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
112 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
113 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
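
	/* Word 11 */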
116 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
117 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
118 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
119 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
120 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
121 bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
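
	/* Word 12 (fcp_data_len) and the PBDE words are per-I/O */

	/* TRECEIVE template */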
128 wqe = &lpfc_treceive_cmd_template;
129 memset(wqe, 0, sizeof(union lpfc_wqe128));
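
	/* Word 3 - fixed TXRDY payload length */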
134 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
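
	/* Word 7 */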
143 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
144 bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
145 bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
146 bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
147 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
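
	/* Word 10 */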
154 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
155 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
156 bf_set(wqe_xchg, &wqe->fcp_treceive.wqe_com, LPFC_NVME_XCHG);
157 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
158 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
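
	/* Word 11 */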
162 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
163 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
164 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
165 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
166 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
167 bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
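
	/* TRSP template */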
174 wqe = &lpfc_trsp_cmd_template;
175 memset(wqe, 0, sizeof(union lpfc_wqe128));
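
	/* Word 7 */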
186 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
187 bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
188 bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
189 bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
190 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
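
	/* Word 10 */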
197 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
198 bf_set(wqe_xchg, &wqe->fcp_trsp.wqe_com, LPFC_NVME_XCHG);
199 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
200 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
201 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
202 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
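
	/* Word 11 */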
205 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
206 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
207 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
208 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
209 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
210 bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
213}
214
215#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
216static struct lpfc_async_xchg_ctx *
217lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
218{
219 struct lpfc_async_xchg_ctx *ctxp;
220 unsigned long iflag;
221 bool found = false;
222
223 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
224 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
225 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
226 continue;
227
228 found = true;
229 break;
230 }
231 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
232 if (found)
233 return ctxp;
234
235 return NULL;
236}
237
238static struct lpfc_async_xchg_ctx *
239lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
240{
241 struct lpfc_async_xchg_ctx *ctxp;
242 unsigned long iflag;
243 bool found = false;
244
245 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
246 list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
247 if (ctxp->oxid != oxid || ctxp->sid != sid)
248 continue;
249
250 found = true;
251 break;
252 }
253 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
254 if (found)
255 return ctxp;
256
257 return NULL;
258}
259#endif
260
261static void
262lpfc_nvmet_defer_release(struct lpfc_hba *phba,
263 struct lpfc_async_xchg_ctx *ctxp)
264{
265 lockdep_assert_held(&ctxp->ctxlock);
266
267 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
268 "6313 NVMET Defer ctx release oxid x%x flg x%x\n",
269 ctxp->oxid, ctxp->flag);
270
271 if (ctxp->flag & LPFC_NVME_CTX_RLS)
272 return;
273
274 ctxp->flag |= LPFC_NVME_CTX_RLS;
275 spin_lock(&phba->sli4_hba.t_active_list_lock);
276 list_del(&ctxp->list);
277 spin_unlock(&phba->sli4_hba.t_active_list_lock);
278 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
279 list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
280 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
281}
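
/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * Called with no lock held. Releases the node reference and iocbq taken
 * for the LS response, notifies the transport via ls_rsp->done(), and
 * frees the exchange context.
 */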
294void
295__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
296 struct lpfc_wcqe_complete *wcqe)
297{
298 struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
299 struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
300 uint32_t status, result;
301
302 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
303 result = wcqe->parameter;
304
305 if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
306 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
307 "6410 NVMEx LS cmpl state mismatch IO x%x: "
308 "%d %d\n",
309 axchg->oxid, axchg->state, axchg->entry_cnt);
310 }
311
312 lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
313 axchg->oxid, status, result);
314
315 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
316 "6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
317 status, result, axchg->oxid);
318
319 lpfc_nlp_put(cmdwqe->context1);
320 cmdwqe->context2 = NULL;
321 cmdwqe->context3 = NULL;
322 lpfc_sli_release_iocbq(phba, cmdwqe);
323 ls_rsp->done(ls_rsp);
324 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
325 "6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
326 status, axchg->oxid);
327 kfree(axchg);
328}
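
/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * Updates the targetport LS response statistics, then invokes the
 * generic completion handler __lpfc_nvme_xmt_ls_rsp_cmp().
 */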
341static void
342lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
343 struct lpfc_wcqe_complete *wcqe)
344{
345 struct lpfc_nvmet_tgtport *tgtp;
346 uint32_t status, result;
347
348 if (!phba->targetport)
349 goto finish;
350
351 status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
352 result = wcqe->parameter;
353
354 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
355 if (tgtp) {
356 if (status) {
357 atomic_inc(&tgtp->xmt_ls_rsp_error);
358 if (result == IOERR_ABORT_REQUESTED)
359 atomic_inc(&tgtp->xmt_ls_rsp_aborted);
360 if (bf_get(lpfc_wcqe_c_xb, wcqe))
361 atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
362 } else {
363 atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
364 }
365 }
366
367finish:
368 __lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
369}
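
/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with.
 * @ctx_buf: ctx buffer context.
 *
 * Frees the buffer's resources. If a received command is waiting for an
 * exchange context, the context is reused for that command; otherwise it
 * is returned to the per-CPU free list.
 */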
383void
384lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
385{
386#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
387 struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
388 struct lpfc_nvmet_tgtport *tgtp;
389 struct fc_frame_header *fc_hdr;
390 struct rqb_dmabuf *nvmebuf;
391 struct lpfc_nvmet_ctx_info *infop;
392 uint32_t size, oxid, sid;
393 int cpu;
394 unsigned long iflag;
395
396 if (ctxp->state == LPFC_NVME_STE_FREE) {
397 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
398 "6411 NVMET free, already free IO x%x: %d %d\n",
399 ctxp->oxid, ctxp->state, ctxp->entry_cnt);
400 }
401
402 if (ctxp->rqb_buffer) {
403 spin_lock_irqsave(&ctxp->ctxlock, iflag);
404 nvmebuf = ctxp->rqb_buffer;
405
406 if (nvmebuf) {
407 ctxp->rqb_buffer = NULL;
408 if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
409 ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
410 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
411 nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
412 nvmebuf);
413 } else {
414 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
415
416 lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
417 }
418 } else {
419 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
420 }
421 }
422 ctxp->state = LPFC_NVME_STE_FREE;
423
424 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
425 if (phba->sli4_hba.nvmet_io_wait_cnt) {
426 list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
427 nvmebuf, struct rqb_dmabuf,
428 hbuf.list);
429 phba->sli4_hba.nvmet_io_wait_cnt--;
430 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
431 iflag);
432
433 fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
434 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
435 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
436 size = nvmebuf->bytes_recv;
437 sid = sli4_sid_from_fc_hdr(fc_hdr);
438
439 ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
440 ctxp->wqeq = NULL;
441 ctxp->offset = 0;
442 ctxp->phba = phba;
443 ctxp->size = size;
444 ctxp->oxid = oxid;
445 ctxp->sid = sid;
446 ctxp->state = LPFC_NVME_STE_RCV;
447 ctxp->entry_cnt = 1;
448 ctxp->flag = 0;
449 ctxp->ctxbuf = ctx_buf;
450 ctxp->rqb_buffer = (void *)nvmebuf;
451 spin_lock_init(&ctxp->ctxlock);
452
453#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
454
455 if (ctxp->ts_isr_cmd) {
456 ctxp->ts_cmd_nvme = 0;
457 ctxp->ts_nvme_data = 0;
458 ctxp->ts_data_wqput = 0;
459 ctxp->ts_isr_data = 0;
460 ctxp->ts_data_nvme = 0;
461 ctxp->ts_nvme_status = 0;
462 ctxp->ts_status_wqput = 0;
463 ctxp->ts_isr_status = 0;
464 ctxp->ts_status_nvme = 0;
465 }
466#endif
467 atomic_inc(&tgtp->rcv_fcp_cmd_in);
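
		/* Indicate that a replacement buffer has been posted */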
470 spin_lock_irqsave(&ctxp->ctxlock, iflag);
471 ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
472 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
473
474 if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
475 atomic_inc(&tgtp->rcv_fcp_cmd_drop);
476 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
477 "6181 Unable to queue deferred work "
478 "for oxid x%x. "
479 "FCP Drop IO [x%x x%x x%x]\n",
480 ctxp->oxid,
481 atomic_read(&tgtp->rcv_fcp_cmd_in),
482 atomic_read(&tgtp->rcv_fcp_cmd_out),
483 atomic_read(&tgtp->xmt_fcp_release));
484
485 spin_lock_irqsave(&ctxp->ctxlock, iflag);
486 lpfc_nvmet_defer_release(phba, ctxp);
487 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
488 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
489 }
490 return;
491 }
492 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
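
	/*
	 * Use the CPU context list, from the MRQ the IO was received on
	 * (ctxp->idx), to save the context structure.
	 */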
498 spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
499 list_del_init(&ctxp->list);
500 spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
501 cpu = raw_smp_processor_id();
502 infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
503 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
504 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
505 infop->nvmet_ctx_list_cnt++;
506 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
507#endif
508}
509
510#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
511static void
512lpfc_nvmet_ktime(struct lpfc_hba *phba,
513 struct lpfc_async_xchg_ctx *ctxp)
514{
515 uint64_t seg1, seg2, seg3, seg4, seg5;
516 uint64_t seg6, seg7, seg8, seg9, seg10;
517 uint64_t segsum;
518
519 if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
520 !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
521 !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
522 !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
523 !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
524 return;
525
526 if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
527 return;
528 if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
529 return;
530 if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
531 return;
532 if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
533 return;
534 if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
535 return;
536 if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
537 return;
538 if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
539 return;
540 if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
541 return;
542 if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
543 return;
544 if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
545 return;
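
	/*
	 * Segment accounting (deltas are computed against ts_isr_cmd):
	 *  seg1  cmd ISR          -> cmd passed to NVME layer
	 *  seg2  cmd to NVME      -> NVME layer issues data op
	 *  seg3  data op          -> data WQE put on WQ
	 *  seg4  data WQ put      -> data completion ISR
	 *  seg5  data cmpl ISR    -> data cmpl passed to NVME layer
	 *  seg6  data cmpl        -> NVME layer issues status op
	 *  seg7  status op        -> status WQE put on WQ
	 *  seg8  status WQ put    -> status completion ISR
	 *  seg9  status cmpl ISR  -> status cmpl passed to NVME layer
	 *  seg10 cmd ISR          -> final completion ISR (total)
	 */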
571 seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
572 segsum = seg1;
573
574 seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
575 if (segsum > seg2)
576 return;
577 seg2 -= segsum;
578 segsum += seg2;
579
580 seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
581 if (segsum > seg3)
582 return;
583 seg3 -= segsum;
584 segsum += seg3;
585
586 seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
587 if (segsum > seg4)
588 return;
589 seg4 -= segsum;
590 segsum += seg4;
591
592 seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
593 if (segsum > seg5)
594 return;
595 seg5 -= segsum;
596 segsum += seg5;
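
	/* For auto rsp commands seg6 thru seg10 will be 0 */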
600 if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
601 seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
602 if (segsum > seg6)
603 return;
604 seg6 -= segsum;
605 segsum += seg6;
606
607 seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
608 if (segsum > seg7)
609 return;
610 seg7 -= segsum;
611 segsum += seg7;
612
613 seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
614 if (segsum > seg8)
615 return;
616 seg8 -= segsum;
617 segsum += seg8;
618
619 seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
620 if (segsum > seg9)
621 return;
622 seg9 -= segsum;
623 segsum += seg9;
624
625 if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
626 return;
627 seg10 = (ctxp->ts_isr_status -
628 ctxp->ts_isr_cmd);
629 } else {
630 if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
631 return;
632 seg6 = 0;
633 seg7 = 0;
634 seg8 = 0;
635 seg9 = 0;
636 seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
637 }
638
639 phba->ktime_seg1_total += seg1;
640 if (seg1 < phba->ktime_seg1_min)
641 phba->ktime_seg1_min = seg1;
642 else if (seg1 > phba->ktime_seg1_max)
643 phba->ktime_seg1_max = seg1;
644
645 phba->ktime_seg2_total += seg2;
646 if (seg2 < phba->ktime_seg2_min)
647 phba->ktime_seg2_min = seg2;
648 else if (seg2 > phba->ktime_seg2_max)
649 phba->ktime_seg2_max = seg2;
650
651 phba->ktime_seg3_total += seg3;
652 if (seg3 < phba->ktime_seg3_min)
653 phba->ktime_seg3_min = seg3;
654 else if (seg3 > phba->ktime_seg3_max)
655 phba->ktime_seg3_max = seg3;
656
657 phba->ktime_seg4_total += seg4;
658 if (seg4 < phba->ktime_seg4_min)
659 phba->ktime_seg4_min = seg4;
660 else if (seg4 > phba->ktime_seg4_max)
661 phba->ktime_seg4_max = seg4;
662
663 phba->ktime_seg5_total += seg5;
664 if (seg5 < phba->ktime_seg5_min)
665 phba->ktime_seg5_min = seg5;
666 else if (seg5 > phba->ktime_seg5_max)
667 phba->ktime_seg5_max = seg5;
668
669 phba->ktime_data_samples++;
670 if (!seg6)
671 goto out;
672
673 phba->ktime_seg6_total += seg6;
674 if (seg6 < phba->ktime_seg6_min)
675 phba->ktime_seg6_min = seg6;
676 else if (seg6 > phba->ktime_seg6_max)
677 phba->ktime_seg6_max = seg6;
678
679 phba->ktime_seg7_total += seg7;
680 if (seg7 < phba->ktime_seg7_min)
681 phba->ktime_seg7_min = seg7;
682 else if (seg7 > phba->ktime_seg7_max)
683 phba->ktime_seg7_max = seg7;
684
685 phba->ktime_seg8_total += seg8;
686 if (seg8 < phba->ktime_seg8_min)
687 phba->ktime_seg8_min = seg8;
688 else if (seg8 > phba->ktime_seg8_max)
689 phba->ktime_seg8_max = seg8;
690
691 phba->ktime_seg9_total += seg9;
692 if (seg9 < phba->ktime_seg9_min)
693 phba->ktime_seg9_min = seg9;
694 else if (seg9 > phba->ktime_seg9_max)
695 phba->ktime_seg9_max = seg9;
696out:
697 phba->ktime_seg10_total += seg10;
698 if (seg10 < phba->ktime_seg10_min)
699 phba->ktime_seg10_min = seg10;
700 else if (seg10 > phba->ktime_seg10_max)
701 phba->ktime_seg10_max = seg10;
702 phba->ktime_status_samples++;
703}
704#endif
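
/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * Called with no lock held. Completion handler for NVMET FCP TSEND,
 * TRECEIVE and TRSP WQEs; updates statistics and notifies the transport
 * through rsp->done().
 */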
716static void
717lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
718 struct lpfc_wcqe_complete *wcqe)
719{
720 struct lpfc_nvmet_tgtport *tgtp;
721 struct nvmefc_tgt_fcp_req *rsp;
722 struct lpfc_async_xchg_ctx *ctxp;
723 uint32_t status, result, op, start_clean, logerr;
724#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
725 int id;
726#endif
727
728 ctxp = cmdwqe->context2;
729 ctxp->flag &= ~LPFC_NVME_IO_INP;
730
731 rsp = &ctxp->hdlrctx.fcp_req;
732 op = rsp->op;
733
734 status = bf_get(lpfc_wcqe_c_status, wcqe);
735 result = wcqe->parameter;
736
737 if (phba->targetport)
738 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
739 else
740 tgtp = NULL;
741
742 lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
743 ctxp->oxid, op, status);
744
745 if (status) {
746 rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
747 rsp->transferred_length = 0;
748 if (tgtp) {
749 atomic_inc(&tgtp->xmt_fcp_rsp_error);
750 if (result == IOERR_ABORT_REQUESTED)
751 atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
752 }
753
754 logerr = LOG_NVME_IOERR;
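
		/* pick up the SLI4 exchange busy condition */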
757 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
758 ctxp->flag |= LPFC_NVME_XBUSY;
759 logerr |= LOG_NVME_ABTS;
760 if (tgtp)
761 atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
762
763 } else {
764 ctxp->flag &= ~LPFC_NVME_XBUSY;
765 }
766
767 lpfc_printf_log(phba, KERN_INFO, logerr,
768 "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
769 "XBUSY:x%x\n",
770 ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
771 status, result, ctxp->flag);
772
773 } else {
774 rsp->fcp_error = NVME_SC_SUCCESS;
775 if (op == NVMET_FCOP_RSP)
776 rsp->transferred_length = rsp->rsplen;
777 else
778 rsp->transferred_length = rsp->transfer_length;
779 if (tgtp)
780 atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
781 }
782
783 if ((op == NVMET_FCOP_READDATA_RSP) ||
784 (op == NVMET_FCOP_RSP)) {
785
786 ctxp->state = LPFC_NVME_STE_DONE;
787 ctxp->entry_cnt++;
788
789#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
790 if (ctxp->ts_cmd_nvme) {
791 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
792 ctxp->ts_isr_data =
793 cmdwqe->isr_timestamp;
794 ctxp->ts_data_nvme =
795 ktime_get_ns();
796 ctxp->ts_nvme_status =
797 ctxp->ts_data_nvme;
798 ctxp->ts_status_wqput =
799 ctxp->ts_data_nvme;
800 ctxp->ts_isr_status =
801 ctxp->ts_data_nvme;
802 ctxp->ts_status_nvme =
803 ctxp->ts_data_nvme;
804 } else {
805 ctxp->ts_isr_status =
806 cmdwqe->isr_timestamp;
807 ctxp->ts_status_nvme =
808 ktime_get_ns();
809 }
810 }
811#endif
812 rsp->done(rsp);
813#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
814 if (ctxp->ts_cmd_nvme)
815 lpfc_nvmet_ktime(phba, ctxp);
816#endif
817
818 } else {
819 ctxp->entry_cnt++;
820 start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
821 memset(((char *)cmdwqe) + start_clean, 0,
822 (sizeof(struct lpfc_iocbq) - start_clean));
823#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
824 if (ctxp->ts_cmd_nvme) {
825 ctxp->ts_isr_data = cmdwqe->isr_timestamp;
826 ctxp->ts_data_nvme = ktime_get_ns();
827 }
828#endif
829 rsp->done(rsp);
830 }
831#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
832 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
833 id = raw_smp_processor_id();
834 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
835 if (ctxp->cpu != id)
836 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
837 "6704 CPU Check cmdcmpl: "
838 "cpu %d expect %d\n",
839 id, ctxp->cpu);
840 }
841#endif
842}
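
/**
 * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit an NVME LS
 *         response for a prior NVME LS request that was received.
 * @axchg: pointer to exchange context for the NVME LS request the response
 *         is for.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent.
 * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done.
 *
 * Formats and issues a WQE to transmit the NVME LS response, aborting the
 * exchange if the response cannot be sent.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 */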
860int
861__lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
862 struct nvmefc_ls_rsp *ls_rsp,
863 void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
864 struct lpfc_iocbq *cmdwqe,
865 struct lpfc_wcqe_complete *wcqe))
866{
867 struct lpfc_hba *phba = axchg->phba;
868 struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
869 struct lpfc_iocbq *nvmewqeq;
870 struct lpfc_dmabuf dmabuf;
871 struct ulp_bde64 bpl;
872 int rc;
873
874 if (phba->pport->load_flag & FC_UNLOADING)
875 return -ENODEV;
876
877 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
878 "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
879
880 if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
881 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
882 "6412 NVMEx LS rsp state mismatch "
883 "oxid x%x: %d %d\n",
884 axchg->oxid, axchg->state, axchg->entry_cnt);
885 return -EALREADY;
886 }
887 axchg->state = LPFC_NVME_STE_LS_RSP;
888 axchg->entry_cnt++;
889
890 nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
891 ls_rsp->rsplen);
892 if (nvmewqeq == NULL) {
893 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
894 "6150 NVMEx LS Drop Rsp x%x: Prep\n",
895 axchg->oxid);
896 rc = -ENOMEM;
897 goto out_free_buf;
898 }
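
	/* Save numBdes for bpl2sgl */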
901 nvmewqeq->rsvd2 = 1;
902 nvmewqeq->hba_wqidx = 0;
903 nvmewqeq->context3 = &dmabuf;
904 dmabuf.virt = &bpl;
905 bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
906 bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
907 bpl.tus.f.bdeSize = ls_rsp->rsplen;
908 bpl.tus.f.bdeFlags = 0;
909 bpl.tus.w = le32_to_cpu(bpl.tus.w);
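
	/*
	 * Note: although we're using stack space for the dmabuf, the
	 * call to lpfc_sli4_issue_wqe is synchronous, so it will not
	 * be referenced after it returns back to this routine.
	 */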
916 nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
917 nvmewqeq->iocb_cmpl = NULL;
918 nvmewqeq->context2 = axchg;
919
920 lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
921 axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
922
923 rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
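
	/* clear to be sure there's no reference */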
926 nvmewqeq->context3 = NULL;
927
928 if (rc == WQE_SUCCESS) {
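		/*
		 * Okay to repost the buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */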
933 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
934 return 0;
935 }
936
937 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
938 "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
939 axchg->oxid, rc);
940
941 rc = -ENXIO;
942
943 lpfc_nlp_put(nvmewqeq->context1);
944
945out_free_buf:
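	/* Give back resources */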
947 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
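
	/*
	 * As the transport doesn't track completions of responses, if the
	 * rsp fails to send, the transport will effectively ignore the rsp
	 * and consider the LS done. However, the driver has an active
	 * exchange open for the LS - so be sure to abort the exchange
	 * if the response isn't sent.
	 */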
956 lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
957 return rc;
958}
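
/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
 * @tgtport: pointer to target port that the NVME LS is to be transmit from.
 * @ls_rsp: pointer to the transport LS RSP that is to be sent.
 *
 * Driver registers this routine with the nvmet-fc transport to transmit
 * responses for received NVME LS requests.
 *
 * Returns:
 *  0 : if response successfully transmitted
 *  non-zero : if response failed to transmit, of the form -Exxx.
 */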
978static int
979lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
980 struct nvmefc_ls_rsp *ls_rsp)
981{
982 struct lpfc_async_xchg_ctx *axchg =
983 container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
984 struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
985 int rc;
986
987 if (axchg->phba->pport->load_flag & FC_UNLOADING)
988 return -ENODEV;
989
990 rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
991
992 if (rc) {
993 atomic_inc(&nvmep->xmt_ls_drop);
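		/*
		 * unless the failure is due to having already sent
		 * the response, an abort will be generated for the
		 * exchange if the rsp can't be sent.
		 */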
999 if (rc != -EALREADY)
1000 atomic_inc(&nvmep->xmt_ls_abort);
1001 return rc;
1002 }
1003
1004 atomic_inc(&nvmep->xmt_ls_rsp);
1005 return 0;
1006}
1007
1008static int
1009lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
1010 struct nvmefc_tgt_fcp_req *rsp)
1011{
1012 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1013 struct lpfc_async_xchg_ctx *ctxp =
1014 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1015 struct lpfc_hba *phba = ctxp->phba;
1016 struct lpfc_queue *wq;
1017 struct lpfc_iocbq *nvmewqeq;
1018 struct lpfc_sli_ring *pring;
1019 unsigned long iflags;
1020 int rc;
1021#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1022 int id;
1023#endif
1024
1025 if (phba->pport->load_flag & FC_UNLOADING) {
1026 rc = -ENODEV;
1027 goto aerr;
1028 }
1029
1030#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1031 if (ctxp->ts_cmd_nvme) {
1032 if (rsp->op == NVMET_FCOP_RSP)
1033 ctxp->ts_nvme_status = ktime_get_ns();
1034 else
1035 ctxp->ts_nvme_data = ktime_get_ns();
1036 }
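
	/* Setup the hdw queue if not already set */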
1039 if (!ctxp->hdwq)
1040 ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
1041
1042 if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
1043 id = raw_smp_processor_id();
1044 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
1045 if (rsp->hwqid != id)
1046 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1047 "6705 CPU Check OP: "
1048 "cpu %d expect %d\n",
1049 id, rsp->hwqid);
1050 ctxp->cpu = id;
1051 }
1052#endif
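
	/* Sanity check */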
1055 if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
1056 (ctxp->state == LPFC_NVME_STE_ABORT)) {
1057 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1058 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1059 "6102 IO oxid x%x aborted\n",
1060 ctxp->oxid);
1061 rc = -ENXIO;
1062 goto aerr;
1063 }
1064
1065 nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
1066 if (nvmewqeq == NULL) {
1067 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1068 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1069 "6152 FCP Drop IO x%x: Prep\n",
1070 ctxp->oxid);
1071 rc = -ENXIO;
1072 goto aerr;
1073 }
1074
1075 nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
1076 nvmewqeq->iocb_cmpl = NULL;
1077 nvmewqeq->context2 = ctxp;
1078 nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
1079 ctxp->wqeq->hba_wqidx = rsp->hwqid;
1080
1081 lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
1082 ctxp->oxid, rsp->op, rsp->rsplen);
1083
1084 ctxp->flag |= LPFC_NVME_IO_INP;
1085 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1086 if (rc == WQE_SUCCESS) {
1087#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1088 if (!ctxp->ts_cmd_nvme)
1089 return 0;
1090 if (rsp->op == NVMET_FCOP_RSP)
1091 ctxp->ts_status_wqput = ktime_get_ns();
1092 else
1093 ctxp->ts_data_wqput = ktime_get_ns();
1094#endif
1095 return 0;
1096 }
1097
1098 if (rc == -EBUSY) {
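		/*
		 * WQ was full, so queue nvmewqeq to be sent after
		 * the WQE release CQE
		 */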
1103 ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
1104 wq = ctxp->hdwq->io_wq;
1105 pring = wq->pring;
1106 spin_lock_irqsave(&pring->ring_lock, iflags);
1107 list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1108 wq->q_flag |= HBA_NVMET_WQFULL;
1109 spin_unlock_irqrestore(&pring->ring_lock, iflags);
1110 atomic_inc(&lpfc_nvmep->defer_wqfull);
1111 return 0;
1112 }
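
	/* Give back resources */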
1115 atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1116 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1117 "6153 FCP Drop IO x%x: Issue: %d\n",
1118 ctxp->oxid, rc);
1119
1120 ctxp->wqeq->hba_wqidx = 0;
1121 nvmewqeq->context2 = NULL;
1122 nvmewqeq->context3 = NULL;
1123 rc = -EBUSY;
1124aerr:
1125 return rc;
1126}
1127
1128static void
1129lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1130{
1131 struct lpfc_nvmet_tgtport *tport = targetport->private;
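
	/* release any threads waiting for the unreg to complete */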
1134 if (tport->phba->targetport)
1135 complete(tport->tport_unreg_cmp);
1136}
1137
1138static void
1139lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1140 struct nvmefc_tgt_fcp_req *req)
1141{
1142 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1143 struct lpfc_async_xchg_ctx *ctxp =
1144 container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1145 struct lpfc_hba *phba = ctxp->phba;
1146 struct lpfc_queue *wq;
1147 unsigned long flags;
1148
1149 if (phba->pport->load_flag & FC_UNLOADING)
1150 return;
1151
1152 if (!ctxp->hdwq)
1153 ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1154
1155 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1156 "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1157 ctxp->oxid, ctxp->flag, ctxp->state);
1158
1159 lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1160 ctxp->oxid, ctxp->flag, ctxp->state);
1161
1162 atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1163
1164 spin_lock_irqsave(&ctxp->ctxlock, flags);
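
	/* Check whether an abort is already in progress for this IO,
	 * either from the firmware (XBUSY) or the driver (ABORT_OP).
	 */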
1169 if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
1170 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1171 return;
1172 }
1173 ctxp->flag |= LPFC_NVME_ABORT_OP;
1174
1175 if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
1176 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1177 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1178 ctxp->oxid);
1179 wq = ctxp->hdwq->io_wq;
1180 lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1181 return;
1182 }
1183 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
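
	/* A state of LPFC_NVME_STE_RCV means we have just received
	 * the NVME command and have not started processing it
	 * (by issuing any IO WQEs on this exchange yet).
	 */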
1189 if (ctxp->state == LPFC_NVME_STE_RCV)
1190 lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1191 ctxp->oxid);
1192 else
1193 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1194 ctxp->oxid);
1195}
1196
1197static void
1198lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1199 struct nvmefc_tgt_fcp_req *rsp)
1200{
1201 struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1202 struct lpfc_async_xchg_ctx *ctxp =
1203 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1204 struct lpfc_hba *phba = ctxp->phba;
1205 unsigned long flags;
1206 bool aborting = false;
1207
1208 spin_lock_irqsave(&ctxp->ctxlock, flags);
1209 if (ctxp->flag & LPFC_NVME_XBUSY)
1210 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1211 "6027 NVMET release with XBUSY flag x%x"
1212 " oxid x%x\n",
1213 ctxp->flag, ctxp->oxid);
1214 else if (ctxp->state != LPFC_NVME_STE_DONE &&
1215 ctxp->state != LPFC_NVME_STE_ABORT)
1216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1217 "6413 NVMET release bad state %d %d oxid x%x\n",
1218 ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1219
1220 if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
1221 (ctxp->flag & LPFC_NVME_XBUSY)) {
1222 aborting = true;
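		/* let the abort path do the real release */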
1224 lpfc_nvmet_defer_release(phba, ctxp);
1225 }
1226 spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1227
1228 lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1229 ctxp->state, aborting);
1230
1231 atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1232 ctxp->flag &= ~LPFC_NVME_TNOTIFY;
1233
1234 if (aborting)
1235 return;
1236
1237 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1238}
1239
1240static void
1241lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1242 struct nvmefc_tgt_fcp_req *rsp)
1243{
1244 struct lpfc_nvmet_tgtport *tgtp;
1245 struct lpfc_async_xchg_ctx *ctxp =
1246 container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
1247 struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1248 struct lpfc_hba *phba = ctxp->phba;
1249 unsigned long iflag;
1250
1251
1252 lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1253 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1254
1255 if (!nvmebuf) {
1256 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1257 "6425 Defer rcv: no buffer oxid x%x: "
1258 "flg %x ste %x\n",
1259 ctxp->oxid, ctxp->flag, ctxp->state);
1260 return;
1261 }
1262
1263 tgtp = phba->targetport->private;
1264 if (tgtp)
1265 atomic_inc(&tgtp->rcv_fcp_cmd_defer);
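
	/* Free the nvmebuf since a replacement buffer was already posted */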
1268 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1269 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1270 ctxp->rqb_buffer = NULL;
1271 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1272}
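
/**
 * lpfc_nvmet_ls_req_cmp - completion handler for a NVME LS request
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * Hands the completed LS request off to the generic completion handler
 * __lpfc_nvme_ls_req_cmp().
 */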
1284static void
1285lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1286 struct lpfc_wcqe_complete *wcqe)
1287{
1288 __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
1289}
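
/**
 * lpfc_nvmet_ls_req - Issue an NVME Link Service request
 * @targetport: pointer to target instance registered with nvmet transport.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 *              The driver sets this value as the ndlp pointer.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS.
 *
 * Driver registers this routine to handle any link service request
 * from the nvmet-fc transport to a remote nvme-aware port.
 *
 * Returns:
 *  0 : if request successfully issued
 *  non-zero : if request failed to issue, of the form -Exxx.
 */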
1305static int
1306lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
1307 void *hosthandle,
1308 struct nvmefc_ls_req *pnvme_lsreq)
1309{
1310 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1311 struct lpfc_hba *phba;
1312 struct lpfc_nodelist *ndlp;
1313 int ret;
1314 u32 hstate;
1315
1316 if (!lpfc_nvmet)
1317 return -EINVAL;
1318
1319 phba = lpfc_nvmet->phba;
1320 if (phba->pport->load_flag & FC_UNLOADING)
1321 return -EINVAL;
1322
1323 hstate = atomic_read(&lpfc_nvmet->state);
1324 if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
1325 return -EACCES;
1326
1327 ndlp = (struct lpfc_nodelist *)hosthandle;
1328
1329 ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
1330 lpfc_nvmet_ls_req_cmp);
1331
1332 return ret;
1333}
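
/**
 * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
 * @targetport: transport targetport on which the LS was issued.
 * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
 * @pnvme_lsreq: the transport nvme_ls_req structure for the LS.
 *
 * Driver registers this routine to abort an NVME LS request that is
 * in progress (from the transport's point of view).
 */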
1345static void
1346lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
1347 void *hosthandle,
1348 struct nvmefc_ls_req *pnvme_lsreq)
1349{
1350 struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
1351 struct lpfc_hba *phba;
1352 struct lpfc_nodelist *ndlp;
1353 int ret;
1354
1355 phba = lpfc_nvmet->phba;
1356 if (phba->pport->load_flag & FC_UNLOADING)
1357 return;
1358
1359 ndlp = (struct lpfc_nodelist *)hosthandle;
1360
1361 ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
1362 if (!ret)
1363 atomic_inc(&lpfc_nvmet->xmt_ls_abort);
1364}
1365
1366static void
1367lpfc_nvmet_host_release(void *hosthandle)
1368{
1369 struct lpfc_nodelist *ndlp = hosthandle;
1370 struct lpfc_hba *phba = ndlp->phba;
1371 struct lpfc_nvmet_tgtport *tgtp;
1372
1373 if (!phba->targetport || !phba->targetport->private)
1374 return;
1375
1376 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1377 "6202 NVMET XPT releasing hosthandle x%px "
1378 "DID x%x xflags x%x refcnt %d\n",
1379 hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
1380 kref_read(&ndlp->kref));
1381 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382 spin_lock_irq(&ndlp->lock);
1383 ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
1384 spin_unlock_irq(&ndlp->lock);
1385 lpfc_nlp_put(ndlp);
1386 atomic_set(&tgtp->state, 0);
1387}
1388
1389static void
1390lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1391{
1392 struct lpfc_nvmet_tgtport *tgtp;
1393 struct lpfc_hba *phba;
1394 uint32_t rc;
1395
1396 tgtp = tgtport->private;
1397 phba = tgtp->phba;
1398
1399 rc = lpfc_issue_els_rscn(phba->pport, 0);
1400 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1401 "6420 NVMET subsystem change: Notification %s\n",
1402 (rc) ? "Failed" : "Sent");
1403}
1404
1405static struct nvmet_fc_target_template lpfc_tgttemplate = {
1406 .targetport_delete = lpfc_nvmet_targetport_delete,
1407 .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
1408 .fcp_op = lpfc_nvmet_xmt_fcp_op,
1409 .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
1410 .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1411 .defer_rcv = lpfc_nvmet_defer_rcv,
1412 .discovery_event = lpfc_nvmet_discovery_event,
1413 .ls_req = lpfc_nvmet_ls_req,
1414 .ls_abort = lpfc_nvmet_ls_abort,
1415 .host_release = lpfc_nvmet_host_release,
1416
1417 .max_hw_queues = 1,
1418 .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1419 .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1420 .dma_boundary = 0xFFFFFFFF,
1421
1422
1423 .target_features = 0,
1424
1425 .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1426 .lsrqst_priv_sz = 0,
1427};
1428
1429static void
1430__lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1431 struct lpfc_nvmet_ctx_info *infop)
1432{
1433 struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1434 unsigned long flags;
1435
1436 spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1437 list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1438 &infop->nvmet_ctx_list, list) {
1439 spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1440 list_del_init(&ctx_buf->list);
1441 spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1442
1443 spin_lock(&phba->hbalock);
1444 __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1445 spin_unlock(&phba->hbalock);
1446
1447 ctx_buf->sglq->state = SGL_FREED;
1448 ctx_buf->sglq->ndlp = NULL;
1449
1450 spin_lock(&phba->sli4_hba.sgl_list_lock);
1451 list_add_tail(&ctx_buf->sglq->list,
1452 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1453 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1454
1455 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1456 kfree(ctx_buf->context);
1457 }
1458 spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1459}
1460
1461static void
1462lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1463{
1464 struct lpfc_nvmet_ctx_info *infop;
1465 int i, j;
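
	/* The first context list: MRQ 0, CPU 0 */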
1468 infop = phba->sli4_hba.nvmet_ctx_info;
1469 if (!infop)
1470 return;
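
	/* Cycle the entire CPU context list for every MRQ */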
1473 for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1474 for_each_present_cpu(j) {
1475 infop = lpfc_get_ctx_list(phba, j, i);
1476 __lpfc_nvmet_clean_io_for_cpu(phba, infop);
1477 }
1478 }
1479 kfree(phba->sli4_hba.nvmet_ctx_info);
1480 phba->sli4_hba.nvmet_ctx_info = NULL;
1481}
1482
1483static int
1484lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1485{
1486 struct lpfc_nvmet_ctxbuf *ctx_buf;
1487 struct lpfc_iocbq *nvmewqe;
1488 union lpfc_wqe128 *wqe;
1489 struct lpfc_nvmet_ctx_info *last_infop;
1490 struct lpfc_nvmet_ctx_info *infop;
1491 int i, j, idx, cpu;
1492
1493 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1494 "6403 Allocate NVMET resources for %d XRIs\n",
1495 phba->sli4_hba.nvmet_xri_cnt);
1496
1497 phba->sli4_hba.nvmet_ctx_info = kcalloc(
1498 phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1499 sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1500 if (!phba->sli4_hba.nvmet_ctx_info) {
1501 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1502 "6419 Failed allocate memory for "
1503 "nvmet context lists\n");
1504 return -ENOMEM;
1505 }
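
	/*
	 * Initialize a context list and lock for every (CPU, MRQ) pair,
	 * even for CPUs that are not present; the context buffers are
	 * spread across the present CPUs further below.
	 */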
1526 for_each_possible_cpu(i) {
1527 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1528 infop = lpfc_get_ctx_list(phba, i, j);
1529 INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1530 spin_lock_init(&infop->nvmet_ctx_list_lock);
1531 infop->nvmet_ctx_list_cnt = 0;
1532 }
1533 }
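
	/*
	 * Setup the next CPU context info ptr for each MRQ.
	 * MRQ 0 will cycle thru CPUs 0 - X separately from
	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
	 */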
1540 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1541 last_infop = lpfc_get_ctx_list(phba,
1542 cpumask_first(cpu_present_mask),
1543 j);
1544 for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
1545 infop = lpfc_get_ctx_list(phba, i, j);
1546 infop->nvmet_ctx_next_cpu = last_infop;
1547 last_infop = infop;
1548 }
1549 }
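
	/* For all nvmet xris, allocate the resources needed to process a
	 * received command on a per-xri basis.
	 */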
1554 idx = 0;
1555 cpu = cpumask_first(cpu_present_mask);
1556 for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1557 ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1558 if (!ctx_buf) {
1559 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1560 "6404 Ran out of memory for NVMET\n");
1561 return -ENOMEM;
1562 }
1563
1564 ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1565 GFP_KERNEL);
1566 if (!ctx_buf->context) {
1567 kfree(ctx_buf);
1568 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1569 "6405 Ran out of NVMET "
1570 "context memory\n");
1571 return -ENOMEM;
1572 }
1573 ctx_buf->context->ctxbuf = ctx_buf;
1574 ctx_buf->context->state = LPFC_NVME_STE_FREE;
1575
1576 ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1577 if (!ctx_buf->iocbq) {
1578 kfree(ctx_buf->context);
1579 kfree(ctx_buf);
1580 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1581 "6406 Ran out of NVMET iocb/WQEs\n");
1582 return -ENOMEM;
1583 }
1584 ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1585 nvmewqe = ctx_buf->iocbq;
1586 wqe = &nvmewqe->wqe;
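
		/* Initialize WQE */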
1589 memset(wqe, 0, sizeof(union lpfc_wqe));
1590
1591 ctx_buf->iocbq->context1 = NULL;
1592 spin_lock(&phba->sli4_hba.sgl_list_lock);
1593 ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1594 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1595 if (!ctx_buf->sglq) {
1596 lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1597 kfree(ctx_buf->context);
1598 kfree(ctx_buf);
1599 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1600 "6407 Ran out of NVMET XRIs\n");
1601 return -ENOMEM;
1602 }
1603 INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
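
		/*
		 * Add ctx to the MRQidx context list. Our initial assumption
		 * is that MRQidx will be associated with CPUidx. This
		 * association can change on the fly.
		 */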
1610 infop = lpfc_get_ctx_list(phba, cpu, idx);
1611 spin_lock(&infop->nvmet_ctx_list_lock);
1612 list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1613 infop->nvmet_ctx_list_cnt++;
1614 spin_unlock(&infop->nvmet_ctx_list_lock);
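
		/* Spread ctx structures evenly across all MRQs */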
1617 idx++;
1618 if (idx >= phba->cfg_nvmet_mrq) {
1619 idx = 0;
1620 cpu = cpumask_first(cpu_present_mask);
1621 continue;
1622 }
1623 cpu = cpumask_next(cpu, cpu_present_mask);
1624 if (cpu == nr_cpu_ids)
1625 cpu = cpumask_first(cpu_present_mask);
1626
1627 }
1628
1629 for_each_present_cpu(i) {
1630 for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1631 infop = lpfc_get_ctx_list(phba, i, j);
1632 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1633 "6408 TOTAL NVMET ctx for CPU %d "
1634 "MRQ %d: cnt %d nextcpu x%px\n",
1635 i, j, infop->nvmet_ctx_list_cnt,
1636 infop->nvmet_ctx_next_cpu);
1637 }
1638 }
1639 return 0;
1640}
1641
1642int
1643lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1644{
1645 struct lpfc_vport *vport = phba->pport;
1646 struct lpfc_nvmet_tgtport *tgtp;
1647 struct nvmet_fc_port_info pinfo;
1648 int error;
1649
1650 if (phba->targetport)
1651 return 0;
1652
1653 error = lpfc_nvmet_setup_io_context(phba);
1654 if (error)
1655 return error;
1656
1657 memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1658 pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1659 pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1660 pinfo.port_id = vport->fc_myDID;
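
	/*
	 * Tell the transport one more segment than configured; the
	 * transport accounts for page alignment when sizing the SGL.
	 */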
1666 lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1667 lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1668 lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1669
1670#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1671 error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1672 &phba->pcidev->dev,
1673 &phba->targetport);
1674#else
1675 error = -ENOENT;
1676#endif
1677 if (error) {
1678 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1679 "6025 Cannot register NVME targetport x%x: "
1680 "portnm %llx nodenm %llx segs %d qs %d\n",
1681 error,
1682 pinfo.port_name, pinfo.node_name,
1683 lpfc_tgttemplate.max_sgl_segments,
1684 lpfc_tgttemplate.max_hw_queues);
1685 phba->targetport = NULL;
1686 phba->nvmet_support = 0;
1687
1688 lpfc_nvmet_cleanup_io_context(phba);
1689
1690 } else {
1691 tgtp = (struct lpfc_nvmet_tgtport *)
1692 phba->targetport->private;
1693 tgtp->phba = phba;
1694
1695 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1696 "6026 Registered NVME "
1697 "targetport: x%px, private x%px "
1698 "portnm %llx nodenm %llx segs %d qs %d\n",
1699 phba->targetport, tgtp,
1700 pinfo.port_name, pinfo.node_name,
1701 lpfc_tgttemplate.max_sgl_segments,
1702 lpfc_tgttemplate.max_hw_queues);
1703
1704 atomic_set(&tgtp->rcv_ls_req_in, 0);
1705 atomic_set(&tgtp->rcv_ls_req_out, 0);
1706 atomic_set(&tgtp->rcv_ls_req_drop, 0);
1707 atomic_set(&tgtp->xmt_ls_abort, 0);
1708 atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1709 atomic_set(&tgtp->xmt_ls_rsp, 0);
1710 atomic_set(&tgtp->xmt_ls_drop, 0);
1711 atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1712 atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1713 atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1714 atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1715 atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1716 atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1717 atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1718 atomic_set(&tgtp->xmt_fcp_drop, 0);
1719 atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1720 atomic_set(&tgtp->xmt_fcp_read, 0);
1721 atomic_set(&tgtp->xmt_fcp_write, 0);
1722 atomic_set(&tgtp->xmt_fcp_rsp, 0);
1723 atomic_set(&tgtp->xmt_fcp_release, 0);
1724 atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1725 atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1726 atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1727 atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1728 atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1729 atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1730 atomic_set(&tgtp->xmt_fcp_abort, 0);
1731 atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1732 atomic_set(&tgtp->xmt_abort_unsol, 0);
1733 atomic_set(&tgtp->xmt_abort_sol, 0);
1734 atomic_set(&tgtp->xmt_abort_rsp, 0);
1735 atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1736 atomic_set(&tgtp->defer_ctx, 0);
1737 atomic_set(&tgtp->defer_fod, 0);
1738 atomic_set(&tgtp->defer_wqfull, 0);
1739 }
1740 return error;
1741}
1742
1743int
1744lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1745{
1746 struct lpfc_vport *vport = phba->pport;
1747
1748 if (!phba->targetport)
1749 return 0;
1750
1751 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1752 "6007 Update NVMET port x%px did x%x\n",
1753 phba->targetport, vport->fc_myDID);
1754
1755 phba->targetport->port_id = vport->fc_myDID;
1756 return 0;
1757}
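
/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 */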
1767void
1768lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1769 struct sli4_wcqe_xri_aborted *axri)
1770{
1771#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1772 uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1773 uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1774 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1775 struct lpfc_nvmet_tgtport *tgtp;
1776 struct nvmefc_tgt_fcp_req *req = NULL;
1777 struct lpfc_nodelist *ndlp;
1778 unsigned long iflag = 0;
1779 int rrq_empty = 0;
1780 bool released = false;
1781
1782 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1783 "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1784
1785 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1786 return;
1787
1788 if (phba->targetport) {
1789 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1790 atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1791 }
1792
1793 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1794 list_for_each_entry_safe(ctxp, next_ctxp,
1795 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1796 list) {
1797 if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1798 continue;
1799
1800 spin_lock(&ctxp->ctxlock);
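		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */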
1804 if (ctxp->flag & LPFC_NVME_CTX_RLS &&
1805 !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
1806 list_del_init(&ctxp->list);
1807 released = true;
1808 }
1809 ctxp->flag &= ~LPFC_NVME_XBUSY;
1810 spin_unlock(&ctxp->ctxlock);
1811 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1812 iflag);
1813
1814 rrq_empty = list_empty(&phba->active_rrq_list);
1815 ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1816 if (ndlp &&
1817 (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1818 ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1819 lpfc_set_rrq_active(phba, ndlp,
1820 ctxp->ctxbuf->sglq->sli4_lxritag,
1821 rxid, 1);
1822 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1823 }
1824
1825 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1826 "6318 XB aborted oxid x%x flg x%x (%x)\n",
1827 ctxp->oxid, ctxp->flag, released);
1828 if (released)
1829 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1830
1831 if (rrq_empty)
1832 lpfc_worker_wake_up(phba);
1833 return;
1834 }
1835 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1836 ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1837 if (ctxp) {
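		/*
		 * Abort already done by FW, so BA_ACC was sent.
		 * However, the transport may be unaware.
		 */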
1842 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1843 "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1844 "flag x%x oxid x%x rxid x%x\n",
1845 xri, ctxp->state, ctxp->flag, ctxp->oxid,
1846 rxid);
1847
1848 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1849 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1850 ctxp->state = LPFC_NVME_STE_ABORT;
1851 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1852
1853 lpfc_nvmeio_data(phba,
1854 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1855 xri, raw_smp_processor_id(), 0);
1856
		req = &ctxp->hdlrctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1860 }
1861#endif
1862}
1863
1864int
1865lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1866 struct fc_frame_header *fc_hdr)
1867{
1868#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1869 struct lpfc_hba *phba = vport->phba;
1870 struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
1871 struct nvmefc_tgt_fcp_req *rsp;
1872 uint32_t sid;
1873 uint16_t oxid, xri;
1874 unsigned long iflag = 0;
1875
1876 sid = sli4_sid_from_fc_hdr(fc_hdr);
1877 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1878
1879 spin_lock_irqsave(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1880 list_for_each_entry_safe(ctxp, next_ctxp,
1881 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1882 list) {
1883 if (ctxp->oxid != oxid || ctxp->sid != sid)
1884 continue;
1885
1886 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1887
1888 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock,
1889 iflag);
1890 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1891 ctxp->flag |= LPFC_NVME_ABTS_RCV;
1892 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1893
1894 lpfc_nvmeio_data(phba,
1895 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1896 xri, raw_smp_processor_id(), 0);
1897
1898 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1899 "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1900
1901 rsp = &ctxp->hdlrctx.fcp_req;
1902 nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
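
		/* Respond with BA_ACC accordingly */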
1905 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1906 return 0;
1907 }
1908 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvmet_buf_list_lock, iflag);
1909
1910 if (phba->sli4_hba.nvmet_io_wait_cnt) {
1911 struct rqb_dmabuf *nvmebuf;
1912 struct fc_frame_header *fc_hdr_tmp;
1913 u32 sid_tmp;
1914 u16 oxid_tmp;
1915 bool found = false;
1916
1917 spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
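
		/* scan the wait list for a matching oxid/sid */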
1920 list_for_each_entry(nvmebuf,
1921 &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1922 hbuf.list) {
1923 fc_hdr_tmp = (struct fc_frame_header *)
1924 (nvmebuf->hbuf.virt);
1925 oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1926 sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1927 if (oxid_tmp != oxid || sid_tmp != sid)
1928 continue;
1929
1930 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1931 "6321 NVMET Rcv ABTS oxid x%x from x%x "
1932 "is waiting for a ctxp\n",
1933 oxid, sid);
1934
1935 list_del_init(&nvmebuf->hbuf.list);
1936 phba->sli4_hba.nvmet_io_wait_cnt--;
1937 found = true;
1938 break;
1939 }
1940 spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1941 iflag);
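
		/* free buffer since a new DMA buffer was already posted */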
1944 if (found) {
1945 nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1946
1947 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1948 return 0;
1949 }
1950 }
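
	/* check the active exchange list */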
1953 ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1954 if (ctxp) {
1955 xri = ctxp->ctxbuf->sglq->sli4_xritag;
1956
1957 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1958 ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
1959 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1960
1961 lpfc_nvmeio_data(phba,
1962 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1963 xri, raw_smp_processor_id(), 0);
1964
1965 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1966 "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1967 "flag x%x state x%x\n",
1968 ctxp->oxid, xri, ctxp->flag, ctxp->state);
1969
1970 if (ctxp->flag & LPFC_NVME_TNOTIFY) {
1971
1972 nvmet_fc_rcv_fcp_abort(phba->targetport,
1973 &ctxp->hdlrctx.fcp_req);
1974 } else {
1975 cancel_work_sync(&ctxp->ctxbuf->defer_work);
1976 spin_lock_irqsave(&ctxp->ctxlock, iflag);
1977 lpfc_nvmet_defer_release(phba, ctxp);
1978 spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1979 }
1980 lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1981 ctxp->oxid);
1982
1983 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1984 return 0;
1985 }
1986
1987 lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1988 oxid, raw_smp_processor_id(), 1);
1989
1990 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1991 "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
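
	/* Respond with BA_RJT accordingly */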
1994 lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1995#endif
1996 return 0;
1997}
1998
1999static void
2000lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
2001 struct lpfc_async_xchg_ctx *ctxp)
2002{
2003 struct lpfc_sli_ring *pring;
2004 struct lpfc_iocbq *nvmewqeq;
2005 struct lpfc_iocbq *next_nvmewqeq;
2006 unsigned long iflags;
2007 struct lpfc_wcqe_complete wcqe;
2008 struct lpfc_wcqe_complete *wcqep;
2009
2010 pring = wq->pring;
2011 wcqep = &wcqe;
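
	/* Fake an ABORT error code back to the cmpl routine */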
2014 memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
2015 bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
2016 wcqep->parameter = IOERR_ABORT_REQUESTED;
2017
2018 spin_lock_irqsave(&pring->ring_lock, iflags);
2019 list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
2020 &wq->wqfull_list, list) {
2021 if (ctxp) {
2022
2023 if (nvmewqeq->context2 == ctxp) {
2024 list_del(&nvmewqeq->list);
2025 spin_unlock_irqrestore(&pring->ring_lock,
2026 iflags);
2027 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
2028 wcqep);
2029 return;
2030 }
2031 continue;
2032 } else {
2033
2034 list_del(&nvmewqeq->list);
2035 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2036 lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
2037 spin_lock_irqsave(&pring->ring_lock, iflags);
2038 }
2039 }
2040 if (!ctxp)
2041 wq->q_flag &= ~HBA_NVMET_WQFULL;
2042 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2043}
2044
2045void
2046lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
2047 struct lpfc_queue *wq)
2048{
2049#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2050 struct lpfc_sli_ring *pring;
2051 struct lpfc_iocbq *nvmewqeq;
2052 struct lpfc_async_xchg_ctx *ctxp;
2053 unsigned long iflags;
2054 int rc;
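
	/*
	 * Some WQE slots are available, so try to re-issue anything
	 * on the WQ wqfull_list.
	 */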
2060 pring = wq->pring;
2061 spin_lock_irqsave(&pring->ring_lock, iflags);
2062 while (!list_empty(&wq->wqfull_list)) {
2063 list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
2064 list);
2065 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2066 ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
2067 rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
2068 spin_lock_irqsave(&pring->ring_lock, iflags);
2069 if (rc == -EBUSY) {
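			/* WQ was full again, so put it back on the list */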
2071 list_add(&nvmewqeq->list, &wq->wqfull_list);
2072 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2073 return;
2074 }
2075 if (rc == WQE_SUCCESS) {
2076#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2077 if (ctxp->ts_cmd_nvme) {
2078 if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
2079 ctxp->ts_status_wqput = ktime_get_ns();
2080 else
2081 ctxp->ts_data_wqput = ktime_get_ns();
2082 }
2083#endif
2084 } else {
2085 WARN_ON(rc);
2086 }
2087 }
2088 wq->q_flag &= ~HBA_NVMET_WQFULL;
2089 spin_unlock_irqrestore(&pring->ring_lock, iflags);
2090
2091#endif
2092}
2093
2094void
2095lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
2096{
2097#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2098 struct lpfc_nvmet_tgtport *tgtp;
2099 struct lpfc_queue *wq;
2100 uint32_t qidx;
2101 DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
2102
2103 if (phba->nvmet_support == 0)
2104 return;
2105 if (phba->targetport) {
2106 tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2107 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
2108 wq = phba->sli4_hba.hdwq[qidx].io_wq;
2109 lpfc_nvmet_wqfull_flush(phba, wq, NULL);
2110 }
2111 tgtp->tport_unreg_cmp = &tport_unreg_cmp;
2112 nvmet_fc_unregister_targetport(phba->targetport);
2113 if (!wait_for_completion_timeout(&tport_unreg_cmp,
2114 msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
2115 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2116 "6179 Unreg targetport x%px timeout "
2117 "reached.\n", phba->targetport);
2118 lpfc_nvmet_cleanup_io_context(phba);
2119 }
2120 phba->targetport = NULL;
2121#endif
2122}
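
/**
 * lpfc_nvmet_handle_lsreq - Process an NVME LS request
 * @phba: pointer to lpfc hba data structure.
 * @axchg: pointer to exchange context for the NVME LS request.
 *
 * Processes an asynchronously received NVME LS request by forwarding it
 * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
 *
 * Returns 0 if the LS was handled.
 * Returns non-zero if the LS failed to be handled and should be dropped.
 */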
2140int
2141lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
2142 struct lpfc_async_xchg_ctx *axchg)
2143{
2144#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2145 struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
2146 uint32_t *payload = axchg->payload;
2147 int rc;
2148
2149 atomic_inc(&tgtp->rcv_ls_req_in);
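
	/*
	 * Driver passes the ndlp as the hosthandle argument, allowing
	 * the transport to generate LS requests for any associations
	 * that are created.
	 */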
2156 rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
2157 axchg->payload, axchg->size);
2158
2159 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2160 "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2161 "%08x %08x %08x\n", axchg->size, rc,
2162 *payload, *(payload+1), *(payload+2),
2163 *(payload+3), *(payload+4), *(payload+5));
2164
2165 if (!rc) {
2166 atomic_inc(&tgtp->rcv_ls_req_out);
2167 return 0;
2168 }
2169
2170 atomic_inc(&tgtp->rcv_ls_req_drop);
2171#endif
2172 return 1;
2173}
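/*
 * lpfc_nvmet_process_rcv_fcp_req - Pass a received FCP command IU to the
 * nvmet-fc transport. Runs either inline from the receive path or from the
 * context buffer's defer_work. On transport -EOVERFLOW the command is
 * deferred and a replacement RQ buffer is posted; on any other error the
 * exchange is aborted and scheduled for release.
 */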
static void
lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_hba *phba = ctxp->phba;
	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t *payload, qno;
	uint32_t rc;
	unsigned long iflags;

	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6159 process_rcv_fcp_req, nvmebuf is NULL, "
				"oxid: x%x flg: x%x state: x%x\n",
				ctxp->oxid, ctxp->flag, ctxp->state);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
		return;
	}

	if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6324 IO oxid x%x aborted\n",
				ctxp->oxid);
		return;
	}

	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	ctxp->flag |= LPFC_NVME_TNOTIFY;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (ctxp->ts_isr_cmd)
		ctxp->ts_cmd_nvme = ktime_get_ns();
#endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated context.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * in the NVME command / FC header is stored, so the nvmebuf can
	 * be released once the transport accepts the command.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
				  payload, ctxp->size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
		    (nvmebuf != ctxp->rqb_buffer)) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		ctxp->rqb_buffer = NULL;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/* Processing of FCP command is deferred */
	if (rc == -EOVERFLOW) {
		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
				 "from %06x\n",
				 ctxp->oxid, ctxp->size, ctxp->sid);
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		atomic_inc(&tgtp->defer_fod);
		spin_lock_irqsave(&ctxp->ctxlock, iflags);
		if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
			return;
		}
		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
		/*
		 * Post a replacement DMA buffer to the RQ and defer
		 * freeing the rcv buffer until the .defer_rcv callback.
		 */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}
	ctxp->flag &= ~LPFC_NVME_TNOTIFY;
	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 ctxp->oxid, ctxp->size, ctxp->sid);
	spin_lock_irqsave(&ctxp->ctxlock, iflags);
	lpfc_nvmet_defer_release(phba, ctxp);
	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
#endif
}

static void
lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf =
		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);

	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
#endif
}
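/*
 * lpfc_nvmet_replenish_context - Refill an empty per-CPU context list.
 * Steals the entire context list from another CPU for this MRQ, keeps one
 * context buffer for the current command, and remembers which CPU had spare
 * contexts so the next search starts there.
 */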
static struct lpfc_nvmet_ctxbuf *
lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
			     struct lpfc_nvmet_ctx_info *current_infop)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
	struct lpfc_nvmet_ctx_info *get_infop;
	int i;

	/*
	 * The current_infop for the MRQ a NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU.
	 *
	 * First we need to pick a context list to start looking at.
	 * nvmet_ctx_start_cpu had available contexts the last time
	 * we needed to replenish this CPU, where nvmet_ctx_next_cpu
	 * is just the next sequential CPU for this MRQ.
	 */
	if (current_infop->nvmet_ctx_start_cpu)
		get_infop = current_infop->nvmet_ctx_start_cpu;
	else
		get_infop = current_infop->nvmet_ctx_next_cpu;

	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
		if (get_infop == current_infop) {
			get_infop = get_infop->nvmet_ctx_next_cpu;
			continue;
		}
		spin_lock(&get_infop->nvmet_ctx_list_lock);

		/* Just take the entire context list, if there are any */
		if (get_infop->nvmet_ctx_list_cnt) {
			list_splice_init(&get_infop->nvmet_ctx_list,
					 &current_infop->nvmet_ctx_list);
			current_infop->nvmet_ctx_list_cnt =
				get_infop->nvmet_ctx_list_cnt - 1;
			get_infop->nvmet_ctx_list_cnt = 0;
			spin_unlock(&get_infop->nvmet_ctx_list_lock);

			current_infop->nvmet_ctx_start_cpu = get_infop;
			list_remove_head(&current_infop->nvmet_ctx_list,
					 ctx_buf, struct lpfc_nvmet_ctxbuf,
					 list);
			return ctx_buf;
		}

		/* Nothing found, move on to the next CPU */
		spin_unlock(&get_infop->nvmet_ctx_list_lock);
		get_infop = get_infop->nvmet_ctx_next_cpu;
	}

#endif
	/* Nothing found, all contexts for the MRQ are in-use */
	return NULL;
}
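/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited NVME FCP command buffer
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of the MRQ the command was received on.
 * @nvmebuf: pointer to the receive buffer holding the FCP command IU.
 * @isr_timestamp: timestamp taken in the ISR (ns), used for debugfs stats.
 * @cqflag: set when called from CQ processing; forces deferred handling
 *          via the context buffer's defer_work.
 *
 * Binds the received command to an exchange context and passes it to the
 * nvmet-fc transport. If no context is available, the buffer is queued on
 * the io_wait_list and a replacement RQ buffer is posted.
 */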
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    uint32_t idx,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp,
			    uint8_t cqflag)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_nvmet_ctx_info *current_infop;
	uint32_t size, oxid, sid, qno;
	unsigned long iflag;
	int current_cpu;

	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
		return;

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6157 NVMET FCP Drop IO\n");
		if (nvmebuf)
			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}

	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * changes from our initial assumption, the context list could
	 * be empty, thus it would need to be replenished with the
	 * context list from another CPU for this MRQ.
	 */
	current_cpu = raw_smp_processor_id();
	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
	if (current_infop->nvmet_ctx_list_cnt) {
		list_remove_head(&current_infop->nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		current_infop->nvmet_ctx_list_cnt--;
	} else {
		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
	}
	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);

	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
		if (idx != current_cpu)
			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
					"6703 CPU Check rcv: "
					"cpu %d expect %d\n",
					current_cpu, idx);
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, raw_smp_processor_id());

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);

		atomic_inc(&tgtp->defer_ctx);
		return;
	}

	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (ctxp->state != LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6414 NVMET Context corrupt %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
	}
	ctxp->wqeq = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->idx = idx;
	ctxp->state = LPFC_NVME_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	ctxp->rqb_buffer = (void *)nvmebuf;
	ctxp->hdwq = NULL;
	spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (isr_timestamp)
		ctxp->ts_isr_cmd = isr_timestamp;
	ctxp->ts_cmd_nvme = 0;
	ctxp->ts_nvme_data = 0;
	ctxp->ts_data_wqput = 0;
	ctxp->ts_isr_data = 0;
	ctxp->ts_data_nvme = 0;
	ctxp->ts_nvme_status = 0;
	ctxp->ts_status_wqput = 0;
	ctxp->ts_isr_status = 0;
	ctxp->ts_status_nvme = 0;
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/* check for cq processing load */
	if (!cqflag) {
		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
		return;
	}

	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6325 Unable to queue work for oxid x%x. "
				"FCP Drop IO [x%x x%x x%x]\n",
				ctxp->oxid,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		lpfc_nvmet_defer_release(phba, ctxp);
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
	}
}
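/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @idx: relative index of MRQ vector
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: timestamp taken in the ISR, used for debugfs statistics.
 * @cqflag: cq processing flag to indicate cq processing or not.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. After validating the buffer and nvmet
 * support, the actual processing is done by lpfc_nvmet_unsol_fcp_buffer().
 */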
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   uint32_t idx,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp,
			   uint8_t cqflag)
{
	if (!nvmebuf) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3167 NVMET FCP Drop IO\n");
		return;
	}
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
}
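/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a WQE for an NVME LS response
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for the NVME LS Request.
 * @rspbuf: DMA address of the response payload buffer.
 * @rspsize: size of the response payload.
 *
 * Allocates an iocbq from the driver free list and builds an
 * XMIT_SEQUENCE64 WQE that transmits the LS response on the exchange
 * described by @ctxp.
 *
 * Returns pointer to the prepared WQE on success, NULL on failure.
 */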
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_async_xchg_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6104 NVMET prep LS wqe: link err: "
				"NPORT x%x oxid:x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6105 NVMET prep LS wqe: No WQE: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6106 NVMET prep LS wqe: No ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;

	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVMET response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVMET LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
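/*
 * lpfc_nvmet_prep_fcp_wqe - Build the TSEND/TRECEIVE/TRSP WQE for an FCP op.
 * Selects the command template by rsp->op, fills in the exchange and node
 * tags, embeds a non-success response in the WQE if needed, and builds the
 * SGL from the transport's scatter list.
 */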
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	struct ulp_bde64 *bde;
	dma_addr_t physaddr;
	int i, cnt, nsegs;
	int do_pbde;
	int xc = 1;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6107 NVMET prep FCP wqe: link err:"
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6108 NVMET prep FCP wqe: no ndlp: "
				"NPORT x%x oxid x%x ste %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state);
		return NULL;
	}

	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6109 NVMET prep FCP wqe: seg cnt err: "
				"NPORT x%x oxid x%x ste %d cnt %d\n",
				ctxp->sid, ctxp->oxid, ctxp->state,
				phba->cfg_nvme_seg_cnt);
		return NULL;
	}
	nsegs = rsp->sg_cnt;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6110 NVMET prep FCP wqe: No "
					"WQE: NPORT x%x oxid x%x ste %d\n",
					ctxp->sid, ctxp->oxid, ctxp->state);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0;
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}

	/* Sanity check */
	if (((ctxp->state == LPFC_NVME_STE_RCV) &&
	    (ctxp->entry_cnt == 1)) ||
	    (ctxp->state == LPFC_NVME_STE_DATA)) {
		wqe = &nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6111 Wrong state NVMET FCP: %d cnt %d\n",
				ctxp->state, ctxp->entry_cnt);
		return NULL;
	}

	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* From the tsend template, initialize words 7 - 11 */
		memcpy(&wqe->words[7],
		       &lpfc_tsend_cmd_template.words[7],
		       sizeof(uint32_t) * 5);

		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;

		/* Word 5 */
		wqe->fcp_tsend.reserved = 0;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 - set ar later */

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 11 - set sup, irsp, irsplen later */
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);

			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
					bf_set(wqe_sup,
					       &wqe->fcp_tsend.wqe_com, 1);
			} else {
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			/* In template ar=1; turn auto-rsp off for data-only */
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
		}
		break;
	case NVMET_FCOP_WRITEDATA:
		/* From the treceive template, initialize words 3 - 11 */
		memcpy(&wqe->words[3],
		       &lpfc_treceive_cmd_template.words[3],
		       sizeof(uint32_t) * 9);

		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
		wqe->fcp_treceive.bde.addrLow = 0;
		wqe->fcp_treceive.bde.addrHigh = 0;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 - in template xc=1 */
		if (!xc)
			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 11 - check for pbde */
		if (phba->cfg_enable_pbde) {
			do_pbde = 1;
		} else {
			/* Overwrite default template setting */
			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
			do_pbde = 0;
		}

		/* Word 12 - same offset in the tsend/treceive union layouts */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;

	case NVMET_FCOP_RSP:
		/* From the trsp template, initialize words 4 - 11 */
		memcpy(&wqe->words[4],
		       &lpfc_trsp_cmd_template.words[4],
		       sizeof(uint32_t) * 8);

		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		if (xc)
			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		/* In template wqes=0 irsp=0 irsplen=0 - good response */
		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
			/* Bad response - embed it */
			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
			       ((rsp->rsplen >> 2) - 1));
			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
		}
		do_pbde = 0;

		/* Word 12 */
		wqe->fcp_trsp.rsvd_12_15[0] = 0;

		/* Use rspbuf, NOT sg list */
		nsegs = 0;
		sgl->word2 = 0;
		atomic_inc(&tgtp->xmt_fcp_rsp);
		break;

	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for_each_sg(rsp->sg, sgel, nsegs, i) {
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i+1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		if (i == 0) {
			bde = (struct ulp_bde64 *)&wqe->words[13];
			if (do_pbde) {
				/* Words 13-15 (PBDE) */
				bde->addrLow = sgl->addr_lo;
				bde->addrHigh = sgl->addr_hi;
				bde->tus.f.bdeSize =
					le32_to_cpu(sgl->sge_len);
				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bde->tus.w = cpu_to_le32(bde->tus.w);
			} else {
				memset(bde, 0, sizeof(struct ulp_bde64));
			}
		}
		sgl++;
		ctxp->offset += cnt;
	}
	ctxp->state = LPFC_NVME_STE_DATA;
	ctxp->entry_cnt++;
	return nvmewqe;
}

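/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * It is the completion handler for an NVME ABTS issued for an FCP command
 * and frees the memory resources used for the command when possible.
 */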
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->state = LPFC_NVME_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

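/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * It is the completion handler for an NVME ABTS issued for an FCP command
 * that was never handed to the transport, and frees the memory resources
 * used for the command when possible.
 */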
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t result;
	bool released = false;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVME_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVME_STE_DONE;
	if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVME_XBUSY)) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}

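/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no lock held.
 * It is the completion handler for an NVME ABTS issued for an LS command
 * and frees the exchange context and the abort iocbq.
 */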
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t result;

	ctxp = cmdwqe->context2;
	result = wcqe->parameter;

	if (phba->nvmet_support) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		atomic_inc(&tgtp->xmt_ls_abort_cmpl);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (!ctxp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6415 NVMET LS Abort No ctx: WCQE: "
				"%08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);

		lpfc_sli_release_iocbq(phba, cmdwqe);
		return;
	}

	if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6416 NVMET LS abort cmpl state mismatch: "
				"oxid x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	kfree(ctxp);
}

static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_async_xchg_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe128 *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that we need to set.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
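/**
 * lpfc_nvmet_prep_abort_wqe - set up 'abort' work queue entry.
 * @pwqeq: Pointer to command iocb.
 * @xritag: Tag that uniquely identifies the local exchange resource.
 * @opt: Option bits - bit 0 = inhibit sending abts on the link.
 *
 * This function is called with hbalock held.
 */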
static void
lpfc_nvmet_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
{
	union lpfc_wqe128 *wqe = &pwqeq->wqe;

	/* WQEs are reused.  Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctx_tag.
	 */
	memset(wqe, 0, sizeof(*wqe));

	if (opt & INHIBIT_ABORT)
		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
	/* Abort specified xri tag, with the mask deliberately zeroed */
	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);

	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* Abort the I/O associated with this outstanding exchange ID. */
	wqe->abort_cmd.wqe_com.abort_tag = xritag;

	/* iotag for the wqe completion. */
	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);

	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
}

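/*
 * lpfc_nvmet_sol_fcp_issue_abort - Issue an ABORT WQE for an exchange that
 * was passed to the transport (solicited path). Allocates a separate abort
 * iocbq, validates the ndlp and HBA state under hbalock, and rings the
 * ABORT_XRI_CX WQE on the same hardware queue as the I/O being aborted.
 */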
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	u8 opt;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	ctxp->state = LPFC_NVME_STE_ABORT;
	opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set.  It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		spin_lock_irqsave(&ctxp->ctxlock, flags);
		ctxp->flag &= ~LPFC_NVME_ABORT_OP;
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	lpfc_nvmet_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->vport = phba->pport;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	ctxp->flag &= ~LPFC_NVME_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}

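/*
 * lpfc_nvmet_unsol_fcp_issue_abort - Send an ABTS for an exchange that was
 * never handed to the transport (unsolicited path). Reuses the context's own
 * iocbq for the BLS ABTS; on failure the context is unwound and, if it was
 * pending release, reposted to the free list.
 */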
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_async_xchg_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	bool released = false;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
		rc = WQE_BUSY;
		goto aerr;
	}
	ctxp->state = LPFC_NVME_STE_ABORT;
	ctxp->entry_cnt++;
	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	if (!ctxp->hdwq)
		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];

	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if (ctxp->flag & LPFC_NVME_CTX_RLS) {
		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_del_init(&ctxp->list);
		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		released = true;
	}
	ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
			"(%x)\n",
			ctxp->oxid, rc, released);
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
	return 1;
}
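/**
 * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
 *        via async frame receive where the frame is not handled.
 * @phba: pointer to adapter structure
 * @ctxp: pointer to the asynchronously received sequence
 * @sid: address of the remote port to send the ABTS to
 * @xri: oxid value for the ABTS (other side's exchange id).
 */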
int
lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_async_xchg_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp = NULL;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
	    (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
		ctxp->entry_cnt++;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6418 NVMET LS abort state mismatch "
				"IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
		ctxp->state = LPFC_NVME_STE_LS_ABORT;
	}

	if (phba->nvmet_support && phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;

	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
		rc = WQE_BUSY;
		goto out;
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		if (tgtp)
			atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}
out:
	if (tgtp)
		atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 1;
}
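/**
 * lpfc_nvmet_invalidate_host
 *
 * @phba: pointer to the driver instance bound to an adapter port.
 * @ndlp: pointer to an lpfc_nodelist type
 *
 * This routine upcalls the nvmet transport to invalidate an NVME
 * host to which this target instance had active connections.
 */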
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	u32 ndlp_has_hh;
	struct lpfc_nvmet_tgtport *tgtp;

	lpfc_printf_log(phba, KERN_INFO,
			LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
			"6203 Invalidating hosthandle x%px\n",
			ndlp);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);

	spin_lock_irq(&ndlp->lock);
	ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
	spin_unlock_irq(&ndlp->lock);

	/* Do not invalidate any nodes that do not have a hosthandle.
	 * The host_release callback will cause a node reference
	 * count imbalance if a node with a hosthandle is not present.
	 */
	if (!ndlp_has_hh) {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
				"6204 Skip invalidate on node x%px DID x%x\n",
				ndlp, ndlp->nlp_DID);
		return;
	}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	/* Need to get the nvmet_fc_target_port pointer here. */
	nvmet_fc_invalidate_host(phba->targetport, ndlp);
#endif
}