1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/export.h>
27#include <linux/delay.h>
28#include <asm/unaligned.h>
29#include <linux/t10-pi.h>
30#include <linux/crc-t10dif.h>
31#include <linux/blk-cgroup.h>
32#include <net/checksum.h>
33
34#include <scsi/scsi.h>
35#include <scsi/scsi_device.h>
36#include <scsi/scsi_eh.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_tcq.h>
39#include <scsi/scsi_transport_fc.h>
40
41#include "lpfc_version.h"
42#include "lpfc_hw4.h"
43#include "lpfc_hw.h"
44#include "lpfc_sli.h"
45#include "lpfc_sli4.h"
46#include "lpfc_nl.h"
47#include "lpfc_disc.h"
48#include "lpfc.h"
49#include "lpfc_scsi.h"
50#include "lpfc_logmsg.h"
51#include "lpfc_crtn.h"
52#include "lpfc_vport.h"
53
/* Wait intervals, presumably in seconds, used by the reset/abort handlers
 * defined later in this file — confirm against the wait loops that use them.
 */
#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
56
/* Human-readable names for the SCSI_PROT_* protection operations,
 * indexed by the value returned from scsi_get_prot_op(); used only
 * for log messages.
 */
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};
66
/* One T10 DIF protection-information entry (8 bytes, big-endian fields). */
struct scsi_dif_tuple {
	__be16 guard_tag;	/* guard checksum/CRC over the data block */
	__be16 app_tag;		/* application-defined tag */
	__be32 ref_tag;		/* reference tag; presumably low 32 bits of the LBA — confirm */
};
72
73static struct lpfc_rport_data *
74lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
75{
76 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
77
78 if (vport->phba->cfg_fof)
79 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
80 else
81 return (struct lpfc_rport_data *)sdev->hostdata;
82}
83
/* Forward declarations for helpers defined later in this file. */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
static int
lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
static void
lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
			   struct lpfc_vmid *vmp);
static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
				   *cmd, struct lpfc_vmid *vmp,
				   union lpfc_vmid_io_tag *tag);
static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
				    struct lpfc_vmid *vmid);
98
/**
 * lpfc_cmd_blksize - Return the logical block size of the target device
 * @sc: SCSI command whose device is queried.
 */
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
104
/* Selector values for lpfc_cmd_protect()'s @flag argument. */
#define LPFC_CHECK_PROTECT_GUARD 1
#define LPFC_CHECK_PROTECT_REF 2
/**
 * lpfc_cmd_protect - Report whether @flag protection checking is enabled
 * @sc: SCSI command (currently ignored).
 * @flag: LPFC_CHECK_PROTECT_GUARD or LPFC_CHECK_PROTECT_REF (currently ignored).
 *
 * Always returns 1: guard and reference checking are unconditionally
 * enabled here.  The parameters keep the call sites self-describing and
 * leave room for a future per-command policy.
 */
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	return 1;
}
112
/**
 * lpfc_cmd_guard_csum - Is the guard tag for this I/O an IP checksum?
 * @sc: SCSI command in question.
 *
 * Returns 1 when the host is configured for SHOST_DIX_GUARD_IP and the
 * command actually carries protection data; 0 for CRC guard or no DIF.
 *
 * NOTE(review): lpfc_prot_group_type() is called with a NULL phba here;
 * assumes it never dereferences its hba argument — confirm.
 */
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
122
123
124
125
126
127
128
129
130
/**
 * lpfc_sli4_set_rsp_sgl_last - Mark the FCP response SGE as the final SGE
 * @phba: HBA context (unused here).
 * @lpfc_cmd: I/O buffer whose SGL is being finalized.
 *
 * Sets the LAST bit in the second entry of the command's SGL (the entry
 * describing the FCP response) so the hardware stops the list there.
 */
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	if (sgl) {
		sgl += 1;
		/* word2 lives in memory little-endian: convert, set bit,
		 * convert back.
		 */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
143
/* Sentinel reference tag: no valid ref tag is available for the request. */
#define LPFC_INVALID_REFTAG ((u32)-1)
145
146
147
148
149
150
151
152
153
/**
 * lpfc_update_stats - Update latency statistics for a completed I/O
 * @vport: The virtual port the command completed on.
 * @lpfc_cmd: The completed I/O buffer.
 *
 * Buckets the command's completion latency (in ms) into the target
 * node's lat_data histogram.  Only successful commands are counted,
 * and only while statistics gathering is enabled and not blocked.
 */
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	/* host_lock protects the node's lat_data histogram */
	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		/* Linear buckets: fixed bucket_step width above bucket_base */
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;

		/* clamp the index into the valid bucket range */
		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		/* Power-of-2 buckets: width doubles with each bucket;
		 * falling out of the loop uses the last bucket.
		 */
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
201
202
203
204
205
206
207
208
209
210
211
212
213void
214lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
215{
216 unsigned long flags;
217 uint32_t evt_posted;
218 unsigned long expires;
219
220 spin_lock_irqsave(&phba->hbalock, flags);
221 atomic_inc(&phba->num_rsrc_err);
222 phba->last_rsrc_error_time = jiffies;
223
224 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
225 if (time_after(expires, jiffies)) {
226 spin_unlock_irqrestore(&phba->hbalock, flags);
227 return;
228 }
229
230 phba->last_ramp_down_time = jiffies;
231
232 spin_unlock_irqrestore(&phba->hbalock, flags);
233
234 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
235 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
236 if (!evt_posted)
237 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
238 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
239
240 if (!evt_posted)
241 lpfc_worker_wake_up(phba);
242 return;
243}
244
245
246
247
248
249
250
251
252
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The HBA for which this call is being executed.
 *
 * Worker-thread side of queue-depth ramp down: proportionally lowers the
 * queue depth of every SCSI device on every vport, based on the ratio of
 * resource errors to successful commands, then resets both counters.
 */
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success counters are global per driver instance.
	 * If no resource errors were seen there is nothing to ramp down.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		/* work array is sized max_vports + 1 and NULL-terminated */
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Shrink in proportion to the error rate;
				 * if the proportional amount rounds to zero,
				 * step down by one instead.
				 */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
294
295
296
297
298
299
300
301
302
/**
 * lpfc_scsi_dev_block - Block all SCSI devices on this HBA
 * @phba: Pointer to HBA context object.
 *
 * Walks every SCSI device on every vport and deletes its FC remote port,
 * which causes the FC transport to block further I/O to those targets.
 */
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
/**
 * lpfc_new_scsi_buf_s3 - SCSI buffer allocator for SLI-3 interface HBAs
 * @vport: The virtual port for which this call is being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * Allocates up to @num_to_alloc lpfc_io_buf structures.  Each buffer's
 * DMA region holds the FCP CMND, the FCP RSP and the initial BPL; the
 * CMND and RSP BDEs are set up in the BPL and the embedded IOCB is
 * pre-initialized.  Every fully set-up buffer is handed to
 * lpfc_release_scsi_buf_s3() which places it on the free list.
 *
 * Return: number of buffers actually allocated; fewer than requested
 * indicates a partial allocation failure, 0 a complete failure.
 */
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	/* space left for the BPL after the fcp_cmnd and fcp_rsp */
	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the DMA pool to map the virtual space to
		 * PCI bus space for an I/O.  The DMA buffer includes space
		 * for the struct fcp_cmnd, struct fcp_rsp and the BDEs
		 * needed to support the configured sg table size.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate an iotag for psb->cur_iocbq; 0 means exhausted */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		/* carve the DMA buffer: fcp_cmnd | fcp_rsp | BPL */
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers / bus addresses. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two BDEs are the FCP_CMD and FCP_RSP.  The
		 * balance are sg list BDEs.  Initialize the first two and
		 * leave the rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_io_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
				!(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in the immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in the response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			/* point the IOCB at the BPL instead */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
468
469
470
471
472
473
474
475
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted - Remove vport refs from aborted list
 * @vport: pointer to the lpfc virtual port.
 *
 * Walks every hardware queue's aborted-I/O list and clears the rdata
 * pointer of each SCSI buffer that belongs to @vport, so the vport can
 * be torn down without the aborted I/Os referencing stale rport data.
 * NVME buffers on the same lists are skipped.
 */
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	/* hbalock outer, per-queue abts list lock inner */
	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
506
507
508
509
510
511
512
513
514
515
/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of an aborted FCP/NVME xri
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into the hdwq array.
 *
 * Invoked to process a SLI4 fast-path aborted xri completion.  Finds the
 * matching I/O buffer on the hardware queue's aborted list, clears its
 * busy state and releases it; NVME buffers are handed off to the NVME
 * abort handler.  If no buffer matches, falls back to scanning the iotag
 * lookup table to clear the XBUSY flag on the owning iocb.
 */
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
	struct scsi_cmnd *cmd;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	/* hbalock outer, abts list lock inner */
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
		&qp->lpfc_abts_io_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
				/* NVME I/O: drop locks and delegate */
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
				return;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}

			if (phba->cfg_fcp_wait_abts_rsp) {
				spin_lock_irqsave(&psb->buf_lock, iflag);
				cmd = psb->pCmd;
				psb->pCmd = NULL;
				spin_unlock_irqrestore(&psb->buf_lock, iflag);

				/* The sdev is not guaranteed to be valid
				 * after the scsi_done upcall.
				 */
				if (cmd)
					cmd->scsi_done(cmd);

				/*
				 * An abort thread may be waiting for this
				 * command's completion; wake it up.
				 */
				spin_lock_irqsave(&psb->buf_lock, iflag);
				psb->cur_iocbq.iocb_flag &=
					~LPFC_DRIVER_ABORTED;
				if (psb->waitq)
					wake_up(psb->waitq);
				spin_unlock_irqrestore(&psb->buf_lock, iflag);
			}

			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	/* Not on the aborted list: find the owning FCP iocb by iotag */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		psb->flags &= ~LPFC_SBUF_XBUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
616
617
618
619
620
621
622
623
624
625
626
627
628
629
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from the SLI-3 free lists
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node the I/O is destined for.
 * @cmnd: SCSI command from the midlayer (unused on SLI-3).
 *
 * Takes a buffer from the "get" list; if that is empty, splices the
 * "put" list onto the "get" list (holding both locks) and retries once.
 *
 * Return: pointer to an lpfc_io_buf, or NULL if none are available.
 */
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		/* refill the get list from the put list and retry */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	/* track outstanding commands per node when depth checking applies */
	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}
658
659
660
661
662
663
664
665
666
667
668
669
670
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from an SLI-4 hardware queue
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node the I/O is destined for.
 * @cmnd: SCSI command from the midlayer; used to pick the hdwq when
 *        scheduling by hardware queue.
 *
 * Selects a hardware queue (from the blk-mq tag or the CPU map), pulls
 * an io_buf from it, re-initializes the fields other protocols may have
 * changed and builds the first two SGEs (FCP CMND and FCP RSP).
 *
 * Return: pointer to an lpfc_io_buf, or NULL if none are available.
 */
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		/* hdwq chosen from the request's blk-mq hardware context */
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/* Reset key fields in the buffer that may have been changed
	 * if another protocol used it last.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list SGEs.  Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the FCP_RSP SGE (response follows the command in DMA) */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	/* track outstanding commands per node when depth checking applies */
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return lpfc_cmd;
}
753
754
755
756
757
758
759
760
761
762
763
764
765
/**
 * lpfc_get_scsi_buf - Dispatch to the SLI-rev specific buffer allocator
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to the node the I/O is destined for.
 * @cmnd: SCSI command from the midlayer.
 *
 * Calls through the function pointer installed for this HBA's SLI rev
 * (lpfc_get_scsi_buf_s3 or lpfc_get_scsi_buf_s4).
 */
static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}
772
773
774
775
776
777
778
779
780
/**
 * lpfc_release_scsi_buf_s3 - Return an SLI-3 scsi buffer to the free list
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer being released.
 *
 * Clears per-I/O state and appends @psb to the "put" free list under
 * the put-list lock.
 */
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
795
796
797
798
799
800
801
802
803
804
805
/**
 * lpfc_release_scsi_buf_s4 - Return an SLI-4 scsi buffer to its pool
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer being released.
 *
 * If the exchange is still busy on the wire (LPFC_SBUF_XBUSY) the buffer
 * is parked on its hardware queue's aborted-I/O list until the abort
 * completion arrives; otherwise it is released to the free pool.
 */
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		/* keep pCmd if an abort thread will wait for the response */
		if (!phba->cfg_fcp_wait_abts_rsp)
			psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}
827
828
829
830
831
832
833
834
835
/**
 * lpfc_release_scsi_buf - Dispatch to the SLI-rev specific buffer release
 * @phba: The HBA for which this call is being executed.
 * @psb: The scsi buffer being released.
 *
 * Drops the per-node pending-command count bumped at allocation time,
 * then calls the release routine installed for this HBA's SLI rev.
 */
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}
845
846
847
848
849
850
851
852
853
854static void
855lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
856{
857 int i, j;
858
859 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
860 i += sizeof(uint32_t), j++) {
861 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
862 }
863}
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for a scsi buffer, SLI-3 spec
 * @phba: The HBA for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * Performs the DMA mapping of the command's scatter-gather list and
 * formats a BDE for each segment, either into the extended IOCB (when
 * SLI-3 without BlockGuard and the segment count fits) or into the BPL.
 * Also initializes the IOCB fields that depend on the request buffer.
 *
 * Return: 0 on success, 1 if dma_map_sg failed, 2 if too many segments.
 */
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * Start prep by bumping the bpl beyond the fcp_cmnd and fcp_rsp
	 * entries (set up at allocation time) to the first data BDE slot.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * Map the sg list; the returned mapping count (not the
		 * original sg count) drives the BDE formatting below.
		 */
		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg. Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * On SLI-3 (without BlockGuard or DSS) up to
		 * LPFC_EXT_DATA_BDE_COUNT BDEs fit directly in the extended
		 * IOCB; otherwise the BDEs are formatted into the BPL as in
		 * SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing the IOCB fields that depend on the request
	 * buffer.  bdeSize (SLI-2) and ebde_count (SLI-3) are explicitly
	 * reinitialized since the IOCB memory is reused across I/Os.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * Too many BDEs for the extended IOCB: the first
			 * data BDE becomes a BPL pointing at the list built
			 * above (which lives after cmnd+rsp+2 BDEs in the
			 * DMA buffer).
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of the IOCB here.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}
1004
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Result flags for lpfc_bg_err_inject(); callers combine them to decide
 * how to apply the injected corruption.
 */
/* error detected on the initiator (driver) side of the exchange */
#define BG_ERR_INIT 0x1
/* error detected on the target side of the exchange */
#define BG_ERR_TGT 0x2
/* swap CSUM<->CRC guard type to force a guard error */
#define BG_ERR_SWAP 0x10
/*
 * Guard/Ref/App checking must remain enabled so the injected error is
 * actually detected rather than silently passed through.
 */
#define BG_ERR_CHECK 0x20
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
/**
 * lpfc_bg_err_inject - Inject BlockGuard errors into a protected I/O
 * @phba: The HBA for which this call is being executed.
 * @sc: The SCSI command to examine.
 * @reftag: (inout) reference tag to corrupt, or NULL to skip ref injection.
 * @apptag: (inout) application tag to corrupt, or NULL to skip app injection.
 * @new_guard: nonzero to enable guard-tag error injection.
 *
 * Debugfs-driven error injection.  When the command matches the
 * configured LBA / NPort ID / WWPN filters and an injection counter is
 * armed, corrupts the corresponding tag (in the protection data for
 * WRITE_PASS, or via *reftag/*apptag for the other ops) and decrements
 * the counter, clearing all filters when it reaches zero.
 *
 * Return: bitwise OR of BG_ERR_* flags (0 when nothing was injected).
 */
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe;
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	u32 lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = t10_pi_ref_tag(sc->request);
	if (lba == LPFC_INVALID_REFTAG)
		return 0;

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if (phba->lpfc_injerr_lba < (u64)lba ||
		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
			return 0;
		if (sgpe) {
			/* offset of the target block within the prot sg */
			blockoff = phba->lpfc_injerr_lba - (u64)lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ ops, force the error on data
				 * being copied from SLI-Port to SLI-Host.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ ops, force the error on data
				 * being copied from SLI-Port to SLI-Host.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ ops, force the error on data
				 * being copied from SLI-Port to SLI-Host.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
1425#endif
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438static int
1439lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1440 uint8_t *txop, uint8_t *rxop)
1441{
1442 uint8_t ret = 0;
1443
1444 if (lpfc_cmd_guard_csum(sc)) {
1445 switch (scsi_get_prot_op(sc)) {
1446 case SCSI_PROT_READ_INSERT:
1447 case SCSI_PROT_WRITE_STRIP:
1448 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1449 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1450 break;
1451
1452 case SCSI_PROT_READ_STRIP:
1453 case SCSI_PROT_WRITE_INSERT:
1454 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1455 *txop = BG_OP_IN_NODIF_OUT_CRC;
1456 break;
1457
1458 case SCSI_PROT_READ_PASS:
1459 case SCSI_PROT_WRITE_PASS:
1460 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1461 *txop = BG_OP_IN_CSUM_OUT_CRC;
1462 break;
1463
1464 case SCSI_PROT_NORMAL:
1465 default:
1466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1467 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1468 scsi_get_prot_op(sc));
1469 ret = 1;
1470 break;
1471
1472 }
1473 } else {
1474 switch (scsi_get_prot_op(sc)) {
1475 case SCSI_PROT_READ_STRIP:
1476 case SCSI_PROT_WRITE_INSERT:
1477 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1478 *txop = BG_OP_IN_NODIF_OUT_CRC;
1479 break;
1480
1481 case SCSI_PROT_READ_PASS:
1482 case SCSI_PROT_WRITE_PASS:
1483 *rxop = BG_OP_IN_CRC_OUT_CRC;
1484 *txop = BG_OP_IN_CRC_OUT_CRC;
1485 break;
1486
1487 case SCSI_PROT_READ_INSERT:
1488 case SCSI_PROT_WRITE_STRIP:
1489 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1490 *txop = BG_OP_IN_CRC_OUT_NODIF;
1491 break;
1492
1493 case SCSI_PROT_NORMAL:
1494 default:
1495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1496 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1497 scsi_get_prot_op(sc));
1498 ret = 1;
1499 break;
1500 }
1501 }
1502
1503 return ret;
1504}
1505
1506#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518static int
1519lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1520 uint8_t *txop, uint8_t *rxop)
1521{
1522 uint8_t ret = 0;
1523
1524 if (lpfc_cmd_guard_csum(sc)) {
1525 switch (scsi_get_prot_op(sc)) {
1526 case SCSI_PROT_READ_INSERT:
1527 case SCSI_PROT_WRITE_STRIP:
1528 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1529 *txop = BG_OP_IN_CRC_OUT_NODIF;
1530 break;
1531
1532 case SCSI_PROT_READ_STRIP:
1533 case SCSI_PROT_WRITE_INSERT:
1534 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1535 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1536 break;
1537
1538 case SCSI_PROT_READ_PASS:
1539 case SCSI_PROT_WRITE_PASS:
1540 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1541 *txop = BG_OP_IN_CRC_OUT_CSUM;
1542 break;
1543
1544 case SCSI_PROT_NORMAL:
1545 default:
1546 break;
1547
1548 }
1549 } else {
1550 switch (scsi_get_prot_op(sc)) {
1551 case SCSI_PROT_READ_STRIP:
1552 case SCSI_PROT_WRITE_INSERT:
1553 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1554 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1555 break;
1556
1557 case SCSI_PROT_READ_PASS:
1558 case SCSI_PROT_WRITE_PASS:
1559 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1560 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1561 break;
1562
1563 case SCSI_PROT_READ_INSERT:
1564 case SCSI_PROT_WRITE_STRIP:
1565 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1566 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1567 break;
1568
1569 case SCSI_PROT_NORMAL:
1570 default:
1571 break;
1572 }
1573 }
1574
1575 return ret;
1576}
1577#endif
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
/*
 * lpfc_bg_setup_bpl - Build a BPL for a BlockGuard I/O with no separate
 * protection scatterlist (protection data is inserted/stripped by the HBA).
 *
 * Layout produced: one PDE5 (reference tag seed), one PDE6 (tx/rx
 * BlockGuard opcodes and check-enable bits), then one 64-bit BDE per
 * mapped data scatterlist entry.
 *
 * Returns the number of BDE entries written, or 0 on failure (bad
 * protection opcode combination or invalid reference tag).
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	/* resolve BlockGuard opcodes for this command's protection op */
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract the reference tag from the request */
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* debugfs error injection may swap opcodes or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup PDE5 with what we have */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion of PDE5 words */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* setup PDE6 with opcodes and check bits */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * Guard/ref check enables are only set on READs
	 * (DMA_FROM_DEVICE); writes leave ce/re clear.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);	/* auto-increment ref tag */
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion of PDE6 words */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* one BDE per mapped data segment (dma_map_sg already done) */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
/*
 * lpfc_bg_setup_bpl_prot - Build a BPL for a BlockGuard I/O that carries a
 * separate protection scatterlist (DIF tuples travel with the command).
 *
 * For each protection group this emits PDE5 (reftag seed), PDE6
 * (BlockGuard opcodes / check enables), PDE7 (protection-data BDE), then
 * the data BDEs belonging to that group.  A protection entry that crosses
 * a 4K boundary is split; the remainder is resumed on the next loop pass
 * (protgroup_offset != 0).
 *
 * Returns the number of BDEs written, or 0 on error.  When the request
 * does not fit, a value larger than phba->cfg_total_seg_cnt is returned
 * so the caller's range check fails.
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract block size and reference tag from the command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* debugfs error injection may swap opcodes or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion of PDE5 words */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with opcodes and check bits */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);	/* auto-increment ref tag */
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion of PDE6 words */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup PDE7 pointing at the protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be a multiple of the 8-byte DIF tuple size */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* if the prot entry crosses a 4K boundary, split it here */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* entry fully consumed; move to next prot entry */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* setup BDEs for the data blocks of this protection group */
		pgdone = 0;
		subtotal = 0;
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* the whole buffer fits in this group */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* split this buffer with the next group */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			if (split_offset)
				break;

			/* advance to the next data s/g segment */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* same prot entry continues; bump the reference tag */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to the next protection buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* bump the reference tag past this group */
			reftag += protgrp_blks;
		} else {
			/* curr_prot > protcnt should be impossible */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
/*
 * lpfc_bg_setup_sgl - Build an SLI-4 SGL for a BlockGuard I/O with no
 * separate protection scatterlist (HBA inserts/strips the DIF data).
 *
 * Emits one DISEED SGE (reftag seed, tx/rx opcodes, check enables)
 * followed by one data SGE per mapped data segment.  When the embedded
 * SGL would overflow, an LSP entry chains to an extra per-hardware-queue
 * SGL obtained via lpfc_get_sgl_per_hdwq().
 *
 * Returns the number of SGEs used, or 0 on failure.
 */
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	int j;
	bool lsp_just_set = false;

	/* resolve BlockGuard opcodes for this command's protection op */
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract the reference tag from the request */
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* debugfs error injection may swap opcodes or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion of the reference tag */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * Guard/ref check enables are only set on READs
	 * (DMA_FROM_DEVICE); writes leave ce/re clear.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED opcodes */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);	/* auto-increment ref tag */
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion of DISEED words */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance sgl and increment sge count */
	num_sge++;
	sgl++;

	/* setup the data SGEs */
	sgde = scsi_sglist(sc);
	j = 3;
	for (i = 0; i < datasegcnt; i++) {
		/* clear it */
		sgl->word2 = 0;

		/* chain to an extra SGL when the boundary is reached */
		if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
		    ((datasegcnt - 1) != i)) {
			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				lpfc_cmd->seg_cnt = 0;
				return 0;
			}
			sgl->addr_lo = cpu_to_le32(putPaddrLow(
					sgl_xtra->dma_phys_sgl));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(
					sgl_xtra->dma_phys_sgl));

		} else {
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		}

		if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
			/* regular data SGE; mark the final one as last */
			if ((datasegcnt - 1) == i)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			physaddr = sg_dma_address(sgde);
			dma_len = sg_dma_len(sgde);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));

			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			sgde = sg_next(sgde);

			sgl++;
			num_sge++;
			lsp_just_set = false;

		} else {
			/* finish the LSP entry, then jump to the new SGL
			 * and replay this data segment there (i rewound)
			 */
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			i = i - 1;

			lsp_just_set = true;
		}

		j++;

	}

out:
	return num_sge;
}
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
/*
 * lpfc_bg_setup_sgl_prot - Build an SLI-4 SGL for a BlockGuard I/O that
 * carries a separate protection scatterlist (DIF tuples travel with the
 * command).
 *
 * For each protection group this emits a DISEED SGE (reftag seed, opcodes,
 * check enables), a DIF SGE pointing at the protection buffer, then the
 * data SGEs for that group.  Protection entries crossing a 4K boundary are
 * split and resumed on the next pass (protgroup_offset != 0).  When the
 * embedded SGL would overflow, an LSP entry chains to an extra
 * per-hardware-queue SGL.
 *
 * Returns the number of SGEs used, or 0 on error.  When the request does
 * not fit (and extended SGLs are off), a value larger than
 * phba->cfg_total_seg_cnt is returned so the caller's range check fails.
 */
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL; /* s/g data entry */
	struct scatterlist *sgpe = NULL; /* s/g prot entry */
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0, j = 2;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract block size and reference tag from the command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* debugfs error injection may swap opcodes or disable checking */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
		    !(phba->cfg_xpsgl))
			return num_sge + 3;

		/*
		 * Chain to an extra SGL now if the next DISEED+DIF pair
		 * would land on an SGL boundary.
		 */
		if (!((j + 1) % phba->border_sge_num) ||
		    !((j + 2) % phba->border_sge_num) ||
		    !((j + 3) % phba->border_sge_num)) {
			sgl->word2 = 0;

			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				goto out;
			} else {
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));
			}

			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			j = 0;
		}

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion of the reference tag */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);
			/*
			 * With guard checking disabled, a CRC-in/CRC-out
			 * pass-through is demoted to raw mode for both
			 * directions.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}

		/* setup RE checking */
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED opcodes */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);	/* auto-inc ref tag */
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion of DISEED words */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment sge count */
		num_sge++;

		sgl++;
		j++;

		/* setup the DIF SGE pointing at the protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be a multiple of the 8-byte DIF tuple size */
		BUG_ON(protgroup_len % 8);

		/* Now setup DIF SGE */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* if the prot entry crosses a 4K boundary, split it here */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* entry fully consumed; move to next prot entry */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* setup SGEs for the data blocks of this protection group */
		pgdone = 0;
		subtotal = 0;

		sgl++;
		j++;

		while (!pgdone) {
			/* Check to see if we ran out of space */
			if ((num_sge >= phba->cfg_total_seg_cnt) &&
			    !phba->cfg_xpsgl)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}

			if (!((j + 1) % phba->border_sge_num)) {
				/* boundary hit: chain to an extra SGL */
				sgl->word2 = 0;

				/* set LSP type */
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_LSP);

				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_cmd);

				if (unlikely(!sgl_xtra)) {
					goto out;
				} else {
					sgl->addr_lo = cpu_to_le32(
					  putPaddrLow(sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(
					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
				}

				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			} else {
				dataphysaddr = sg_dma_address(sgde) +
								   split_offset;

				remainder = sg_dma_len(sgde) - split_offset;

				if ((subtotal + remainder) <= protgrp_bytes) {
					/* the whole buffer fits this group */
					dma_len = remainder;
					split_offset = 0;

					if ((subtotal + remainder) ==
								  protgrp_bytes)
						pgdone = 1;
				} else {
					/*
					 * split this buffer with the next
					 * protection group
					 */
					dma_len = protgrp_bytes - subtotal;
					split_offset += dma_len;
				}

				subtotal += dma_len;

				sgl->word2 = 0;
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
								 dataphysaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
								 dataphysaddr));
				bf_set(lpfc_sli4_sge_last, sgl, 0);
				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);

				sgl->sge_len = cpu_to_le32(dma_len);
				dma_offset += dma_len;

				num_sge++;
				curr_data++;

				if (split_offset) {
					sgl++;
					j++;
					break;
				}

				/* advance to the next data s/g segment */
				sgde = sg_next(sgde);

				sgl++;
			}

			j++;
		}

		if (protgroup_offset) {
			/* same prot entry continues; bump the reference tag */
			reftag += protgrp_blks;
			continue;
		}

		/* are we done? */
		if (curr_prot == protcnt) {
			/* back up and mark the final SGE as last */
			sgl--;
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to the next protection buffer */
			sgpe = sg_next(sgpe);

			/* bump the reference tag past this group */
			reftag += protgrp_blks;
		} else {
			/* curr_prot > protcnt should be impossible */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497static int
2498lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2499{
2500 int ret = LPFC_PG_TYPE_INVALID;
2501 unsigned char op = scsi_get_prot_op(sc);
2502
2503 switch (op) {
2504 case SCSI_PROT_READ_STRIP:
2505 case SCSI_PROT_WRITE_INSERT:
2506 ret = LPFC_PG_TYPE_NO_DIF;
2507 break;
2508 case SCSI_PROT_READ_INSERT:
2509 case SCSI_PROT_WRITE_STRIP:
2510 case SCSI_PROT_READ_PASS:
2511 case SCSI_PROT_WRITE_PASS:
2512 ret = LPFC_PG_TYPE_DIF_BUF;
2513 break;
2514 default:
2515 if (phba)
2516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2517 "9021 Unsupported protection op:%d\n",
2518 op);
2519 break;
2520 }
2521 return ret;
2522}
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534static int
2535lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2536 struct lpfc_io_buf *lpfc_cmd)
2537{
2538 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2539 int fcpdl;
2540
2541 fcpdl = scsi_bufflen(sc);
2542
2543
2544 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2545
2546 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2547 return fcpdl;
2548
2549 } else {
2550
2551 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2552 return fcpdl;
2553 }
2554
2555
2556
2557
2558
2559
2560 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2561
2562 return fcpdl;
2563}
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
2577static int
2578lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2579 struct lpfc_io_buf *lpfc_cmd)
2580{
2581 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
2582 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
2583 struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
2584 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
2585 uint32_t num_bde = 0;
2586 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
2587 int prot_group_type = 0;
2588 int fcpdl;
2589 int ret = 1;
2590 struct lpfc_vport *vport = phba->pport;
2591
2592
2593
2594
2595
2596 bpl += 2;
2597 if (scsi_sg_count(scsi_cmnd)) {
2598
2599
2600
2601
2602
2603
2604 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2605 scsi_sglist(scsi_cmnd),
2606 scsi_sg_count(scsi_cmnd), datadir);
2607 if (unlikely(!datasegcnt))
2608 return 1;
2609
2610 lpfc_cmd->seg_cnt = datasegcnt;
2611
2612
2613 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2614 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2615 ret = 2;
2616 goto err;
2617 }
2618
2619 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2620
2621 switch (prot_group_type) {
2622 case LPFC_PG_TYPE_NO_DIF:
2623
2624
2625 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2626 ret = 2;
2627 goto err;
2628 }
2629
2630 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2631 datasegcnt);
2632
2633 if (num_bde < 2) {
2634 ret = 2;
2635 goto err;
2636 }
2637 break;
2638
2639 case LPFC_PG_TYPE_DIF_BUF:
2640
2641
2642
2643
2644
2645 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2646 scsi_prot_sglist(scsi_cmnd),
2647 scsi_prot_sg_count(scsi_cmnd), datadir);
2648 if (unlikely(!protsegcnt)) {
2649 scsi_dma_unmap(scsi_cmnd);
2650 return 1;
2651 }
2652
2653 lpfc_cmd->prot_seg_cnt = protsegcnt;
2654
2655
2656
2657
2658
2659 if ((lpfc_cmd->prot_seg_cnt * 4) >
2660 (phba->cfg_total_seg_cnt - 2)) {
2661 ret = 2;
2662 goto err;
2663 }
2664
2665 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2666 datasegcnt, protsegcnt);
2667
2668 if ((num_bde < 3) ||
2669 (num_bde > phba->cfg_total_seg_cnt)) {
2670 ret = 2;
2671 goto err;
2672 }
2673 break;
2674
2675 case LPFC_PG_TYPE_INVALID:
2676 default:
2677 scsi_dma_unmap(scsi_cmnd);
2678 lpfc_cmd->seg_cnt = 0;
2679
2680 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2681 "9022 Unexpected protection group %i\n",
2682 prot_group_type);
2683 return 2;
2684 }
2685 }
2686
2687
2688
2689
2690
2691
2692
2693 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
2694 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
2695 iocb_cmd->ulpBdeCount = 1;
2696 iocb_cmd->ulpLe = 1;
2697
2698 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2699 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
2700
2701
2702
2703
2704
2705 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
2706
2707
2708
2709
2710
2711 if (iocb_cmd->un.fcpi.fcpi_XRdy &&
2712 (fcpdl < vport->cfg_first_burst_size))
2713 iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;
2714
2715 return 0;
2716err:
2717 if (lpfc_cmd->seg_cnt)
2718 scsi_dma_unmap(scsi_cmnd);
2719 if (lpfc_cmd->prot_seg_cnt)
2720 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2721 scsi_prot_sg_count(scsi_cmnd),
2722 scsi_cmnd->sc_data_direction);
2723
2724 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2725 "9023 Cannot setup S/G List for HBA"
2726 "IO segs %d/%d BPL %d SCSI %d: %d %d\n",
2727 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
2728 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2729 prot_group_type, num_bde);
2730
2731 lpfc_cmd->seg_cnt = 0;
2732 lpfc_cmd->prot_seg_cnt = 0;
2733 return ret;
2734}
2735
2736
2737
2738
2739
2740
2741static uint16_t
2742lpfc_bg_crc(uint8_t *data, int count)
2743{
2744 uint16_t crc = 0;
2745 uint16_t x;
2746
2747 crc = crc_t10dif(data, count);
2748 x = cpu_to_be16(crc);
2749 return x;
2750}
2751
2752
2753
2754
2755
2756
2757static uint16_t
2758lpfc_bg_csum(uint8_t *data, int count)
2759{
2760 uint16_t ret;
2761
2762 ret = ip_compute_csum(data, count);
2763 return ret;
2764}
2765
2766
2767
2768
2769
/*
 * lpfc_calc_bg_err - re-verify DIF tuples in software to classify a
 * BlockGuard error, then set sense data and host byte on the command.
 *
 * Walks the protection scatterlist alongside the data scatterlist,
 * checking guard tags (only while segment lengths are block-multiples),
 * reference tags, and (currently disabled) app tags.  Note err_type
 * defaults to BGS_GUARD_ERR_MASK, so if no specific mismatch is found
 * the command is still completed with a guard-tag error.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	/* default classification if nothing more specific is found */
	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* ops with no host-visible protection data: nothing to scan */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* ref and guard tags are checked; app tag checking is disabled */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* protection scatterlist provided by the SCSI midlayer */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/*
		 * Guard verification is only attempted while the data
		 * segment length is a whole multiple of the block size.
		 */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = t10_pi_ref_tag(cmd->request);
		if (start_ref_tag == LPFC_INVALID_REFTAG)
			goto out;
		start_app_tag = src->app_tag;
		len = sgpe->length;
		/* iterate over every DIF tuple in the protection list */
		while (src && protsegcnt) {
			while (len) {

				/*
				 * Escape values mean this block's
				 * protection data must not be checked.
				 */
				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
				    (src->app_tag == T10_PI_APP_ESCAPE)) {
					start_ref_tag++;
					goto skipit;
				}

				/* Guard tag: recompute over the data block */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference tag: must match expected LBA */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App tag (chk_app is currently always 0) */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * End of the data segment: advance and
				 * re-evaluate whether guard checking can
				 * continue on the next segment.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* advance to the next protection segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	/* report the classified error via sense data + DID_ABORT */
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				app_tag, start_app_tag);
	}
}
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
/**
 * lpfc_sli4_parse_bg_err - Decode BlockGuard errors from an SLI-4 WCQE
 * @phba: pointer to the HBA the command completed on.
 * @lpfc_cmd: IO buffer for the FCP command that completed with a DI error.
 * @wcqe: work completion queue entry carrying the DI error status bits.
 *
 * Builds a guard/app/ref error mask from the WCQE error bits, converts each
 * detected error into ILLEGAL_REQUEST sense data (ASC 0x10, tag-specific
 * ASCQ) with host byte DID_ABORT, and, when the hardware reported a high
 * water mark, records the failing sector in the sense information field.
 *
 * Return: 1 if a BlockGuard error was recognized and the command result was
 * set; 0 if no error bit matched, in which case lpfc_calc_bg_err() is run
 * to re-check the data/protection buffers in software.
 */
static int
lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int ret = 0;
	u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
	u32 bghm = 0;
	u32 bgstat = 0;
	u64 failing_sector = 0;

	if (status == CQE_STATUS_DI_ERROR) {
		/* Translate the per-tag WCQE error bits into a bgstat mask */
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))	/* Guard Check failed */
			bgstat |= BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))	/* App Tag Check failed */
			bgstat |= BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe))	/* Ref Tag Check failed */
			bgstat |= BGS_REFTAG_ERR_MASK;

		/* Hardware placed some data before hitting the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
			bghm = wcqe->total_data_placed;
		}

		/*
		 * DI error reported but no individual tag bit set: we cannot
		 * tell which check failed, so flag all three.
		 */
		if (!bgstat)
			bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		/* Guard tag error: sense key ILLEGAL_REQUEST, ASC/ASCQ 10/01 */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9059 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		/* Ref tag error: sense key ILLEGAL_REQUEST, ASC/ASCQ 10/03 */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9060 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		/* App tag error: sense key ILLEGAL_REQUEST, ASC/ASCQ 10/02 */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9062 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * A high water mark is present: compute the failing sector
		 * and store it in the descriptor-format sense information
		 * field (ILLEGAL_REQUEST information descriptor, bytes 7..10
		 * set up here, 8-byte LBA at offset 12).
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a byte count; scale it to a block count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* Wire data carries no DIF tuples */
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* Wire data carries an 8-byte tuple per block */
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information field */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9068 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
	return ret;
}
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
/**
 * lpfc_parse_bg_err - Decode BlockGuard errors from an SLI-3 IOCB completion
 * @phba: pointer to the HBA the command completed on.
 * @lpfc_cmd: IO buffer for the FCP command that completed with a BG error.
 * @pIocbOut: response IOCB whose sli3_bg fields carry bgstat/bghm.
 *
 * SLI-3 counterpart of lpfc_sli4_parse_bg_err(): reads bgstat/bghm straight
 * from the response IOCB, rejects invalid-profile / uninitialized-DIF-block
 * conditions with DID_ERROR, and maps guard/ref/app tag errors to
 * ILLEGAL_REQUEST sense data (ASC 0x10) with DID_ABORT.  A reported high
 * water mark is converted to the failing sector in the sense buffer.
 *
 * Return: -1 on invalid profile / uninitialized DIF block, 1 if a tag error
 * was recognized, 0 otherwise (software re-check via lpfc_calc_bg_err()).
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
		  struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;

		/* Guard tag error: sense key ILLEGAL_REQUEST, ASC/ASCQ 10/01 */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;

		/* Ref tag error: sense key ILLEGAL_REQUEST, ASC/ASCQ 10/03 */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;

		/* App tag error: sense key ILLEGAL_REQUEST, ASC/ASCQ 10/02 */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * High water mark present: compute the failing sector and
		 * store it in the descriptor-format sense information field.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm is a byte count; scale it to a block count */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* Wire data carries no DIF tuples */
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* Wire data carries an 8-byte tuple per block */
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information field */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA-map a SCSI command and build its SGL (SLI-4)
 * @phba: pointer to the HBA issuing the command.
 * @lpfc_cmd: IO buffer to prepare; its dma_sgl and WQE are filled in here.
 *
 * Maps the command's scatter-gather list for DMA and builds the SLI-4 SGE
 * list after the two pre-built FCP command/response SGEs.  When the mapped
 * segment count exceeds one SGL page and extended SGLs (cfg_xpsgl) are
 * enabled, LSP (linked SGL) entries are inserted to chain additional SGL
 * buffers obtained from lpfc_get_sgl_per_hdwq().  Also programs the PBDE /
 * performance-hint BDE in WQE words 13-15, the fcpDl length, first-burst
 * lengths for FCP writes, and OAS bits when enabled.
 *
 * Return: 0 on success, 1 on DMA-map or SGL-chaining failure (retryable),
 * 2 when the command needs more segments than the configured maximum.
 */
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	struct sli4_sge *first_data_sgl;
	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
	struct lpfc_vport *vport = phba->pport;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	struct ulp_bde64 *bde;
	bool lsp_just_set = false;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (!phba->cfg_xpsgl &&
		    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9074 BLKGRD:"
					" %s: Too many sg segments from "
					"dma_map_sg.  Config %d, seg_cnt %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements in
		 * any single scsi command.  Just run through the seg_cnt and
		 * format the sge's.  When using extended SGLs (cfg_xpsgl),
		 * every border_sge_num-th entry becomes an LSP SGE pointing
		 * at the next chained SGL buffer.
		 */
		sgel = scsi_sglist(scsi_cmnd);
		j = 2;
		for (i = 0; i < nseg; i++) {
			sgl->word2 = 0;
			if ((num_bde + 1) == nseg) {
				/* Last data SGE of the command */
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* do we need to expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_cmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_cmd->seg_cnt = 0;
						scsi_dma_unmap(scsi_cmnd);
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
				     LPFC_SGE_TYPE_LSP)) {
				/* Regular data SGE: fill in the mapping */
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(sgel);
				dma_len = sg_dma_len(sgel);
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						 physaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						 physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				sgel = sg_next(sgel);

				sgl++;
				lsp_just_set = false;

			} else {
				/*
				 * LSP SGE just written: continue filling
				 * entries from the chained SGL buffer and
				 * redo this scatterlist element (i unchanged).
				 */
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}

		/*
		 * Setup the first Payload BDE. For FCoE we just key off
		 * Performance Hints, for FC we use lpfc_enable_pbde.
		 * We populate words 13-15 of IWRITE/IREAD WQE.
		 */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
		}
	} else {
		/* No data payload: mark the fcp_rsp SGE as the last one */
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/* Word 11 */
	if (phba->cfg_enable_pbde)
		bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized.
	 * all iocb memory resources are reused.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/* First-burst applies only to native FC writes */
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    vport->cfg_first_burst_size &&
	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
		u32 init_len, total_len;

		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
		init_len = min(total_len, vport->cfg_first_burst_size);

		/* Word 4 & 5 */
		wqe->fcp_iwrite.initial_xfer_len = init_len;
		wqe->fcp_iwrite.total_xfer_len = total_len;
	} else {
		/* Word 4 */
		wqe->fcp_iwrite.total_xfer_len =
			be32_to_cpu(fcp_cmnd->fcpDl);
	}

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;

		/* Word 10 */
		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);

		if (lpfc_cmd->cur_iocbq.priority)
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (lpfc_cmd->cur_iocbq.priority << 1));
		else
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (phba->cfg_XLanePriority << 1));
	}

	return 0;
}
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475static int
3476lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3477 struct lpfc_io_buf *lpfc_cmd)
3478{
3479 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3480 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3481 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3482 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3483 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3484 uint32_t num_sge = 0;
3485 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3486 int prot_group_type = 0;
3487 int fcpdl;
3488 int ret = 1;
3489 struct lpfc_vport *vport = phba->pport;
3490
3491
3492
3493
3494
3495 if (scsi_sg_count(scsi_cmnd)) {
3496
3497
3498
3499
3500
3501
3502 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3503 scsi_sglist(scsi_cmnd),
3504 scsi_sg_count(scsi_cmnd), datadir);
3505 if (unlikely(!datasegcnt))
3506 return 1;
3507
3508 sgl += 1;
3509
3510 sgl->word2 = le32_to_cpu(sgl->word2);
3511 bf_set(lpfc_sli4_sge_last, sgl, 0);
3512 sgl->word2 = cpu_to_le32(sgl->word2);
3513
3514 sgl += 1;
3515 lpfc_cmd->seg_cnt = datasegcnt;
3516
3517
3518 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3519 !phba->cfg_xpsgl) {
3520 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3521 ret = 2;
3522 goto err;
3523 }
3524
3525 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3526
3527 switch (prot_group_type) {
3528 case LPFC_PG_TYPE_NO_DIF:
3529
3530 if (((lpfc_cmd->seg_cnt + 1) >
3531 phba->cfg_total_seg_cnt) &&
3532 !phba->cfg_xpsgl) {
3533 ret = 2;
3534 goto err;
3535 }
3536
3537 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3538 datasegcnt, lpfc_cmd);
3539
3540
3541 if (num_sge < 2) {
3542 ret = 2;
3543 goto err;
3544 }
3545 break;
3546
3547 case LPFC_PG_TYPE_DIF_BUF:
3548
3549
3550
3551
3552
3553 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3554 scsi_prot_sglist(scsi_cmnd),
3555 scsi_prot_sg_count(scsi_cmnd), datadir);
3556 if (unlikely(!protsegcnt)) {
3557 scsi_dma_unmap(scsi_cmnd);
3558 return 1;
3559 }
3560
3561 lpfc_cmd->prot_seg_cnt = protsegcnt;
3562
3563
3564
3565
3566 if (((lpfc_cmd->prot_seg_cnt * 3) >
3567 (phba->cfg_total_seg_cnt - 2)) &&
3568 !phba->cfg_xpsgl) {
3569 ret = 2;
3570 goto err;
3571 }
3572
3573 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3574 datasegcnt, protsegcnt, lpfc_cmd);
3575
3576
3577 if (num_sge < 3 ||
3578 (num_sge > phba->cfg_total_seg_cnt &&
3579 !phba->cfg_xpsgl)) {
3580 ret = 2;
3581 goto err;
3582 }
3583 break;
3584
3585 case LPFC_PG_TYPE_INVALID:
3586 default:
3587 scsi_dma_unmap(scsi_cmnd);
3588 lpfc_cmd->seg_cnt = 0;
3589
3590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3591 "9083 Unexpected protection group %i\n",
3592 prot_group_type);
3593 return 2;
3594 }
3595 }
3596
3597 switch (scsi_get_prot_op(scsi_cmnd)) {
3598 case SCSI_PROT_WRITE_STRIP:
3599 case SCSI_PROT_READ_STRIP:
3600 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3601 break;
3602 case SCSI_PROT_WRITE_INSERT:
3603 case SCSI_PROT_READ_INSERT:
3604 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3605 break;
3606 case SCSI_PROT_WRITE_PASS:
3607 case SCSI_PROT_READ_PASS:
3608 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3609 break;
3610 }
3611
3612 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3613 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3614
3615
3616 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3617 vport->cfg_first_burst_size &&
3618 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3619 u32 init_len, total_len;
3620
3621 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3622 init_len = min(total_len, vport->cfg_first_burst_size);
3623
3624
3625 wqe->fcp_iwrite.initial_xfer_len = init_len;
3626 wqe->fcp_iwrite.total_xfer_len = total_len;
3627 } else {
3628
3629 wqe->fcp_iwrite.total_xfer_len =
3630 be32_to_cpu(fcp_cmnd->fcpDl);
3631 }
3632
3633
3634
3635
3636
3637 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3638 scsi_cmnd->device->hostdata)->oas_enabled) {
3639 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3640
3641
3642 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3643 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3644 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3645 (phba->cfg_XLanePriority << 1));
3646 }
3647
3648
3649 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
3650 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3651 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
3652 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3653 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
3654 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3655
3656 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
3657 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3658
3659 return 0;
3660err:
3661 if (lpfc_cmd->seg_cnt)
3662 scsi_dma_unmap(scsi_cmnd);
3663 if (lpfc_cmd->prot_seg_cnt)
3664 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3665 scsi_prot_sg_count(scsi_cmnd),
3666 scsi_cmnd->sc_data_direction);
3667
3668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3669 "9084 Cannot setup S/G List for HBA"
3670 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3671 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3672 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3673 prot_group_type, num_sge);
3674
3675 lpfc_cmd->seg_cnt = 0;
3676 lpfc_cmd->prot_seg_cnt = 0;
3677 return ret;
3678}
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692static inline int
3693lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3694{
3695 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3696}
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711static inline int
3712lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3713{
3714 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3715}
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730static inline int
3731lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3732 uint8_t tmo)
3733{
3734 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3735}
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
3747static void
3748lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3749 struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
3750 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
3751 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
3752 uint32_t resp_info = fcprsp->rspStatus2;
3753 uint32_t scsi_status = fcprsp->rspStatus3;
3754 struct lpfc_fast_path_event *fast_path_evt = NULL;
3755 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
3756 unsigned long flags;
3757
3758 if (!pnode)
3759 return;
3760
3761
3762 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
3763 (cmnd->result == SAM_STAT_BUSY)) {
3764 fast_path_evt = lpfc_alloc_fast_evt(phba);
3765 if (!fast_path_evt)
3766 return;
3767 fast_path_evt->un.scsi_evt.event_type =
3768 FC_REG_SCSI_EVENT;
3769 fast_path_evt->un.scsi_evt.subcategory =
3770 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
3771 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
3772 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
3773 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
3774 &pnode->nlp_portname, sizeof(struct lpfc_name));
3775 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
3776 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3777 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
3778 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
3779 fast_path_evt = lpfc_alloc_fast_evt(phba);
3780 if (!fast_path_evt)
3781 return;
3782 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
3783 FC_REG_SCSI_EVENT;
3784 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
3785 LPFC_EVENT_CHECK_COND;
3786 fast_path_evt->un.check_cond_evt.scsi_event.lun =
3787 cmnd->device->lun;
3788 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
3789 &pnode->nlp_portname, sizeof(struct lpfc_name));
3790 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
3791 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3792 fast_path_evt->un.check_cond_evt.sense_key =
3793 cmnd->sense_buffer[2] & 0xf;
3794 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
3795 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
3796 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
3797 fcpi_parm &&
3798 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
3799 ((scsi_status == SAM_STAT_GOOD) &&
3800 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
3801
3802
3803
3804
3805 fast_path_evt = lpfc_alloc_fast_evt(phba);
3806 if (!fast_path_evt)
3807 return;
3808 fast_path_evt->un.read_check_error.header.event_type =
3809 FC_REG_FABRIC_EVENT;
3810 fast_path_evt->un.read_check_error.header.subcategory =
3811 LPFC_EVENT_FCPRDCHKERR;
3812 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
3813 &pnode->nlp_portname, sizeof(struct lpfc_name));
3814 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
3815 &pnode->nlp_nodename, sizeof(struct lpfc_name));
3816 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
3817 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
3818 fast_path_evt->un.read_check_error.fcpiparam =
3819 fcpi_parm;
3820 } else
3821 return;
3822
3823 fast_path_evt->vport = vport;
3824 spin_lock_irqsave(&phba->hbalock, flags);
3825 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3826 spin_unlock_irqrestore(&phba->hbalock, flags);
3827 lpfc_worker_wake_up(phba);
3828 return;
3829}
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839static void
3840lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3841{
3842
3843
3844
3845
3846
3847
3848 if (psb->seg_cnt > 0)
3849 scsi_dma_unmap(psb->pCmd);
3850 if (psb->prot_seg_cnt > 0)
3851 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3852 scsi_prot_sg_count(psb->pCmd),
3853 psb->pCmd->sc_data_direction);
3854}
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
/**
 * lpfc_handle_fcp_err - Translate an FCP RSP into a SCSI midlayer result
 * @vport: virtual port the command was issued on.
 * @lpfc_cmd: IO buffer holding the command, FCP CMND and FCP RSP.
 * @fcpi_parm: adapter-reported transfer count from the completion, used to
 *             cross-check the target-reported residual.
 *
 * Examines rspStatus2/rspStatus3 from the FCP response, validates the
 * response length, copies sense data into the midlayer sense buffer, sets
 * the residual, and converts underrun/overrun and transfer-count mismatch
 * conditions into DID_ERROR where appropriate.  The final result
 * (host byte << 16 | scsi status) is stored in cmnd->result, and a
 * fast-path error event may be queued via lpfc_send_scsi_error_event().
 */
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    uint32_t fcpi_parm)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		/* Per FCP, only 0/4/8 are valid response lengths */
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2719 Invalid response length: "
					 "tgt x%x lun x%llx cmnd x%x rsplen "
					 "x%x\n", cmnd->device->id,
					 cmnd->device->lun, cmnd->cmnd[0],
					 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		/* Sense data follows the response data, skip past it */
		if (resp_info & RSP_LEN_VALID)
		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				 "Read" : "Write"),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/* There is some issue with the LPe12000 that causes it
		 * to miscalculate the fcpi_parm and falsely trip this
		 * recovery logic.  Detect this case and don't error when true.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = host_status << 16 | scsi_status;
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
}
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050static void
4051lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4052 struct lpfc_wcqe_complete *wcqe)
4053{
4054 struct lpfc_io_buf *lpfc_cmd =
4055 (struct lpfc_io_buf *)pwqeIn->context1;
4056 struct lpfc_vport *vport = pwqeIn->vport;
4057 struct lpfc_rport_data *rdata;
4058 struct lpfc_nodelist *ndlp;
4059 struct scsi_cmnd *cmd;
4060 unsigned long flags;
4061 struct lpfc_fast_path_event *fast_path_evt;
4062 struct Scsi_Host *shost;
4063 u32 logit = LOG_FCP;
4064 u32 status, idx;
4065 unsigned long iflags = 0;
4066 u8 wait_xb_clr = 0;
4067
4068
4069 if (!lpfc_cmd) {
4070 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4071 "9032 Null lpfc_cmd pointer. No "
4072 "release, skip completion\n");
4073 return;
4074 }
4075
4076 rdata = lpfc_cmd->rdata;
4077 ndlp = rdata->pnode;
4078
4079 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4080
4081
4082
4083
4084
4085
4086 spin_lock_irqsave(&phba->hbalock, iflags);
4087 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
4088 spin_unlock_irqrestore(&phba->hbalock, iflags);
4089 }
4090
4091
4092 spin_lock(&lpfc_cmd->buf_lock);
4093
4094
4095 cmd = lpfc_cmd->pCmd;
4096 if (!cmd) {
4097 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4098 "9042 I/O completion: Not an active IO\n");
4099 spin_unlock(&lpfc_cmd->buf_lock);
4100 lpfc_release_scsi_buf(phba, lpfc_cmd);
4101 return;
4102 }
4103 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4104 if (phba->sli4_hba.hdwq)
4105 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4106
4107#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4108 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4109 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4110#endif
4111 shost = cmd->device->host;
4112
4113 status = bf_get(lpfc_wcqe_c_status, wcqe);
4114 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4115 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4116
4117 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4118 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4119 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4120 if (phba->cfg_fcp_wait_abts_rsp)
4121 wait_xb_clr = 1;
4122 }
4123
4124#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4125 if (lpfc_cmd->prot_data_type) {
4126 struct scsi_dif_tuple *src = NULL;
4127
4128 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4129
4130
4131
4132
4133 switch (lpfc_cmd->prot_data_type) {
4134 case LPFC_INJERR_REFTAG:
4135 src->ref_tag =
4136 lpfc_cmd->prot_data;
4137 break;
4138 case LPFC_INJERR_APPTAG:
4139 src->app_tag =
4140 (uint16_t)lpfc_cmd->prot_data;
4141 break;
4142 case LPFC_INJERR_GUARD:
4143 src->guard_tag =
4144 (uint16_t)lpfc_cmd->prot_data;
4145 break;
4146 default:
4147 break;
4148 }
4149
4150 lpfc_cmd->prot_data = 0;
4151 lpfc_cmd->prot_data_type = 0;
4152 lpfc_cmd->prot_data_segment = NULL;
4153 }
4154#endif
4155 if (unlikely(lpfc_cmd->status)) {
4156 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4157 (lpfc_cmd->result & IOERR_DRVR_MASK))
4158 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4159 else if (lpfc_cmd->status >= IOSTAT_CNT)
4160 lpfc_cmd->status = IOSTAT_DEFAULT;
4161 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4162 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4163 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4164 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4165 logit = 0;
4166 else
4167 logit = LOG_FCP | LOG_FCP_UNDER;
4168 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4169 "9034 FCP cmd x%x failed <%d/%lld> "
4170 "status: x%x result: x%x "
4171 "sid: x%x did: x%x oxid: x%x "
4172 "Data: x%x x%x x%x\n",
4173 cmd->cmnd[0],
4174 cmd->device ? cmd->device->id : 0xffff,
4175 cmd->device ? cmd->device->lun : 0xffff,
4176 lpfc_cmd->status, lpfc_cmd->result,
4177 vport->fc_myDID,
4178 (ndlp) ? ndlp->nlp_DID : 0,
4179 lpfc_cmd->cur_iocbq.sli4_xritag,
4180 wcqe->parameter, wcqe->total_data_placed,
4181 lpfc_cmd->cur_iocbq.iotag);
4182 }
4183
4184 switch (lpfc_cmd->status) {
4185 case IOSTAT_SUCCESS:
4186 cmd->result = DID_OK << 16;
4187 break;
4188 case IOSTAT_FCP_RSP_ERROR:
4189 lpfc_handle_fcp_err(vport, lpfc_cmd,
4190 pwqeIn->wqe.fcp_iread.total_xfer_len -
4191 wcqe->total_data_placed);
4192 break;
4193 case IOSTAT_NPORT_BSY:
4194 case IOSTAT_FABRIC_BSY:
4195 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4196 fast_path_evt = lpfc_alloc_fast_evt(phba);
4197 if (!fast_path_evt)
4198 break;
4199 fast_path_evt->un.fabric_evt.event_type =
4200 FC_REG_FABRIC_EVENT;
4201 fast_path_evt->un.fabric_evt.subcategory =
4202 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4203 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4204 if (ndlp) {
4205 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4206 &ndlp->nlp_portname,
4207 sizeof(struct lpfc_name));
4208 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4209 &ndlp->nlp_nodename,
4210 sizeof(struct lpfc_name));
4211 }
4212 fast_path_evt->vport = vport;
4213 fast_path_evt->work_evt.evt =
4214 LPFC_EVT_FASTPATH_MGMT_EVT;
4215 spin_lock_irqsave(&phba->hbalock, flags);
4216 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4217 &phba->work_list);
4218 spin_unlock_irqrestore(&phba->hbalock, flags);
4219 lpfc_worker_wake_up(phba);
4220 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4221 "9035 Fabric/Node busy FCP cmd x%x failed"
4222 " <%d/%lld> "
4223 "status: x%x result: x%x "
4224 "sid: x%x did: x%x oxid: x%x "
4225 "Data: x%x x%x x%x\n",
4226 cmd->cmnd[0],
4227 cmd->device ? cmd->device->id : 0xffff,
4228 cmd->device ? cmd->device->lun : 0xffff,
4229 lpfc_cmd->status, lpfc_cmd->result,
4230 vport->fc_myDID,
4231 (ndlp) ? ndlp->nlp_DID : 0,
4232 lpfc_cmd->cur_iocbq.sli4_xritag,
4233 wcqe->parameter,
4234 wcqe->total_data_placed,
4235 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4236 break;
4237 case IOSTAT_REMOTE_STOP:
4238 if (ndlp) {
4239
4240
4241
4242
4243 lpfc_set_rrq_active(phba, ndlp,
4244 lpfc_cmd->cur_iocbq.sli4_lxritag,
4245 0, 0);
4246 }
4247 fallthrough;
4248 case IOSTAT_LOCAL_REJECT:
4249 if (lpfc_cmd->result & IOERR_DRVR_MASK)
4250 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4251 if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
4252 lpfc_cmd->result ==
4253 IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
4254 lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
4255 lpfc_cmd->result ==
4256 IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
4257 cmd->result = DID_NO_CONNECT << 16;
4258 break;
4259 }
4260 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
4261 lpfc_cmd->result == IOERR_NO_RESOURCES ||
4262 lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
4263 lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
4264 cmd->result = DID_REQUEUE << 16;
4265 break;
4266 }
4267 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
4268 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
4269 status == CQE_STATUS_DI_ERROR) {
4270 if (scsi_get_prot_op(cmd) !=
4271 SCSI_PROT_NORMAL) {
4272
4273
4274
4275
4276 lpfc_sli4_parse_bg_err(phba, lpfc_cmd,
4277 wcqe);
4278 break;
4279 }
4280 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
4281 "9040 non-zero BGSTAT on unprotected cmd\n");
4282 }
4283 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4284 "9036 Local Reject FCP cmd x%x failed"
4285 " <%d/%lld> "
4286 "status: x%x result: x%x "
4287 "sid: x%x did: x%x oxid: x%x "
4288 "Data: x%x x%x x%x\n",
4289 cmd->cmnd[0],
4290 cmd->device ? cmd->device->id : 0xffff,
4291 cmd->device ? cmd->device->lun : 0xffff,
4292 lpfc_cmd->status, lpfc_cmd->result,
4293 vport->fc_myDID,
4294 (ndlp) ? ndlp->nlp_DID : 0,
4295 lpfc_cmd->cur_iocbq.sli4_xritag,
4296 wcqe->parameter,
4297 wcqe->total_data_placed,
4298 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
4299 fallthrough;
4300 default:
4301 if (lpfc_cmd->status >= IOSTAT_CNT)
4302 lpfc_cmd->status = IOSTAT_DEFAULT;
4303 cmd->result = DID_ERROR << 16;
4304 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
4305 "9037 FCP Completion Error: xri %x "
4306 "status x%x result x%x [x%x] "
4307 "placed x%x\n",
4308 lpfc_cmd->cur_iocbq.sli4_xritag,
4309 lpfc_cmd->status, lpfc_cmd->result,
4310 wcqe->parameter,
4311 wcqe->total_data_placed);
4312 }
4313 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
4314 u32 *lp = (u32 *)cmd->sense_buffer;
4315
4316 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
4317 "9039 Iodone <%d/%llu> cmd x%px, error "
4318 "x%x SNS x%x x%x Data: x%x x%x\n",
4319 cmd->device->id, cmd->device->lun, cmd,
4320 cmd->result, *lp, *(lp + 3), cmd->retries,
4321 scsi_get_resid(cmd));
4322 }
4323
4324 lpfc_update_stats(vport, lpfc_cmd);
4325
4326 if (vport->cfg_max_scsicmpl_time &&
4327 time_after(jiffies, lpfc_cmd->start_time +
4328 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
4329 spin_lock_irqsave(shost->host_lock, flags);
4330 if (ndlp) {
4331 if (ndlp->cmd_qdepth >
4332 atomic_read(&ndlp->cmd_pending) &&
4333 (atomic_read(&ndlp->cmd_pending) >
4334 LPFC_MIN_TGT_QDEPTH) &&
4335 (cmd->cmnd[0] == READ_10 ||
4336 cmd->cmnd[0] == WRITE_10))
4337 ndlp->cmd_qdepth =
4338 atomic_read(&ndlp->cmd_pending);
4339
4340 ndlp->last_change_time = jiffies;
4341 }
4342 spin_unlock_irqrestore(shost->host_lock, flags);
4343 }
4344 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4345
4346#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4347 if (lpfc_cmd->ts_cmd_start) {
4348 lpfc_cmd->ts_isr_cmpl = lpfc_cmd->cur_iocbq.isr_timestamp;
4349 lpfc_cmd->ts_data_io = ktime_get_ns();
4350 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4351 lpfc_io_ktime(phba, lpfc_cmd);
4352 }
4353#endif
4354 if (wait_xb_clr)
4355 goto out;
4356 lpfc_cmd->pCmd = NULL;
4357 spin_unlock(&lpfc_cmd->buf_lock);
4358
4359
4360 cmd->scsi_done(cmd);
4361
4362
4363
4364
4365
4366 spin_lock(&lpfc_cmd->buf_lock);
4367 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
4368 if (lpfc_cmd->waitq)
4369 wake_up(lpfc_cmd->waitq);
4370out:
4371 spin_unlock(&lpfc_cmd->buf_lock);
4372 lpfc_release_scsi_buf(phba, lpfc_cmd);
4373}
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
/**
 * lpfc_scsi_cmd_iocb_cmpl - SLI-3 SCSI command IOCB completion handler
 * @phba: HBA on which the command completed.
 * @pIocbIn: command IOCB; carries the lpfc_io_buf in context1.
 * @pIocbOut: response IOCB returned by the firmware.
 *
 * Translates the firmware completion status into a SCSI midlayer result,
 * logs failures, queues fast-path events for fabric/port-busy conditions,
 * unmaps the DMA buffers, completes the command via scsi_done(), wakes any
 * aborting thread waiting on the buffer, and releases the I/O buffer.
 */
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *) pIocbIn->context1;
	struct lpfc_vport *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;
	struct Scsi_Host *shost;
	int idx;
	uint32_t logit = LOG_FCP;

	/* Guard the buffer so an abort thread cannot race this completion */
	spin_lock(&lpfc_cmd->buf_lock);

	/* If pCmd was already cleared, there is no active midlayer command */
	cmd = lpfc_cmd->pCmd;
	if (!cmd || !phba) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2621 IO completion: Not an active IO\n");
		spin_unlock(&lpfc_cmd->buf_lock);
		return;
	}

	/* Per-hardware-queue completion statistics */
	idx = lpfc_cmd->cur_iocbq.hba_wqidx;
	if (phba->sli4_hba.hdwq)
		phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
	shost = cmd->device->host;

	lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & IOERR_PARAM_MASK);
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	/* Track exchange-busy so the XRI is not reused until it clears */
	lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY)
		lpfc_cmd->flags |= LPFC_SBUF_XBUSY;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->prot_data_type) {
		struct scsi_dif_tuple *src = NULL;

		src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
		/*
		 * Error-injection support: restore the DIF tuple field that
		 * was deliberately corrupted when the command was issued.
		 */
		switch (lpfc_cmd->prot_data_type) {
		case LPFC_INJERR_REFTAG:
			src->ref_tag =
				lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_APPTAG:
			src->app_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		case LPFC_INJERR_GUARD:
			src->guard_tag =
				(uint16_t)lpfc_cmd->prot_data;
			break;
		default:
			break;
		}

		lpfc_cmd->prot_data = 0;
		lpfc_cmd->prot_data_type = 0;
		lpfc_cmd->prot_data_segment = NULL;
	}
#endif

	if (unlikely(lpfc_cmd->status)) {
		/* Normalize driver-detected and out-of-range status codes */
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;
		/* Benign underruns are only logged when verbosity asks */
		if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
		    !lpfc_cmd->fcp_rsp->rspStatus3 &&
		    (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
		    !(vport->cfg_log_verbose & LOG_FCP_UNDER))
			logit = 0;
		else
			logit = LOG_FCP | LOG_FCP_UNDER;
		lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9030 FCP cmd x%x failed <%d/%lld> "
			 "status: x%x result: x%x "
			 "sid: x%x did: x%x oxid: x%x "
			 "Data: x%x x%x\n",
			 cmd->cmnd[0],
			 cmd->device ? cmd->device->id : 0xffff,
			 cmd->device ? cmd->device->lun : 0xffff,
			 lpfc_cmd->status, lpfc_cmd->result,
			 vport->fc_myDID,
			 (pnode) ? pnode->nlp_DID : 0,
			 phba->sli_rev == LPFC_SLI_REV4 ?
			     lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
			 pIocbOut->iocb.ulpContext,
			 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Let the FCP RSP parser set cmd->result */
			lpfc_handle_fcp_err(vport, lpfc_cmd,
					    pIocbOut->iocb.un.fcpi.fcpi_parm);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
			/* Report the busy condition to the event framework */
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
		case IOSTAT_REMOTE_STOP:
			if (lpfc_cmd->result == IOERR_ELXSEC_KEY_UNWRAP_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_KEY_UNWRAP_COMPARE_ERROR ||
			    lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
			    lpfc_cmd->result ==
					IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
				cmd->result = DID_NO_CONNECT << 16;
				break;
			}
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
			    lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
				cmd->result = DID_REQUEUE << 16;
				break;
			}
			if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
			     lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
			     pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
				if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
					/*
					 * BlockGuard error on a protected
					 * command: parse it so sense data and
					 * cmd->result reflect the DIF failure.
					 */
					lpfc_parse_bg_err(phba, lpfc_cmd,
							pIocbOut);
					break;
				} else {
					lpfc_printf_vlog(vport, KERN_WARNING,
							LOG_BG,
							"9031 non-zero BGSTAT "
							"on unprotected cmd\n");
				}
			}
			if ((lpfc_cmd->status == IOSTAT_REMOTE_STOP)
				&& (phba->sli_rev == LPFC_SLI_REV4)
				&& pnode) {
				/*
				 * The remote port stopped the exchange; mark
				 * the XRI in the RRQ so it is not reused
				 * against this node until it is safe.
				 */
				lpfc_set_rrq_active(phba, pnode,
					lpfc_cmd->cur_iocbq.sli4_lxritag,
					0, 0);
			}
			fallthrough;
		default:
			cmd->result = DID_ERROR << 16;
			break;
		}

		/* An unmapped node can no longer carry SCSI traffic */
		if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
				      SAM_STAT_BUSY;
	} else
		cmd->result = DID_OK << 16;

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%llu> cmd x%px, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(vport, lpfc_cmd);
	/* Optionally shrink the node queue depth on slow completions */
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(shost->host_lock, flags);
		if (pnode) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(shost->host_lock, flags);
	}
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);

	/* Detach the midlayer command while still under buf_lock */
	lpfc_cmd->pCmd = NULL;
	spin_unlock(&lpfc_cmd->buf_lock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (lpfc_cmd->ts_cmd_start) {
		lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
		lpfc_cmd->ts_data_io = ktime_get_ns();
		phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
		lpfc_io_ktime(phba, lpfc_cmd);
	}
#endif

	/* Complete the command to the SCSI midlayer */
	cmd->scsi_done(cmd);

	/*
	 * Retake buf_lock: if an abort thread is waiting on this buffer,
	 * clear the aborted flag and wake it before releasing the buffer.
	 */
	spin_lock(&lpfc_cmd->buf_lock);
	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock(&lpfc_cmd->buf_lock);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
4643
4644
4645
4646
4647
4648
4649
4650
4651
4652
4653
4654
4655
/**
 * lpfc_scsi_prep_cmnd_buf_s3 - SLI-3 IOCB setup for a SCSI command
 * @vport: virtual port issuing the command.
 * @lpfc_cmd: I/O buffer holding the SCSI command and IOCB to build.
 * @tmo: command timeout programmed into ulpTimeout.
 *
 * Selects the IOCB command (iwrite/iread/icmnd) from the data direction,
 * applies the first-burst XFER_RDY length for writes when the node
 * negotiated it, and fills in RPI, FCP2 recovery, class, and completion
 * handler fields.
 *
 * Return: always 0.
 */
static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport,
				      struct lpfc_io_buf *lpfc_cmd,
				      uint8_t tmo)
{
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &lpfc_cmd->cur_iocbq;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
	int datadir = scsi_cmnd->sc_data_direction;
	u32 fcpdl;

	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;

	/*
	 * Commands with a scatter/gather list move data; pick the IOCB
	 * opcode from the direction. Commands without one are pure
	 * control (e.g. TUR) and use the icmnd form.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			/*
			 * First burst: advertise how much write data may be
			 * sent before the target's XFER_RDY, capped at the
			 * configured burst size.
			 */
			if (vport->cfg_first_burst_size &&
			    (pnode->nlp_flag & NLP_FIRSTBURST)) {
				u32 xrdy_len;

				fcpdl = scsi_bufflen(scsi_cmnd);
				xrdy_len = min(fcpdl,
					       vport->cfg_first_burst_size);
				piocbq->iocb.un.fcpi.fcpi_XRdy = xrdy_len;
			}
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			fcp_cmnd->fcpCntl3 = READ_DATA;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
	}

	/*
	 * Finish the common IOCB fields: route to the node's RPI, enable
	 * FCP-2 error recovery if negotiated, and hook the completion.
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	if (!piocbq->iocb_cmpl)
		piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = tmo;
	piocbq->vport = vport;
	return 0;
}
4720
4721
4722
4723
4724
4725
4726
4727
4728
4729
4730
4731
4732
4733static int lpfc_scsi_prep_cmnd_buf_s4(struct lpfc_vport *vport,
4734 struct lpfc_io_buf *lpfc_cmd,
4735 uint8_t tmo)
4736{
4737 struct lpfc_hba *phba = vport->phba;
4738 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4739 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4740 struct lpfc_sli4_hdw_queue *hdwq = NULL;
4741 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
4742 struct lpfc_nodelist *pnode = lpfc_cmd->ndlp;
4743 union lpfc_wqe128 *wqe = &pwqeq->wqe;
4744 u16 idx = lpfc_cmd->hdwq_no;
4745 int datadir = scsi_cmnd->sc_data_direction;
4746
4747 hdwq = &phba->sli4_hba.hdwq[idx];
4748
4749
4750 memset(wqe, 0, sizeof(union lpfc_wqe128));
4751
4752
4753
4754
4755
4756 if (scsi_sg_count(scsi_cmnd)) {
4757 if (datadir == DMA_TO_DEVICE) {
4758
4759 memcpy(&wqe->words[7],
4760 &lpfc_iwrite_cmd_template.words[7],
4761 sizeof(uint32_t) * 5);
4762
4763 fcp_cmnd->fcpCntl3 = WRITE_DATA;
4764 if (hdwq)
4765 hdwq->scsi_cstat.output_requests++;
4766 } else {
4767
4768 memcpy(&wqe->words[7],
4769 &lpfc_iread_cmd_template.words[7],
4770 sizeof(uint32_t) * 5);
4771
4772
4773 bf_set(wqe_tmo, &wqe->fcp_iread.wqe_com, tmo);
4774
4775 fcp_cmnd->fcpCntl3 = READ_DATA;
4776 if (hdwq)
4777 hdwq->scsi_cstat.input_requests++;
4778 }
4779 } else {
4780
4781 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
4782 sizeof(uint32_t) * 8);
4783
4784
4785 bf_set(wqe_tmo, &wqe->fcp_icmd.wqe_com, tmo);
4786
4787 fcp_cmnd->fcpCntl3 = 0;
4788 if (hdwq)
4789 hdwq->scsi_cstat.control_requests++;
4790 }
4791
4792
4793
4794
4795
4796
4797
4798 bf_set(payload_offset_len, &wqe->fcp_icmd,
4799 sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
4800
4801
4802 bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
4803 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4804 bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
4805
4806
4807 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
4808 bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
4809
4810 bf_set(wqe_class, &wqe->generic.wqe_com,
4811 (pnode->nlp_fcp_info & 0x0f));
4812
4813
4814 wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
4815
4816
4817 bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
4818
4819 pwqeq->vport = vport;
4820 pwqeq->vport = vport;
4821 pwqeq->context1 = lpfc_cmd;
4822 pwqeq->hba_wqidx = lpfc_cmd->hdwq_no;
4823 pwqeq->wqe_cmpl = lpfc_fcp_io_cmd_wqe_cmpl;
4824
4825 return 0;
4826}
4827
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837static int
4838lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
4839 struct lpfc_nodelist *pnode)
4840{
4841 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
4842 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
4843 u8 *ptr;
4844
4845 if (!pnode)
4846 return 0;
4847
4848 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
4849
4850 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
4851
4852 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
4853 &lpfc_cmd->fcp_cmnd->fcp_lun);
4854
4855 ptr = &fcp_cmnd->fcpCdb[0];
4856 memcpy(ptr, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
4857 if (scsi_cmnd->cmd_len < LPFC_FCP_CDB_LEN) {
4858 ptr += scsi_cmnd->cmd_len;
4859 memset(ptr, 0, (LPFC_FCP_CDB_LEN - scsi_cmnd->cmd_len));
4860 }
4861
4862 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
4863
4864 lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, lpfc_cmd->timeout);
4865
4866 return 0;
4867}
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878
4879
4880
4881
4882
/**
 * lpfc_scsi_prep_task_mgmt_cmd - Build an FCP task management IOCB
 * @vport: virtual port issuing the TMF.
 * @lpfc_cmd: I/O buffer whose FCP_CMND and IOCB are filled in.
 * @lun: target LUN the TMF is directed at.
 * @task_mgmt_cmd: TMF code placed in fcpCntl2.
 *
 * Prepares an FCP_ICMND64 IOCB carrying a task management function
 * (e.g. LUN reset) for a mapped node.
 *
 * Return: 1 when the IOCB was prepared, 0 when the node is absent or
 * not in MAPPED state.
 */
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_io_buf *lpfc_cmd,
			     uint64_t lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	/* TMFs may only be sent to a fully mapped node */
	if (!ndlp || ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	/* SLI-3 without BlockGuard carries the FCP_CMND inline in the IOCB */
	if (vport->phba->sli_rev == 3 &&
	    !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	/* SLI-4 uses the mapped RPI id rather than the logical RPI */
	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
		piocb->ulpContext =
		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	}
	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 1 : 0;
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
	piocb->ulpPU = 0;
	piocb->un.fcpi.fcpi_parm = 0;

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Timeout does not fit in the one-byte field; zero means
		 * no timeout at the IOCB level.
		 */
		piocb->ulpTimeout = 0;
	} else
		piocb->ulpTimeout = lpfc_cmd->timeout;

	if (vport->phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);

	return 1;
}
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947int
4948lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4949{
4950
4951 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4952
4953 switch (dev_grp) {
4954 case LPFC_PCI_DEV_LP:
4955 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4956 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4957 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4958 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4959 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
4960 break;
4961 case LPFC_PCI_DEV_OC:
4962 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4963 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4964 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4965 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4966 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
4967 break;
4968 default:
4969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4970 "1418 Invalid HBA PCI-device group: 0x%x\n",
4971 dev_grp);
4972 return -ENODEV;
4973 }
4974 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
4975 phba->lpfc_scsi_cmd_iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
4976 return 0;
4977}
4978
4979
4980
4981
4982
4983
4984
4985
4986
4987
4988static void
4989lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
4990 struct lpfc_iocbq *cmdiocbq,
4991 struct lpfc_iocbq *rspiocbq)
4992{
4993 struct lpfc_io_buf *lpfc_cmd =
4994 (struct lpfc_io_buf *) cmdiocbq->context1;
4995 if (lpfc_cmd)
4996 lpfc_release_scsi_buf(phba, lpfc_cmd);
4997 return;
4998}
4999
5000
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013
5014int
5015lpfc_check_pci_resettable(struct lpfc_hba *phba)
5016{
5017 const struct pci_dev *pdev = phba->pcidev;
5018 struct pci_dev *ptr = NULL;
5019 u8 counter = 0;
5020
5021
5022 list_for_each_entry(ptr, &pdev->bus->devices, bus_list) {
5023
5024 if (ptr->vendor != PCI_VENDOR_ID_EMULEX) {
5025 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5026 "8346 Non-Emulex vendor found: "
5027 "0x%04x\n", ptr->vendor);
5028 return -EBADSLT;
5029 }
5030
5031
5032 switch (ptr->device) {
5033 case PCI_DEVICE_ID_LANCER_FC:
5034 case PCI_DEVICE_ID_LANCER_G6_FC:
5035 case PCI_DEVICE_ID_LANCER_G7_FC:
5036 break;
5037 default:
5038 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5039 "8347 Incapable PCI reset device: "
5040 "0x%04x\n", ptr->device);
5041 return -EBADSLT;
5042 }
5043
5044
5045
5046
5047 if (ptr->devfn == 0) {
5048 if (++counter > 1) {
5049 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5050 "8348 More than one device on "
5051 "secondary bus found\n");
5052 return -EBADSLT;
5053 }
5054 }
5055 }
5056
5057 return 0;
5058}
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069const char *
5070lpfc_info(struct Scsi_Host *host)
5071{
5072 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
5073 struct lpfc_hba *phba = vport->phba;
5074 int link_speed = 0;
5075 static char lpfcinfobuf[384];
5076 char tmp[384] = {0};
5077
5078 memset(lpfcinfobuf, 0, sizeof(lpfcinfobuf));
5079 if (phba && phba->pcidev){
5080
5081 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
5082 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5083 sizeof(lpfcinfobuf))
5084 goto buffer_done;
5085
5086
5087 scnprintf(tmp, sizeof(tmp),
5088 " on PCI bus %02x device %02x irq %d",
5089 phba->pcidev->bus->number, phba->pcidev->devfn,
5090 phba->pcidev->irq);
5091 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5092 sizeof(lpfcinfobuf))
5093 goto buffer_done;
5094
5095
5096 if (phba->Port[0]) {
5097 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5098 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5099 sizeof(lpfcinfobuf))
5100 goto buffer_done;
5101 }
5102
5103
5104 link_speed = lpfc_sli_port_speed_get(phba);
5105 if (link_speed != 0) {
5106 scnprintf(tmp, sizeof(tmp),
5107 " Logical Link Speed: %d Mbps", link_speed);
5108 if (strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf)) >=
5109 sizeof(lpfcinfobuf))
5110 goto buffer_done;
5111 }
5112
5113
5114 if (!lpfc_check_pci_resettable(phba)) {
5115 scnprintf(tmp, sizeof(tmp), " PCI resettable");
5116 strlcat(lpfcinfobuf, tmp, sizeof(lpfcinfobuf));
5117 }
5118 }
5119
5120buffer_done:
5121 return lpfcinfobuf;
5122}
5123
5124
5125
5126
5127
5128
5129
5130
5131static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
5132{
5133 unsigned long poll_tmo_expires =
5134 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5135
5136 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5137 mod_timer(&phba->fcp_poll_timer,
5138 poll_tmo_expires);
5139}
5140
5141
5142
5143
5144
5145
5146
/**
 * lpfc_poll_start_timer - Start the FCP ring polling timer
 * @phba: HBA whose fcp_poll_timer should be started.
 *
 * Thin wrapper around lpfc_poll_rearm_timer(), which arms the timer
 * only while the FCP ring's txcmplq is non-empty.
 */
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}
5151
5152
5153
5154
5155
5156
5157
5158
5159void lpfc_poll_timeout(struct timer_list *t)
5160{
5161 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5162
5163 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5164 lpfc_sli_handle_fast_ring_event(phba,
5165 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5166
5167 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5168 lpfc_poll_rearm_timer(phba);
5169 }
5170}
5171
5172
5173
5174
5175
5176
5177
5178
5179
5180struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
5181 u32 hash, u8 *buf)
5182{
5183 struct lpfc_vmid *vmp;
5184
5185 hash_for_each_possible(vport->hash_table, vmp, hnode, hash) {
5186 if (memcmp(&vmp->host_vmid[0], buf, 16) == 0)
5187 return vmp;
5188 }
5189 return NULL;
5190}
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200
/**
 * lpfc_put_vmid_in_hashtable - Insert a VMID entry into the hash table
 * @vport: port whose VMID hash table receives the entry.
 * @hash: hash bucket for the entry.
 * @vmp: VMID entry to insert.
 */
static void
lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
			   struct lpfc_vmid *vmp)
{
	hash_add(vport->hash_table, &vmp->hnode, hash);
}
5207
5208
5209
5210
5211
5212
5213
5214int lpfc_vmid_hash_fn(const char *vmid, int len)
5215{
5216 int c;
5217 int hash = 0;
5218
5219 if (len == 0)
5220 return 0;
5221 while (len--) {
5222 c = *vmid++;
5223 if (c >= 'A' && c <= 'Z')
5224 c += 'a' - 'A';
5225
5226 hash = (hash + (c << LPFC_VMID_HASH_SHIFT) +
5227 (c >> LPFC_VMID_HASH_SHIFT)) * 19;
5228 }
5229
5230 return hash & LPFC_VMID_HASH_MASK;
5231}
5232
5233
5234
5235
5236
5237
5238
5239
5240static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
5241 *cmd, struct lpfc_vmid *vmp,
5242 union lpfc_vmid_io_tag *tag)
5243{
5244 u64 *lta;
5245
5246 if (vport->vmid_priority_tagging)
5247 tag->cs_ctl_vmid = vmp->un.cs_ctl_vmid;
5248 else
5249 tag->app_id = vmp->un.app_id;
5250
5251 if (cmd->sc_data_direction == DMA_TO_DEVICE)
5252 vmp->io_wr_cnt++;
5253 else
5254 vmp->io_rd_cnt++;
5255
5256
5257 lta = per_cpu_ptr(vmp->last_io_time, raw_smp_processor_id());
5258 *lta = jiffies;
5259}
5260
5261static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
5262 struct lpfc_vmid *vmid)
5263{
5264 u32 hash;
5265 struct lpfc_vmid *pvmid;
5266
5267 if (vport->port_type == LPFC_PHYSICAL_PORT) {
5268 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5269 } else {
5270 hash = lpfc_vmid_hash_fn(vmid->host_vmid, vmid->vmid_len);
5271 pvmid =
5272 lpfc_get_vmid_from_hashtable(vport->phba->pport, hash,
5273 vmid->host_vmid);
5274 if (pvmid)
5275 vmid->un.cs_ctl_vmid = pvmid->un.cs_ctl_vmid;
5276 else
5277 vmid->un.cs_ctl_vmid = lpfc_vmid_get_cs_ctl(vport);
5278 }
5279}
5280
5281
5282
5283
5284
5285
5286
5287
5288
/**
 * lpfc_vmid_get_appid - Resolve (or create) the VMID tag for an I/O
 * @vport: port issuing the I/O.
 * @uuid: NUL-terminated host VMID (UUID) string for the issuing VM.
 * @cmd: SCSI command being tagged.
 * @tag: output tag filled in once the VMID is registered.
 *
 * Fast path: under the read lock, find a registered entry and stamp the
 * tag. If the entry is mid-(de)registration, back off with -EBUSY. Slow
 * path: drop to the write lock, re-check (another thread may have won),
 * claim a free slot, initialize it, allocate its per-CPU last-access
 * storage, then issue the registration (UVEM or RAPP_IDENT) with the
 * lock dropped, and finally start the inactivity poll timer if needed.
 *
 * Return: 0 on success; -EAGAIN while QFPA is outstanding; -EBUSY while
 * the entry is (de)registering; -ENOMEM with no free slot; -EIO on
 * allocation or registration failure.
 */
static int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, struct
			       scsi_cmnd * cmd, union lpfc_vmid_io_tag *tag)
{
	struct lpfc_vmid *vmp = NULL;
	int hash, len, rc, i;

	/* Priority tagging needs the QFPA exchange completed first */
	if (lpfc_vmid_is_type_priority_tag(vport) && !(vport->vmid_flag &
	    LPFC_VMID_QFPA_CMPL)) {
		vport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
		return -EAGAIN;
	}

	/* Search the per-port hash table for this UUID */
	len = strlen(uuid);
	hash = lpfc_vmid_hash_fn(uuid, len);

	/* Fast path: look up under the read lock */
	read_lock(&vport->vmid_lock);
	vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);

	/* Registered entry: just stamp the tag and bump counters */
	if (vmp  && vmp->flag & LPFC_VMID_REGISTERED) {
		read_unlock(&vport->vmid_lock);
		lpfc_vmid_update_entry(vport, cmd, vmp, tag);
		rc = 0;
	} else if (vmp && (vmp->flag & LPFC_VMID_REQ_REGISTER ||
			   vmp->flag & LPFC_VMID_DE_REGISTER)) {
		/* Registration or deregistration in flight: retry later */
		read_unlock(&vport->vmid_lock);
		rc = -EBUSY;
	} else {
		/* Slow path: entry must be created under the write lock */
		read_unlock(&vport->vmid_lock);

		write_lock(&vport->vmid_lock);
		vmp = lpfc_get_vmid_from_hashtable(vport, hash, uuid);

		/*
		 * Re-check: another thread may have created the entry
		 * between dropping the read lock and taking the write lock.
		 */
		if (vmp && vmp->flag & LPFC_VMID_REGISTERED) {
			lpfc_vmid_update_entry(vport, cmd, vmp, tag);
			write_unlock(&vport->vmid_lock);
			return 0;
		} else if (vmp && vmp->flag & LPFC_VMID_REQ_REGISTER) {
			write_unlock(&vport->vmid_lock);
			return -EBUSY;
		}

		/* Claim the first free slot in the preallocated array */
		if (vport->cur_vmid_cnt < vport->max_vmid) {
			for (i = 0; i < vport->max_vmid; i++) {
				vmp = vport->vmid + i;
				if (vmp->flag == LPFC_VMID_SLOT_FREE)
					break;
			}
			if (i == vport->max_vmid)
				vmp = NULL;
		} else {
			vmp = NULL;
		}

		if (!vmp) {
			write_unlock(&vport->vmid_lock);
			return -ENOMEM;
		}

		/* Initialize the new entry and publish it in the table */
		lpfc_put_vmid_in_hashtable(vport, hash, vmp);
		vmp->vmid_len = len;
		memcpy(vmp->host_vmid, uuid, vmp->vmid_len);
		vmp->io_rd_cnt = 0;
		vmp->io_wr_cnt = 0;
		vmp->flag = LPFC_VMID_SLOT_USED;

		vmp->delete_inactive =
			vport->vmid_inactivity_timeout ? 1 : 0;

		/* Priority tagging mode also needs a CS_CTL value */
		if (lpfc_vmid_is_type_priority_tag(vport))
			lpfc_vmid_assign_cs_ctl(vport, vmp);

		/* Per-CPU last-access storage for the inactivity reaper */
		if (!vmp->last_io_time)
			vmp->last_io_time = __alloc_percpu(sizeof(u64),
							   __alignof__(struct
							   lpfc_vmid));
		if (!vmp->last_io_time) {
			/* Roll the slot back on allocation failure */
			hash_del(&vmp->hnode);
			vmp->flag = LPFC_VMID_SLOT_FREE;
			write_unlock(&vport->vmid_lock);
			return -EIO;
		}

		write_unlock(&vport->vmid_lock);

		/* Register with the fabric: UVEM or RAPP_IDENT CT command */
		if (lpfc_vmid_is_type_priority_tag(vport))
			rc = lpfc_vmid_uvem(vport, vmp, true);
		else
			rc = lpfc_vmid_cmd(vport, SLI_CTAS_RAPP_IDENT, vmp);
		if (!rc) {
			write_lock(&vport->vmid_lock);
			vport->cur_vmid_cnt++;
			vmp->flag |= LPFC_VMID_REQ_REGISTER;
			write_unlock(&vport->vmid_lock);
		} else {
			/* Registration failed: free the slot again */
			write_lock(&vport->vmid_lock);
			hash_del(&vmp->hnode);
			vmp->flag = LPFC_VMID_SLOT_FREE;
			free_percpu(vmp->last_io_time);
			write_unlock(&vport->vmid_lock);
			return -EIO;
		}

		/* Arm the inactivity poll timer once, on the physical port */
		if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
			mod_timer(&vport->phba->inactive_vmid_poll,
				  jiffies +
				  msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
			vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
		}
	}
	return rc;
}
5418
5419
5420
5421
5422
5423
5424static char *lpfc_is_command_vm_io(struct scsi_cmnd *cmd)
5425{
5426 char *uuid = NULL;
5427
5428 if (cmd->request) {
5429 if (cmd->request->bio)
5430 uuid = blkcg_get_fc_appid(cmd->request->bio);
5431 }
5432 return uuid;
5433}
5434
5435
5436
5437
5438
5439
5440
5441
5442
5443
5444
5445
5446
5447
/**
 * lpfc_queuecommand - scsi_host_template queuecommand entry point
 * @shost: SCSI host the command was received on.
 * @cmnd:  SCSI command handed down by the mid-layer.
 *
 * Validates the remote port and node state, claims an lpfc_io_buf from the
 * driver pool, prepares the FCP command (with or without BlockGuard/DIF
 * protection data), optionally attaches a VMID tag, and issues the I/O to
 * the adapter.
 *
 * Return: 0 when the command was issued or completed back via scsi_done(),
 *   SCSI_MLQUEUE_HOST_BUSY / SCSI_MLQUEUE_TARGET_BUSY to request a retry.
 */
static int
lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_io_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err, idx;
	u8 *uuid = NULL;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t start = 0L;

	if (phba->ktime_on)
		start = ktime_get_ns();
#endif

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);

	/* Sanity-check the rport references before touching them. */
	if (unlikely(!rdata) || unlikely(!rport))
		goto out_fail_command;

	/* Fail the command back with the transport-supplied result while the
	 * remote port is blocked/not ready.
	 */
	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}
	ndlp = rdata->pnode;

	/* Protected (DIF) I/O requires BlockGuard to have been enabled on
	 * the adapter; otherwise reject the command outright.
	 */
	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
				" op:%02x str=%s without registering for"
				" BlockGuard - Rejecting command\n",
				cmnd->cmnd[0], scsi_get_prot_op(cmnd),
				dif_op_str[scsi_get_prot_op(cmnd)]);
		goto out_fail_command;
	}

	/* No node means the login is gone: ask the mid-layer to retry. */
	if (!ndlp)
		goto out_tgt_busy;
	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		/* Per-node queue depth exceeded: report target busy. */
		if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
					 "3377 Target Queue Full, scsi Id:%d "
					 "Qdepth:%d Pending command:%d"
					 " WWNN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x, "
					 " WWPN:%02x:%02x:%02x:%02x:"
					 "%02x:%02x:%02x:%02x",
					 ndlp->nlp_sid, ndlp->cmd_qdepth,
					 atomic_read(&ndlp->cmd_pending),
					 ndlp->nlp_nodename.u.wwn[0],
					 ndlp->nlp_nodename.u.wwn[1],
					 ndlp->nlp_nodename.u.wwn[2],
					 ndlp->nlp_nodename.u.wwn[3],
					 ndlp->nlp_nodename.u.wwn[4],
					 ndlp->nlp_nodename.u.wwn[5],
					 ndlp->nlp_nodename.u.wwn[6],
					 ndlp->nlp_nodename.u.wwn[7],
					 ndlp->nlp_portname.u.wwn[0],
					 ndlp->nlp_portname.u.wwn[1],
					 ndlp->nlp_portname.u.wwn[2],
					 ndlp->nlp_portname.u.wwn[3],
					 ndlp->nlp_portname.u.wwn[4],
					 ndlp->nlp_portname.u.wwn[5],
					 ndlp->nlp_portname.u.wwn[6],
					 ndlp->nlp_portname.u.wwn[7]);
			goto out_tgt_busy;
		}
	}

	/* Claim a driver I/O buffer; an empty pool means host busy. */
	lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/* Associate the SCSI command with the driver buffer; the IOCB
	 * completion is filled in later by the prep routines.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->ndlp = ndlp;
	lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;

	err = lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
	if (err)
		goto out_host_busy_release_buf;

	/* Build the DMA scatter/gather list; DIF commands go through the
	 * BlockGuard-aware variant.
	 */
	if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9033 BLKGRD: rcvd %s cmd:x%x "
					 "reftag x%x cnt %u pt %x\n",
					 dif_op_str[scsi_get_prot_op(cmnd)],
					 cmnd->cmnd[0],
					 t10_pi_ref_tag(cmnd->request),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
	} else {
		if (vport->phba->cfg_enable_bg) {
			lpfc_printf_vlog(vport,
					 KERN_INFO, LOG_SCSI_CMD,
					 "9038 BLKGRD: rcvd PROT_NORMAL cmd: "
					 "x%x reftag x%x cnt %u pt %x\n",
					 cmnd->cmnd[0],
					 t10_pi_ref_tag(cmnd->request),
					 blk_rq_sectors(cmnd->request),
					 (cmnd->cmnd[1]>>5));
		}
		err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	}

	if (unlikely(err)) {
		/* err == 2 means an unrecoverable prep failure: complete the
		 * command with DID_ERROR rather than asking for a retry.
		 */
		if (err == 2) {
			cmnd->result = DID_ERROR << 16;
			goto out_fail_command_release_buf;
		}
		goto out_host_busy_free_buf;
	}

	/* Tag the I/O with a VMID when enabled and the target (or config)
	 * supports it; failure to get an appid simply leaves it untagged.
	 */
	if (lpfc_is_vmid_enabled(phba) &&
	    (ndlp->vmid_support ||
	     phba->pport->vmid_priority_tagging ==
	     LPFC_VMID_PRIO_TAG_ALL_TARGETS)) {

		uuid = lpfc_is_command_vm_io(cmnd);

		if (uuid) {
			err = lpfc_vmid_get_appid(vport, uuid, cmnd,
				(union lpfc_vmid_io_tag *)
					&lpfc_cmd->cur_iocbq.vmid_tag);
			if (!err)
				lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_VMID;
		}
	}

	atomic_inc(&ndlp->cmd_pending);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif

	err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING,
				    &lpfc_cmd->cur_iocbq,
				    SLI_IOCB_RET_IOCB);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (start) {
		lpfc_cmd->ts_cmd_start = start;
		lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
		lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
	} else {
		lpfc_cmd->ts_cmd_start = 0;
	}
#endif
	if (err) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "3376 FCP could not issue IOCB err %x "
				 "FCP cmd x%x <%d/%llu> "
				 "sid: x%x did: x%x oxid: x%x "
				 "Data: x%x x%x x%x x%x\n",
				 err, cmnd->cmnd[0],
				 cmnd->device ? cmnd->device->id : 0xffff,
				 cmnd->device ? cmnd->device->lun : (u64)-1,
				 vport->fc_myDID, ndlp->nlp_DID,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
				 lpfc_cmd->cur_iocbq.iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iotag,
				 phba->sli_rev == LPFC_SLI_REV4 ?
				 bf_get(wqe_tmo,
				 &lpfc_cmd->cur_iocbq.wqe.generic.wqe_com) :
				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout,
				 (uint32_t)
				 (cmnd->request->timeout / 1000));

		goto out_host_busy_free_buf;
	}

	/* In polling mode, service the ring now instead of waiting for an
	 * interrupt.
	 */
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	if (phba->cfg_xri_rebalancing)
		lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);

	return 0;

 out_host_busy_free_buf:
	/* Undo DMA mapping and the per-hdwq stat increment done by the prep
	 * routines before releasing the buffer.
	 */
	idx = lpfc_cmd->hdwq_no;
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	if (phba->sli4_hba.hdwq) {
		switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
		case WRITE_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
			break;
		case READ_DATA:
			phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
			break;
		default:
			phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
		}
	}
 out_host_busy_release_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_tgt_busy:
	return SCSI_MLQUEUE_TARGET_BUSY;

 out_fail_command_release_buf:
	lpfc_release_scsi_buf(phba, lpfc_cmd);

 out_fail_command:
	cmnd->scsi_done(cmnd);
	return 0;
}
5693
5694
5695
5696
5697
5698void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport)
5699{
5700 u32 bucket;
5701 struct lpfc_vmid *cur;
5702
5703 if (vport->port_type == LPFC_PHYSICAL_PORT)
5704 del_timer_sync(&vport->phba->inactive_vmid_poll);
5705
5706 kfree(vport->qfpa_res);
5707 kfree(vport->vmid_priority.vmid_range);
5708 kfree(vport->vmid);
5709
5710 if (!hash_empty(vport->hash_table))
5711 hash_for_each(vport->hash_table, bucket, cur, hnode)
5712 hash_del(&cur->hnode);
5713
5714 vport->qfpa_res = NULL;
5715 vport->vmid_priority.vmid_range = NULL;
5716 vport->vmid = NULL;
5717 vport->cur_vmid_cnt = 0;
5718}
5719
5720
5721
5722
5723
5724
5725
5726
5727
5728
5729
/**
 * lpfc_abort_handler - SCSI eh_abort_handler entry point
 * @cmnd: command to abort.
 *
 * Attempts to abort the outstanding IOCB associated with @cmnd, then waits
 * (up to 2 * devloss_tmo) for the aborted I/O to complete.
 *
 * Lock order throughout is hbalock -> buf_lock -> ring_lock (SLI4 only).
 *
 * Return: SUCCESS or FAILED (or the fc_block_scsi_eh() status).
 */
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocb;
	struct lpfc_io_buf *lpfc_cmd;
	int ret = SUCCESS, status = 0;
	struct lpfc_sli_ring *pring_s4 = NULL;
	struct lpfc_sli_ring *pring = NULL;
	int ret_val;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	/* Wait out any transport-level blocking first. */
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/* No driver buffer attached means the I/O already completed. */
	lpfc_cmd = (struct lpfc_io_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return ret;

	spin_lock_irqsave(&phba->hbalock, flags);

	/* Driver is mass-flushing I/O (e.g. during reset): nothing to abort. */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3168 SCSI Layer abort requested I/O has been "
			"flushed by LLD.\n");
		ret = FAILED;
		goto out_unlock;
	}

	spin_lock(&lpfc_cmd->buf_lock);

	/* Buffer no longer owns a command: I/O completed meanwhile. */
	if (!lpfc_cmd->pCmd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "2873 SCSI Layer I/O Abort Request IO CMPL Status "
			 "x%x ID %d LUN %llu\n",
			 SUCCESS, cmnd->device->id, cmnd->device->lun);
		goto out_unlock_buf;
	}

	iocb = &lpfc_cmd->cur_iocbq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
		if (!pring_s4) {
			ret = FAILED;
			goto out_unlock_buf;
		}
		spin_lock(&pring_s4->ring_lock);
	}

	/* IOCB is not on the txcmplq, so it is not outstanding on the wire. */
	if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3169 SCSI Layer abort requested I/O has been "
			"cancelled by LLD.\n");
		ret = FAILED;
		goto out_unlock_ring;
	}

	/* Buffer was reused for a different command: the original one is
	 * already complete.
	 */
	if (lpfc_cmd->pCmd != cmnd) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			"3170 SCSI Layer abort requested I/O has been "
			"completed by LLD.\n");
		goto out_unlock_ring;
	}

	BUG_ON(iocb->context1 != lpfc_cmd);

	/* An abort is already in flight: just wait for its completion. */
	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "3389 SCSI Layer I/O Abort Request is pending\n");
		if (phba->sli_rev == LPFC_SLI_REV4)
			spin_unlock(&pring_s4->ring_lock);
		spin_unlock(&lpfc_cmd->buf_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		goto wait_for_cmpl;
	}

	/* Register our waitq before issuing the abort so completion can
	 * wake us.
	 */
	lpfc_cmd->waitq = &waitq;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		spin_unlock(&pring_s4->ring_lock);
		ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
						      lpfc_sli4_abort_fcp_cmpl);
	} else {
		pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
						     lpfc_sli_abort_fcp_cmpl);
	}

	/* Make sure the HBA is alive to process the abort. */
	lpfc_issue_hb_tmo(phba);

	if (ret_val != IOCB_SUCCESS) {
		/* Abort could not be issued: deregister the waitq and fail. */
		lpfc_cmd->waitq = NULL;
		spin_unlock(&lpfc_cmd->buf_lock);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		ret = FAILED;
		goto out;
	}

	spin_unlock(&lpfc_cmd->buf_lock);
	spin_unlock_irqrestore(&phba->hbalock, flags);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_handle_fast_ring_event(phba,
			&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);

wait_for_cmpl:
	/* Completion clears lpfc_cmd->pCmd and wakes us; bound the wait by
	 * twice the devloss timeout.
	 */
	wait_event_timeout(waitq,
			   (lpfc_cmd->pCmd != cmnd),
			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000));

	spin_lock(&lpfc_cmd->buf_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0748 abort handler timed out waiting "
				 "for aborting I/O (xri:x%x) to complete: "
				 "ret %#x, ID %d, LUN %llu\n",
				 iocb->sli4_xritag, ret,
				 cmnd->device->id, cmnd->device->lun);
	}

	lpfc_cmd->waitq = NULL;

	spin_unlock(&lpfc_cmd->buf_lock);
	goto out;

out_unlock_ring:
	if (phba->sli_rev == LPFC_SLI_REV4)
		spin_unlock(&pring_s4->ring_lock);
out_unlock_buf:
	spin_unlock(&lpfc_cmd->buf_lock);
out_unlock:
	spin_unlock_irqrestore(&phba->hbalock, flags);
out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %llu\n", ret, cmnd->device->id,
			 cmnd->device->lun);
	return ret;
}
5888
5889static char *
5890lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)
5891{
5892 switch (task_mgmt_cmd) {
5893 case FCP_ABORT_TASK_SET:
5894 return "ABORT_TASK_SET";
5895 case FCP_CLEAR_TASK_SET:
5896 return "FCP_CLEAR_TASK_SET";
5897 case FCP_BUS_RESET:
5898 return "FCP_BUS_RESET";
5899 case FCP_LUN_RESET:
5900 return "FCP_LUN_RESET";
5901 case FCP_TARGET_RESET:
5902 return "FCP_TARGET_RESET";
5903 case FCP_CLEAR_ACA:
5904 return "FCP_CLEAR_ACA";
5905 case FCP_TERMINATE_TASK:
5906 return "FCP_TERMINATE_TASK";
5907 default:
5908 return "unknown";
5909 }
5910}
5911
5912
5913
5914
5915
5916
5917
5918
5919
5920
5921
5922
5923
/**
 * lpfc_check_fcp_rsp - examine the FCP_RSP of a task management command
 * @vport: vport the command was issued on.
 * @lpfc_cmd: I/O buffer holding the FCP_RSP to inspect.
 *
 * Decodes the rsp-info bytes of a task management response. Only a valid
 * response length (4 or 8 bytes) with code RSP_NO_FAILURE yields SUCCESS;
 * every other case (missing rsp, reject, not-completed, invalid LUN,
 * unknown code) is treated as FAILED.
 *
 * Return: SUCCESS or FAILED.
 */
static int
lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t rsp_info;
	uint32_t rsp_len;
	uint8_t  rsp_info_code;
	int ret = FAILED;

	if (fcprsp == NULL)
		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0703 fcp_rsp is missing\n");
	else {
		rsp_info = fcprsp->rspStatus2;
		rsp_len = be32_to_cpu(fcprsp->rspRspLen);
		rsp_info_code = fcprsp->rspInfo3;

		lpfc_printf_vlog(vport, KERN_INFO,
				 LOG_FCP,
				 "0706 fcp_rsp valid 0x%x,"
				 " rsp len=%d code 0x%x\n",
				 rsp_info,
				 rsp_len, rsp_info_code);

		/* The rsp-info field is only meaningful when RSP_LEN_VALID
		 * is set and the length is one of the two legal sizes.
		 */
		if ((fcprsp->rspStatus2 & RSP_LEN_VALID) &&
		    ((rsp_len == 8) || (rsp_len == 4))) {
			switch (rsp_info_code) {
			case RSP_NO_FAILURE:
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0715 Task Mgmt No Failure\n");
				ret = SUCCESS;
				break;
			case RSP_TM_NOT_SUPPORTED: /* TM rejected by target */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0716 Task Mgmt Target "
						"reject\n");
				break;
			case RSP_TM_NOT_COMPLETED: /* TM failed at target */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0717 Task Mgmt Target "
						"failed TM\n");
				break;
			case RSP_TM_INVALID_LU: /* TM sent to invalid LU */
				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
						 "0718 Task Mgmt to invalid "
						"LUN\n");
				break;
			}
		}
	}
	return ret;
}
5982
5983
5984
5985
5986
5987
5988
5989
5990
5991
5992
5993
5994
5995
5996
5997
5998
/**
 * lpfc_send_taskmgmt - issue an FCP task management command
 * @vport: vport to issue on.
 * @cmnd: SCSI command providing the device/rport context.
 * @tgt_id: target ID of the remote device.
 * @lun_id: LUN the TMF is addressed to.
 * @task_mgmt_cmd: FCP TMF code (e.g. FCP_LUN_RESET, FCP_TARGET_RESET).
 *
 * Builds and synchronously issues the task management IOCB and evaluates
 * the response.  On IOCB_TIMEDOUT the I/O buffer is deliberately NOT
 * released here - completion still owns it.
 *
 * Return: SUCCESS, FAILED or TIMEOUT_ERROR.
 */
static int
lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
		   unsigned int tgt_id, uint64_t lun_id,
		   uint8_t task_mgmt_cmd)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	int ret;
	int status;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode)
		return FAILED;
	pnode = rdata->pnode;

	lpfc_cmd = lpfc_get_scsi_buf(phba, pnode, NULL);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->ndlp = pnode;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
					      task_mgmt_cmd);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}

	iocbq = &lpfc_cmd->cur_iocbq;
	/* A separate response IOCB is needed for the synchronous wait. */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue %s to TGT %d LUN %llu "
			 "rpi x%x nlp_flag x%x Data: x%x x%x\n",
			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
			 pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag,
			 iocbq->iocb_flag);

	/* Synchronous issue: blocks until completion or timeout. */
	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if ((status != IOCB_SUCCESS) ||
	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {
		/* FCP_RSP_ERROR may still mean TM success; don't log it as
		 * an error before the rsp has been decoded.
		 */
		if (status != IOCB_SUCCESS ||
		    iocbqrsp->iocb.ulpStatus != IOSTAT_FCP_RSP_ERROR)
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0727 TMF %s to TGT %d LUN %llu "
					 "failed (%d, %d) iocb_flag x%x\n",
					 lpfc_taskmgmt_name(task_mgmt_cmd),
					 tgt_id, lun_id,
					 iocbqrsp->iocb.ulpStatus,
					 iocbqrsp->iocb.un.ulpWord[4],
					 iocbq->iocb_flag);

		if (status == IOCB_SUCCESS) {
			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
				/* Something in the FCP_RSP was invalid.
				 * Check conditions */
				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd);
			else
				ret = FAILED;
		} else if (status == IOCB_TIMEDOUT) {
			ret = TIMEOUT_ERROR;
		} else {
			ret = FAILED;
		}
	} else
		ret = SUCCESS;

	lpfc_sli_release_iocbq(phba, iocbqrsp);

	/* On timeout the completion handler will release lpfc_cmd later. */
	if (ret != TIMEOUT_ERROR)
		lpfc_release_scsi_buf(phba, lpfc_cmd);

	return ret;
}
6085
6086
6087
6088
6089
6090
6091
6092
6093
6094
6095
6096
6097
6098static int
6099lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)
6100{
6101 struct lpfc_rport_data *rdata;
6102 struct lpfc_nodelist *pnode;
6103 unsigned long later;
6104
6105 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6106 if (!rdata) {
6107 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
6108 "0797 Tgt Map rport failure: rdata x%px\n", rdata);
6109 return FAILED;
6110 }
6111 pnode = rdata->pnode;
6112
6113
6114
6115
6116 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6117 while (time_after(later, jiffies)) {
6118 if (!pnode)
6119 return FAILED;
6120 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
6121 return SUCCESS;
6122 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
6123 rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
6124 if (!rdata)
6125 return FAILED;
6126 pnode = rdata->pnode;
6127 }
6128 if (!pnode || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
6129 return FAILED;
6130 return SUCCESS;
6131}
6132
6133
6134
6135
6136
6137
6138
6139
6140
6141
6142
6143
6144
6145
6146
6147
6148
6149static int
6150lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
6151 uint64_t lun_id, lpfc_ctx_cmd context)
6152{
6153 struct lpfc_hba *phba = vport->phba;
6154 unsigned long later;
6155 int cnt;
6156
6157 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6158 if (cnt)
6159 lpfc_sli_abort_taskmgmt(vport,
6160 &phba->sli.sli3_ring[LPFC_FCP_RING],
6161 tgt_id, lun_id, context);
6162 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
6163 while (time_after(later, jiffies) && cnt) {
6164 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
6165 cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
6166 }
6167 if (cnt) {
6168 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6169 "0724 I/O flush failure for context %s : cnt x%x\n",
6170 ((context == LPFC_CTX_LUN) ? "LUN" :
6171 ((context == LPFC_CTX_TGT) ? "TGT" :
6172 ((context == LPFC_CTX_HOST) ? "HOST" : "Unknown"))),
6173 cnt);
6174 return FAILED;
6175 }
6176 return SUCCESS;
6177}
6178
6179
6180
6181
6182
6183
6184
6185
6186
6187
6188
6189
/**
 * lpfc_device_reset_handler - SCSI eh_device_reset_handler entry point
 * @cmnd: command identifying the LUN to reset.
 *
 * Waits for the target to be mapped, posts an FC vendor LUN-reset event,
 * issues FCP_LUN_RESET and, on success, flushes all remaining I/O for the
 * LUN context.
 *
 * Return: SUCCESS or FAILED (or the fc_block_scsi_eh() status).
 */
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;
	u32 logit = LOG_FCP;

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0798 Device Reset rdata failure: rdata x%px\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/* The TMF can only be sent once the target is MAPPED. */
	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0721 Device Reset rport failure: rdata x%px\n", rdata);
		return FAILED;
	}

	/* Notify userspace management about the LUN reset. */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_LUNRESET;
	scsi_event.lun = lun_id;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
						FCP_LUN_RESET);
	if (status != SUCCESS)
		logit =  LOG_TRACE_EVENT;

	lpfc_printf_vlog(vport, KERN_ERR, logit,
			 "0713 SCSI layer issued Device Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/* After a successful reset, flush all remaining I/O on this LUN so
	 * the mid-layer does not retry commands the target already dropped.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
						LPFC_CTX_LUN);

	return status;
}
6252
6253
6254
6255
6256
6257
6258
6259
6260
6261
6262
6263
/**
 * lpfc_target_reset_handler - SCSI eh_target_reset_handler entry point
 * @cmnd: command identifying the target to reset.
 *
 * Waits for the target to be mapped, posts an FC vendor target-reset
 * event and issues FCP_TARGET_RESET.  If the TMF fails, a LOGO is issued
 * to force the target to relogin, and the handler waits (devloss_tmo) for
 * its completion.  On success all remaining I/O for the target context is
 * flushed.
 *
 * Return: SUCCESS, FAILED, FAST_IO_FAIL, or the fc_block_scsi_eh() status.
 */
static int
lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	unsigned tgt_id = cmnd->device->id;
	uint64_t lun_id = cmnd->device->lun;
	struct lpfc_scsi_event_header scsi_event;
	int status;
	u32 logit = LOG_FCP;
	unsigned long flags;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
	if (!rdata || !rdata->pnode) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0799 Target Reset rdata failure: rdata x%px\n",
				 rdata);
		return FAILED;
	}
	pnode = rdata->pnode;
	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	status = lpfc_chk_tgt_mapped(vport, cmnd);
	if (status == FAILED) {
		/* Target never (re)mapped: clear ADISC/FCP2 state, flush its
		 * I/O and report fast-fail so eh escalates quickly.
		 */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			"0722 Target Reset rport failure: rdata x%px\n", rdata);
		if (pnode) {
			spin_lock_irqsave(&pnode->lock, flags);
			pnode->nlp_flag &= ~NLP_NPR_ADISC;
			pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
			spin_unlock_irqrestore(&pnode->lock, flags);
		}
		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
		return FAST_IO_FAIL;
	}

	/* Notify userspace management about the target reset. */
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
					FCP_TARGET_RESET);
	if (status != SUCCESS)
		logit = LOG_TRACE_EVENT;
	spin_lock_irqsave(&pnode->lock, flags);
	/* TMF failed and no LOGO already outstanding: issue a LOGO to force
	 * the target to relogin, then wait for it (bounded by devloss_tmo).
	 */
	if (status != SUCCESS &&
	    (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) &&
	     !pnode->logo_waitq) {
		pnode->logo_waitq = &waitq;
		pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
		pnode->nlp_flag |= NLP_ISSUE_LOGO;
		pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
		spin_unlock_irqrestore(&pnode->lock, flags);
		lpfc_unreg_rpi(vport, pnode);
		wait_event_timeout(waitq,
				   (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)),
				   msecs_to_jiffies(vport->cfg_devloss_tmo *
						    1000));

		if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				"0725 SCSI layer TGTRST failed & LOGO TMO "
				" (%d, %llu) return x%x\n", tgt_id,
				lun_id, status);
			spin_lock_irqsave(&pnode->lock, flags);
			pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
		} else {
			spin_lock_irqsave(&pnode->lock, flags);
		}
		pnode->logo_waitq = NULL;
		spin_unlock_irqrestore(&pnode->lock, flags);
		/* NOTE(review): SUCCESS is reported here even though the TMF
		 * itself failed - the LOGO/relogin path is treated as
		 * recovery; confirm against lpfc history before changing.
		 */
		status = SUCCESS;
	} else {
		status = FAILED;
		spin_unlock_irqrestore(&pnode->lock, flags);
	}

	lpfc_printf_vlog(vport, KERN_ERR, logit,
			 "0723 SCSI layer issued Target Reset (%d, %llu) "
			 "return x%x\n", tgt_id, lun_id, status);

	/* After a successful reset, flush all remaining I/O for the target
	 * context so the mid-layer does not retry stale commands.
	 */
	if (status == SUCCESS)
		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
					  LPFC_CTX_TGT);
	return status;
}
6367
6368
6369
6370
6371
6372
6373
6374
6375
6376
6377
6378
/**
 * lpfc_bus_reset_handler - SCSI eh_bus_reset_handler entry point
 * @cmnd: command whose host identifies the bus to reset.
 *
 * Posts an FC vendor bus-reset event, then issues FCP_TARGET_RESET to
 * every mapped FCP target on the vport (skipping FCP-2 devices when
 * cfg_fcp2_no_tgt_reset is set), and finally flushes all remaining I/O
 * for the whole host context.
 *
 * Return: SUCCESS or FAILED (or the fc_block_scsi_eh() status).
 */
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_scsi_event_header scsi_event;
	int match;
	int ret = SUCCESS, status, i;
	u32 logit = LOG_FCP;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);

	status = fc_block_scsi_eh(cmnd);
	if (status != 0 && status != SUCCESS)
		return status;

	/* Walk every possible target ID and reset each mapped FCP target
	 * found under the host lock; the lock is dropped before sending
	 * the (sleeping) TMF.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for a mapped node with this target id */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			/* Optionally leave FCP-2 (e.g. tape) devices alone. */
			if (vport->phba->cfg_fcp2_no_tgt_reset &&
			    (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport &&
			    ndlp->nlp_type & NLP_FCP_TARGET) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;

		status = lpfc_send_taskmgmt(vport, cmnd,
					    i, 0, FCP_TARGET_RESET);

		if (status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}

	/* Flush everything outstanding on the host, regardless of how the
	 * per-target resets fared.
	 */
	status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
	if (status != SUCCESS)
		ret = FAILED;
	if (ret == FAILED)
		logit =  LOG_TRACE_EVENT;

	lpfc_printf_vlog(vport, KERN_ERR, logit,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
6456
6457
6458
6459
6460
6461
6462
6463
6464
6465
6466
6467
6468
6469
6470
6471
6472
6473static int
6474lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
6475{
6476 struct Scsi_Host *shost = cmnd->device->host;
6477 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
6478 struct lpfc_hba *phba = vport->phba;
6479 int rc, ret = SUCCESS;
6480
6481 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
6482 "3172 SCSI layer issued Host Reset Data:\n");
6483
6484 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6485 lpfc_offline(phba);
6486 rc = lpfc_sli_brdrestart(phba);
6487 if (rc)
6488 goto error;
6489
6490 rc = lpfc_online(phba);
6491 if (rc)
6492 goto error;
6493
6494 lpfc_unblock_mgmt_io(phba);
6495
6496 return ret;
6497error:
6498 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
6499 "3323 Failed host reset\n");
6500 lpfc_unblock_mgmt_io(phba);
6501 return FAILED;
6502}
6503
6504
6505
6506
6507
6508
6509
6510
6511
6512
6513
6514
6515
6516
/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: SCSI device being attached.
 *
 * Validates the remote port, wires up sdev->hostdata (per-LUN
 * lpfc_device_data on OAS-capable adapters, otherwise the rport data),
 * and for SLI3 adapters grows the shared SCSI buffer pool to cover the
 * new device's queue depth.
 *
 * Return: 0 on success, -ENXIO when the rport is not ready, -ENOMEM on
 *   device-data allocation failure.
 */
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0;
	uint32_t num_to_alloc = 0;
	int num_allocated = 0;
	uint32_t sdev_cnt;
	struct lpfc_device_data *device_data;
	unsigned long flags;
	struct lpfc_name target_wwpn;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (phba->cfg_fof) {
		/* OAS-capable adapter: look up (or create) the per-LUN
		 * device-data entry keyed by vport WWPN / target WWPN / LUN.
		 */
		u64_to_wwn(rport->port_name, target_wwpn.u.wwn);
		spin_lock_irqsave(&phba->devicelock, flags);
		device_data = __lpfc_get_device_data(phba,
						     &phba->luns,
						     &vport->fc_portname,
						     &target_wwpn,
						     sdev->lun);
		if (!device_data) {
			/* Drop the lock for the (possibly sleeping)
			 * allocation, then re-take it to insert the entry.
			 * NOTE(review): this window allows a concurrent
			 * creator - presumably benign here; confirm.
			 */
			spin_unlock_irqrestore(&phba->devicelock, flags);
			device_data = lpfc_create_device_data(phba,
							   &vport->fc_portname,
							   &target_wwpn,
							   sdev->lun,
							   phba->cfg_XLanePriority,
							   true);
			if (!device_data)
				return -ENOMEM;
			spin_lock_irqsave(&phba->devicelock, flags);
			list_add_tail(&device_data->listentry, &phba->luns);
		}
		device_data->rport_data = rport->dd_data;
		device_data->available = true;
		spin_unlock_irqrestore(&phba->devicelock, flags);
		sdev->hostdata = device_data;
	} else {
		sdev->hostdata = rport->dd_data;
	}
	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

	/* SLI4 manages its I/O buffers per hardware queue; nothing more
	 * to preallocate here.
	 */
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;

	/* SLI3: grow the shared buffer pool so each device can have up to
	 * cfg_lun_queue_depth commands (+2 slack) outstanding, bounded by
	 * the HBA queue depth minus the discovery IOCB reservation.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* If allocated buffers are enough for this new device, done. */
	if ((sdev_cnt * (vport->cfg_lun_queue_depth + 2)) < total)
		return 0;

	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;

	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}
	num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
	if (num_to_alloc != num_allocated) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "0708 Allocation request of %d "
					 "command buffers did not succeed.  "
					 "Allocated %d buffers.\n",
					 num_to_alloc, num_allocated);
	}
	if (num_allocated > 0)
		phba->total_scsi_bufs += num_allocated;
	return 0;
}
6619
6620
6621
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631static int
6632lpfc_slave_configure(struct scsi_device *sdev)
6633{
6634 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6635 struct lpfc_hba *phba = vport->phba;
6636
6637 scsi_change_queue_depth(sdev, vport->cfg_lun_queue_depth);
6638
6639 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6640 lpfc_sli_handle_fast_ring_event(phba,
6641 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6642 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6643 lpfc_poll_rearm_timer(phba);
6644 }
6645
6646 return 0;
6647}
6648
6649
6650
6651
6652
6653
6654
6655static void
6656lpfc_slave_destroy(struct scsi_device *sdev)
6657{
6658 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
6659 struct lpfc_hba *phba = vport->phba;
6660 unsigned long flags;
6661 struct lpfc_device_data *device_data = sdev->hostdata;
6662
6663 atomic_dec(&phba->sdev_cnt);
6664 if ((phba->cfg_fof) && (device_data)) {
6665 spin_lock_irqsave(&phba->devicelock, flags);
6666 device_data->available = false;
6667 if (!device_data->oas_enabled)
6668 lpfc_delete_device_data(phba, device_data);
6669 spin_unlock_irqrestore(&phba->devicelock, flags);
6670 }
6671 sdev->hostdata = NULL;
6672 return;
6673}
6674
6675
6676
6677
6678
6679
6680
6681
6682
6683
6684
6685
6686
6687
6688
6689
6690
6691
6692
6693
6694struct lpfc_device_data*
6695lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6696 struct lpfc_name *target_wwpn, uint64_t lun,
6697 uint32_t pri, bool atomic_create)
6698{
6699
6700 struct lpfc_device_data *lun_info;
6701 int memory_flags;
6702
6703 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6704 !(phba->cfg_fof))
6705 return NULL;
6706
6707
6708
6709 if (atomic_create)
6710 memory_flags = GFP_ATOMIC;
6711 else
6712 memory_flags = GFP_KERNEL;
6713 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6714 if (!lun_info)
6715 return NULL;
6716 INIT_LIST_HEAD(&lun_info->listentry);
6717 lun_info->rport_data = NULL;
6718 memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn,
6719 sizeof(struct lpfc_name));
6720 memcpy(&lun_info->device_id.target_wwpn, target_wwpn,
6721 sizeof(struct lpfc_name));
6722 lun_info->device_id.lun = lun;
6723 lun_info->oas_enabled = false;
6724 lun_info->priority = pri;
6725 lun_info->available = false;
6726 return lun_info;
6727}
6728
6729
6730
6731
6732
6733
6734
6735
6736
6737void
6738lpfc_delete_device_data(struct lpfc_hba *phba,
6739 struct lpfc_device_data *lun_info)
6740{
6741
6742 if (unlikely(!phba) || !lun_info ||
6743 !(phba->cfg_fof))
6744 return;
6745
6746 if (!list_empty(&lun_info->listentry))
6747 list_del(&lun_info->listentry);
6748 mempool_free(lun_info, phba->device_data_mem_pool);
6749 return;
6750}
6751
6752
6753
6754
6755
6756
6757
6758
6759
6760
6761
6762
6763
6764
6765
6766
6767
6768struct lpfc_device_data*
6769__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6770 struct lpfc_name *vport_wwpn,
6771 struct lpfc_name *target_wwpn, uint64_t lun)
6772{
6773
6774 struct lpfc_device_data *lun_info;
6775
6776 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6777 !phba->cfg_fof)
6778 return NULL;
6779
6780
6781
6782 list_for_each_entry(lun_info, list, listentry) {
6783 if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6784 sizeof(struct lpfc_name)) == 0) &&
6785 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6786 sizeof(struct lpfc_name)) == 0) &&
6787 (lun_info->device_id.lun == lun))
6788 return lun_info;
6789 }
6790
6791 return NULL;
6792}
6793
6794
6795
6796
6797
6798
6799
6800
6801
6802
6803
6804
6805
6806
6807
6808
6809
6810
6811
6812
6813
6814
6815
6816
6817
6818
6819
6820
6821bool
6822lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6823 struct lpfc_name *target_wwpn, uint64_t *starting_lun,
6824 struct lpfc_name *found_vport_wwpn,
6825 struct lpfc_name *found_target_wwpn,
6826 uint64_t *found_lun,
6827 uint32_t *found_lun_status,
6828 uint32_t *found_lun_pri)
6829{
6830
6831 unsigned long flags;
6832 struct lpfc_device_data *lun_info;
6833 struct lpfc_device_id *device_id;
6834 uint64_t lun;
6835 bool found = false;
6836
6837 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6838 !starting_lun || !found_vport_wwpn ||
6839 !found_target_wwpn || !found_lun || !found_lun_status ||
6840 (*starting_lun == NO_MORE_OAS_LUN) ||
6841 !phba->cfg_fof)
6842 return false;
6843
6844 lun = *starting_lun;
6845 *found_lun = NO_MORE_OAS_LUN;
6846 *starting_lun = NO_MORE_OAS_LUN;
6847
6848
6849
6850 spin_lock_irqsave(&phba->devicelock, flags);
6851 list_for_each_entry(lun_info, &phba->luns, listentry) {
6852 if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) ||
6853 (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn,
6854 sizeof(struct lpfc_name)) == 0)) &&
6855 ((wwn_to_u64(target_wwpn->u.wwn) == 0) ||
6856 (memcmp(&lun_info->device_id.target_wwpn, target_wwpn,
6857 sizeof(struct lpfc_name)) == 0)) &&
6858 (lun_info->oas_enabled)) {
6859 device_id = &lun_info->device_id;
6860 if ((!found) &&
6861 ((lun == FIND_FIRST_OAS_LUN) ||
6862 (device_id->lun == lun))) {
6863 *found_lun = device_id->lun;
6864 memcpy(found_vport_wwpn,
6865 &device_id->vport_wwpn,
6866 sizeof(struct lpfc_name));
6867 memcpy(found_target_wwpn,
6868 &device_id->target_wwpn,
6869 sizeof(struct lpfc_name));
6870 if (lun_info->available)
6871 *found_lun_status =
6872 OAS_LUN_STATUS_EXISTS;
6873 else
6874 *found_lun_status = 0;
6875 *found_lun_pri = lun_info->priority;
6876 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6877 memset(vport_wwpn, 0x0,
6878 sizeof(struct lpfc_name));
6879 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6880 memset(target_wwpn, 0x0,
6881 sizeof(struct lpfc_name));
6882 found = true;
6883 } else if (found) {
6884 *starting_lun = device_id->lun;
6885 memcpy(vport_wwpn, &device_id->vport_wwpn,
6886 sizeof(struct lpfc_name));
6887 memcpy(target_wwpn, &device_id->target_wwpn,
6888 sizeof(struct lpfc_name));
6889 break;
6890 }
6891 }
6892 }
6893 spin_unlock_irqrestore(&phba->devicelock, flags);
6894 return found;
6895}
6896
6897
6898
6899
6900
6901
6902
6903
6904
6905
6906
6907
6908
6909
6910
6911
6912
6913
6914
6915
6916
6917
6918bool
6919lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6920 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6921{
6922
6923 struct lpfc_device_data *lun_info;
6924 unsigned long flags;
6925
6926 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6927 !phba->cfg_fof)
6928 return false;
6929
6930 spin_lock_irqsave(&phba->devicelock, flags);
6931
6932
6933 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6934 target_wwpn, lun);
6935 if (lun_info) {
6936 if (!lun_info->oas_enabled)
6937 lun_info->oas_enabled = true;
6938 lun_info->priority = pri;
6939 spin_unlock_irqrestore(&phba->devicelock, flags);
6940 return true;
6941 }
6942
6943
6944 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6945 pri, true);
6946 if (lun_info) {
6947 lun_info->oas_enabled = true;
6948 lun_info->priority = pri;
6949 lun_info->available = false;
6950 list_add_tail(&lun_info->listentry, &phba->luns);
6951 spin_unlock_irqrestore(&phba->devicelock, flags);
6952 return true;
6953 }
6954 spin_unlock_irqrestore(&phba->devicelock, flags);
6955 return false;
6956}
6957
6958
6959
6960
6961
6962
6963
6964
6965
6966
6967
6968
6969
6970
6971
6972
6973
6974
6975
6976
6977
6978bool
6979lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6980 struct lpfc_name *target_wwpn, uint64_t lun, uint8_t pri)
6981{
6982
6983 struct lpfc_device_data *lun_info;
6984 unsigned long flags;
6985
6986 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6987 !phba->cfg_fof)
6988 return false;
6989
6990 spin_lock_irqsave(&phba->devicelock, flags);
6991
6992
6993 lun_info = __lpfc_get_device_data(phba,
6994 &phba->luns, vport_wwpn,
6995 target_wwpn, lun);
6996 if (lun_info) {
6997 lun_info->oas_enabled = false;
6998 lun_info->priority = pri;
6999 if (!lun_info->available)
7000 lpfc_delete_device_data(phba, lun_info);
7001 spin_unlock_irqrestore(&phba->devicelock, flags);
7002 return true;
7003 }
7004
7005 spin_unlock_irqrestore(&phba->devicelock, flags);
7006 return false;
7007}
7008
/* queuecommand stub for lpfc_template_nvme: always asks the midlayer to
 * retry later, so no SCSI command is ever accepted through this template.
 */
static int
lpfc_no_command(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
	return SCSI_MLQUEUE_HOST_BUSY;
}
7014
/* Error-handler stub for lpfc_template_nvme: every eh_* callback reports
 * FAILED since there is no SCSI I/O to recover through this template.
 */
static int
lpfc_no_handler(struct scsi_cmnd *cmnd)
{
	return FAILED;
}
7020
/* slave_alloc/slave_configure stub for lpfc_template_nvme: rejects every
 * scsi_device, preventing SCSI device attachment through this template.
 */
static int
lpfc_no_slave(struct scsi_device *sdev)
{
	return -ENODEV;
}
7026
/*
 * SCSI host template with every command and error-handler entry point
 * wired to a failing stub (lpfc_no_command / lpfc_no_handler /
 * lpfc_no_slave): the midlayer can register the host but can never issue
 * SCSI I/O through it.  Presumably used when the port runs NVMe-only
 * (name suggests so) — confirm against where this template is selected.
 */
struct scsi_host_template lpfc_template_nvme = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_no_command,
	.eh_abort_handler	= lpfc_no_handler,
	.eh_device_reset_handler = lpfc_no_handler,
	.eh_target_reset_handler = lpfc_no_handler,
	.eh_bus_reset_handler	= lpfc_no_handler,
	.eh_host_reset_handler	= lpfc_no_handler,
	.slave_alloc		= lpfc_no_slave,
	.slave_configure	= lpfc_no_slave,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= 1,
	.cmd_per_lun		= 1,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFFFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.track_queue_depth	= 0,
};
7049
/*
 * Fully functional SCSI host template: real queuecommand and error-handler
 * entry points, FC transport timeout/retry hooks, slave lifecycle callbacks,
 * and dynamic queue-depth support via scsi_change_queue_depth.
 */
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.proc_name		= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_timed_out		= fc_eh_timed_out,
	.eh_should_retry_cmd    = fc_eh_should_retry_cmd,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler = lpfc_device_reset_handler,
	.eh_target_reset_handler = lpfc_target_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.eh_host_reset_handler  = lpfc_host_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFFFFFF,
	.vendor_id		= LPFC_NL_VENDOR_ID,
	.change_queue_depth	= scsi_change_queue_depth,
	.track_queue_depth	= 1,
};
7076