1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/export.h>
27#include <linux/delay.h>
28#include <asm/unaligned.h>
29#include <linux/t10-pi.h>
30#include <linux/crc-t10dif.h>
31#include <linux/blk-cgroup.h>
32#include <net/checksum.h>
33
34#include <scsi/scsi.h>
35#include <scsi/scsi_device.h>
36#include <scsi/scsi_eh.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_tcq.h>
39#include <scsi/scsi_transport_fc.h>
40
41#include "lpfc_version.h"
42#include "lpfc_hw4.h"
43#include "lpfc_hw.h"
44#include "lpfc_sli.h"
45#include "lpfc_sli4.h"
46#include "lpfc_nl.h"
47#include "lpfc_disc.h"
48#include "lpfc.h"
49#include "lpfc_scsi.h"
50#include "lpfc_logmsg.h"
51#include "lpfc_crtn.h"
52#include "lpfc_vport.h"
53
54#define LPFC_RESET_WAIT 2
55#define LPFC_ABORT_WAIT 2
56
/* Human-readable names for the SCSI protection operations, indexed by the
 * scsi_get_prot_op() return value (SCSI_PROT_NORMAL .. SCSI_PROT_WRITE_PASS).
 */
static char *dif_op_str[] = {
	"PROT_NORMAL",
	"PROT_READ_INSERT",
	"PROT_WRITE_STRIP",
	"PROT_READ_STRIP",
	"PROT_WRITE_INSERT",
	"PROT_READ_PASS",
	"PROT_WRITE_PASS",
};
66
/* T10-DIF protection information tuple as carried on the wire;
 * all fields are big-endian.
 */
struct scsi_dif_tuple {
	__be16 guard_tag;	/* Checksum of the data block */
	__be16 app_tag;		/* Opaque, application-defined storage */
	__be32 ref_tag;		/* Target LBA or indirect LBA */
};
72
73static struct lpfc_rport_data *
74lpfc_rport_data_from_scsi_device(struct scsi_device *sdev)
75{
76 struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata;
77
78 if (vport->phba->cfg_fof)
79 return ((struct lpfc_device_data *)sdev->hostdata)->rport_data;
80 else
81 return (struct lpfc_rport_data *)sdev->hostdata;
82}
83
84static void
85lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
86static void
87lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
88static int
89lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
90static void
91lpfc_put_vmid_in_hashtable(struct lpfc_vport *vport, u32 hash,
92 struct lpfc_vmid *vmp);
93static void lpfc_vmid_update_entry(struct lpfc_vport *vport, struct scsi_cmnd
94 *cmd, struct lpfc_vmid *vmp,
95 union lpfc_vmid_io_tag *tag);
96static void lpfc_vmid_assign_cs_ctl(struct lpfc_vport *vport,
97 struct lpfc_vmid *vmid);
98
/**
 * lpfc_cmd_blksize - Return the logical block size of the command's device
 * @sc: The SCSI command being examined.
 */
static inline unsigned
lpfc_cmd_blksize(struct scsi_cmnd *sc)
{
	return sc->device->sector_size;
}
104
#define LPFC_CHECK_PROTECT_GUARD 1
#define LPFC_CHECK_PROTECT_REF 2
static inline unsigned
lpfc_cmd_protect(struct scsi_cmnd *sc, int flag)
{
	/* Guard/ref checking is unconditionally enabled; both @sc and
	 * @flag are currently ignored.
	 */
	return 1;
}
112
/*
 * Return 1 when the host uses an IP-checksum (DIX) guard for @sc,
 * 0 when the guard is T10 CRC or no DIF protection is in effect.
 */
static inline unsigned
lpfc_cmd_guard_csum(struct scsi_cmnd *sc)
{
	if (lpfc_prot_group_type(NULL, sc) == LPFC_PG_TYPE_NO_DIF)
		return 0;
	if (scsi_host_get_guard(sc->device->host) == SHOST_DIX_GUARD_IP)
		return 1;
	return 0;
}
122
123
124
125
126
127
128
129
130
/**
 * lpfc_sli4_set_rsp_sgl_last - Set the last bit in the response sge
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * Marks the second SGE of @lpfc_cmd's sgl (the FCP response entry) as
 * the last entry in the list.
 **/
static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
			   struct lpfc_io_buf *lpfc_cmd)
{
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (sgl) {
		sgl += 1;	/* advance past the FCP CMD sge to the RSP sge */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
	}
}
143
144#define LPFC_INVALID_REFTAG ((u32)-1)
145
146
147
148
149
150
151
152
153
/**
 * lpfc_update_stats - Update statistical data for the command completion
 * @vport: The virtual port on which this call is executing.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * Called on command completion; buckets the command's latency into the
 * per-node histogram (pnode->lat_data) under shost->host_lock.  Only
 * successful commands are counted, and only while statistics gathering
 * is enabled and not blocked.
 **/
static void
lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	unsigned long latency;
	int i;

	if (!vport->stat_data_enabled ||
	    vport->stat_data_blocked ||
	    (cmd->result))
		return;

	latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
	rdata = lpfc_cmd->rdata;
	pnode = rdata->pnode;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!pnode ||
	    !pnode->lat_data ||
	    (phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		/* Linear buckets: index by (latency - base) / step, clamped */
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;

		if (i < 0)
			i = 0;
		else if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		/* Power-of-two buckets: first bucket whose bound covers it */
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
201
202
203
204
205
206
207
208
209
210
211
212
213void
214lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
215{
216 unsigned long flags;
217 uint32_t evt_posted;
218 unsigned long expires;
219
220 spin_lock_irqsave(&phba->hbalock, flags);
221 atomic_inc(&phba->num_rsrc_err);
222 phba->last_rsrc_error_time = jiffies;
223
224 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
225 if (time_after(expires, jiffies)) {
226 spin_unlock_irqrestore(&phba->hbalock, flags);
227 return;
228 }
229
230 phba->last_ramp_down_time = jiffies;
231
232 spin_unlock_irqrestore(&phba->hbalock, flags);
233
234 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
235 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
236 if (!evt_posted)
237 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
238 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
239
240 if (!evt_posted)
241 lpfc_worker_wake_up(phba);
242 return;
243}
244
245
246
247
248
249
250
251
252
/**
 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
 * @phba: The Hba for which this call is being executed.
 *
 * Called by the worker thread to process a WORKER_RAMP_DOWN_QUEUE event.
 * Reduces the queue depth of every scsi device on every vport of the HBA,
 * proportionally to the recent resource-error rate, then resets the
 * error/success counters.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	/*
	 * The error and success command counters are global per
	 * driver instance.  If another handler has already
	 * operated on this error event, just exit.
	 */
	if (num_rsrc_err == 0)
		return;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				/* Scale current depth by the error fraction */
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
						new_queue_depth;
				scsi_change_queue_depth(sdev, new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
294
295
296
297
298
299
300
301
302
/**
 * lpfc_scsi_dev_block - set all scsi hosts to block state
 * @phba: Pointer to HBA context object.
 *
 * Walks the vport list and sets each SCSI host to block state by
 * invoking fc_remote_port_delete() on every remote port.  Invoked e.g.
 * when the device's PCI slot has been disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
/**
 * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
 * @vport: The virtual port for which this call being executed.
 * @num_to_alloc: The requested number of buffers to allocate.
 *
 * Allocates scsi buffers for a device with SLI-3 interface spec.  Each
 * buffer contains everything needed to initiate a SCSI I/O: the
 * fcp_cmnd, fcp_rsp and a BPL scatter-gather list carved out of one DMA
 * pool allocation, plus a pre-initialized IOCB.
 *
 * Return: number of scsi buffers actually allocated (may be fewer than
 * @num_to_alloc on allocation failure; 0 = total failure).
 **/
static int
lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_sgl;
	uint16_t iotag;
	int bcnt, bpl_size;

	bpl_size = phba->cfg_sg_dma_buf_size -
		(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
			 num_to_alloc, phba->cfg_sg_dma_buf_size,
			 (int)sizeof(struct fcp_cmnd),
			 (int)sizeof(struct fcp_rsp), bpl_size);

	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
		psb = kzalloc(sizeof(struct lpfc_io_buf), GFP_KERNEL);
		if (!psb)
			break;

		/*
		 * Get memory from the pci pool to map the virt space to pci
		 * bus space for an I/O.  The DMA buffer includes space for
		 * the struct fcp_cmnd, struct fcp_rsp and the number of
		 * bde's necessary to support the sg_tablesize.
		 */
		psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
					GFP_KERNEL, &psb->dma_handle);
		if (!psb->data) {
			kfree(psb);
			break;
		}

		/* Allocate iotag for psb->cur_iocbq. */
		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
		if (iotag == 0) {
			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
				      psb->data, psb->dma_handle);
			kfree(psb);
			break;
		}
		psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

		/* Carve the cmd, rsp and sgl regions out of the DMA buffer */
		psb->fcp_cmnd = psb->data;
		psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
		psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/* Initialize local short-hand pointers. */
		bpl = (struct ulp_bde64 *)psb->dma_sgl;
		pdma_phys_fcp_cmd = psb->dma_handle;
		pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
		pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

		/*
		 * The first two bdes are the FCP_CMD and FCP_RSP.  The
		 * balance are sg list bdes.  Initialize the first two and
		 * leave the rest for queuecommand.
		 */
		bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
		bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
		bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
		bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);

		/* Setup the physical region for the FCP RSP */
		bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
		bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
		bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
		bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);

		/*
		 * Since the IOCB for the FCP I/O is built into this
		 * lpfc_scsi_buf, initialize it with all known data now.
		 */
		iocb = &psb->cur_iocbq.iocb;
		iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
		if ((phba->sli_rev == 3) &&
		    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
			/* fill in immediate fcp command BDE */
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
			iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
			iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
					unsli3.fcp_ext.icd);
			iocb->un.fcpi64.bdl.addrHigh = 0;
			iocb->ulpBdeCount = 0;
			iocb->ulpLe = 0;
			/* fill in response BDE */
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
							BUFF_TYPE_BDE_64;
			iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
				sizeof(struct fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrLow =
				putPaddrLow(pdma_phys_fcp_rsp);
			iocb->unsli3.fcp_ext.rbde.addrHigh =
				putPaddrHigh(pdma_phys_fcp_rsp);
		} else {
			iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
			iocb->un.fcpi64.bdl.bdeSize =
					(2 * sizeof(struct ulp_bde64));
			iocb->un.fcpi64.bdl.addrLow =
					putPaddrLow(pdma_phys_sgl);
			iocb->un.fcpi64.bdl.addrHigh =
					putPaddrHigh(pdma_phys_sgl);
			iocb->ulpBdeCount = 1;
			iocb->ulpLe = 1;
		}
		iocb->ulpClass = CLASS3;
		psb->status = IOSTAT_SUCCESS;
		/* Put it back into the SCSI buffer list */
		psb->cur_iocbq.context1 = psb;
		spin_lock_init(&psb->buf_lock);
		lpfc_release_scsi_buf_s3(phba, psb);

	}

	return bcnt;
}
468
469
470
471
472
473
474
475
/**
 * lpfc_sli4_vport_delete_fcp_xri_aborted -Remove all ndlp references for vport
 * @vport: pointer to lpfc vport data structure.
 *
 * Clears the rdata pointer of every aborted FCP io buffer whose node
 * belongs to @vport, across all hardware queues.  Invoked during vport
 * cleanup/deletion.
 **/
void
lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	int idx;

	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	spin_lock_irqsave(&phba->hbalock, iflag);
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_for_each_entry_safe(psb, next_psb,
					 &qp->lpfc_abts_io_buf_list, list) {
			/* NVME buffers share this list; skip them */
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME)
				continue;

			if (psb->rdata && psb->rdata->pnode &&
			    psb->rdata->pnode->vport == vport)
				psb->rdata = NULL;
		}
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
506
507
508
509
510
511
512
513
514
515
/**
 * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the fcp xri abort wcqe structure.
 * @idx: index into hdwq
 *
 * Invoked by the worker thread to process a SLI4 fast-path FCP or NVME
 * aborted xri: the matching io buffer is removed from the abort list,
 * RRQ handling is performed, and the buffer is released.  If no buffer
 * matches, the iotag lookup table is scanned to clear the XBUSY flag.
 **/
void
lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
			 struct sli4_wcqe_xri_aborted *axri, int idx)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_io_buf *psb, *next_psb;
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;
	struct lpfc_iocbq *iocbq;
	int i;
	struct lpfc_nodelist *ndlp;
	int rrq_empty = 0;
	struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
	struct scsi_cmnd *cmd;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return;

	qp = &phba->sli4_hba.hdwq[idx];
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&qp->abts_io_buf_list_lock);
	list_for_each_entry_safe(psb, next_psb,
				 &qp->lpfc_abts_io_buf_list, list) {
		if (psb->cur_iocbq.sli4_xritag == xri) {
			list_del_init(&psb->list);
			psb->flags &= ~LPFC_SBUF_XBUSY;
			psb->status = IOSTAT_SUCCESS;
			if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) {
				/* NVME buffer: hand off to the NVME path */
				qp->abts_nvme_io_bufs--;
				spin_unlock(&qp->abts_io_buf_list_lock);
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
				return;
			}
			qp->abts_scsi_io_bufs--;
			spin_unlock(&qp->abts_io_buf_list_lock);

			if (psb->rdata && psb->rdata->pnode)
				ndlp = psb->rdata->pnode;
			else
				ndlp = NULL;

			rrq_empty = list_empty(&phba->active_rrq_list);
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			if (ndlp) {
				lpfc_set_rrq_active(phba, ndlp,
					psb->cur_iocbq.sli4_lxritag, rxid, 1);
				lpfc_sli4_abts_err_handler(phba, ndlp, axri);
			}

			if (phba->cfg_fcp_wait_abts_rsp) {
				spin_lock_irqsave(&psb->buf_lock, iflag);
				cmd = psb->pCmd;
				psb->pCmd = NULL;
				spin_unlock_irqrestore(&psb->buf_lock, iflag);

				/* The sdev is not guaranteed to be valid post
				 * scsi_done upcall.
				 */
				if (cmd)
					cmd->scsi_done(cmd);

				/*
				 * We expect there is an abort thread waiting
				 * for command completion wake up the thread.
				 */
				spin_lock_irqsave(&psb->buf_lock, iflag);
				psb->cur_iocbq.iocb_flag &=
					~LPFC_DRIVER_ABORTED;
				if (psb->waitq)
					wake_up(psb->waitq);
				spin_unlock_irqrestore(&psb->buf_lock, iflag);
			}

			lpfc_release_scsi_buf_s4(phba, psb);
			if (rrq_empty)
				lpfc_worker_wake_up(phba);
			return;
		}
	}
	spin_unlock(&qp->abts_io_buf_list_lock);
	/* No buffer on the abort list matched; scan the iotag table */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
		    (iocbq->iocb_flag & LPFC_IO_LIBDFC))
			continue;
		if (iocbq->sli4_xritag != xri)
			continue;
		psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		psb->flags &= ~LPFC_SBUF_XBUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (!list_empty(&pring->txq))
			lpfc_worker_wake_up(phba);
		return;

	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}
616
617
618
619
620
621
622
623
624
625
626
627
628
629
/**
 * lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure (unused in the s3 path).
 *
 * Removes a scsi buffer from the head of @phba's get list, refilling it
 * from the put list when empty.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
	list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_io_buf,
			 list);
	if (!lpfc_cmd) {
		/* Get list exhausted: swap in the put list and retry */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice(&phba->lpfc_scsi_buf_list_put,
			    &phba->lpfc_scsi_buf_list_get);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
		list_remove_head(scsi_buf_list_get, lpfc_cmd,
				 struct lpfc_io_buf, list);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);

	/* NOTE(review): the qdepth check is evaluated even when no buffer
	 * was obtained (lpfc_cmd tested second).  Presumed harmless since
	 * the helper appears side-effect free - confirm before reordering.
	 */
	if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
658
659
660
661
662
663
664
665
666
667
668
669
670
/**
 * lpfc_get_scsi_buf_s4 - Get a scsi buffer from io_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Removes a scsi buffer from the hardware queue's io_buf_list, selecting
 * the hdwq either from the block-mq hardware queue tag or from the CPU
 * map, and initializes the buffer's FCP CMD/RSP SGEs.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
static struct lpfc_io_buf *
lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     struct scsi_cmnd *cmnd)
{
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_sli4_hdw_queue *qp;
	struct sli4_sge *sgl;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_fcp_cmd;
	uint32_t cpu, idx;
	int tag;
	struct fcp_cmd_rsp_buf *tmp = NULL;

	cpu = raw_smp_processor_id();
	if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
		/* Scheduling by hardware queue: derive hdwq from the mq tag */
		tag = blk_mq_unique_tag(cmnd->request);
		idx = blk_mq_unique_tag_to_hwq(tag);
	} else {
		idx = phba->sli4_hba.cpu_map[cpu].hdwq;
	}

	lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
				   !phba->cfg_xri_rebalancing);
	if (!lpfc_cmd) {
		qp = &phba->sli4_hba.hdwq[idx];
		qp->empty_io_bufs++;
		return NULL;
	}

	/*
	 * Setup key fields in buffer that may have been changed
	 * if other protocols used this buffer.
	 */
	lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	lpfc_cmd->prot_seg_cnt = 0;
	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->flags = 0;
	lpfc_cmd->start_time = jiffies;
	lpfc_cmd->waitq = NULL;
	lpfc_cmd->cpu = cpu;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	lpfc_cmd->prot_data_type = 0;
#endif
	tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
	if (!tmp) {
		lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
		return NULL;
	}

	lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd;
	lpfc_cmd->fcp_rsp = tmp->fcp_rsp;

	/*
	 * The first two SGEs are the FCP_CMD and FCP_RSP.
	 * The balance are sg list bdes. Initialize the
	 * first two and leave the rest for queuecommand.
	 */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle;
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 0);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
	sgl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
	sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
	sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
	sgl->word2 = le32_to_cpu(sgl->word2);
	bf_set(lpfc_sli4_sge_last, sgl, 1);
	sgl->word2 = cpu_to_le32(sgl->word2);
	sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

	if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
		atomic_inc(&ndlp->cmd_pending);
		lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
	}
	return  lpfc_cmd;
}
753
754
755
756
757
758
759
760
761
762
763
764
765
/**
 * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to a node-list data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * Dispatches to the SLI-rev specific allocator installed in
 * phba->lpfc_get_scsi_buf (the _s3 or _s4 variant).
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
static struct lpfc_io_buf*
lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		  struct scsi_cmnd *cmnd)
{
	return  phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
}
772
773
774
775
776
777
778
779
780
/**
 * lpfc_release_scsi_buf_s3 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * Releases @psb by adding it to the tail of @phba's lpfc_scsi_buf_list_put
 * list after clearing its per-I/O state.
 **/
static void
lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	psb->pCmd = NULL;
	psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
}
795
796
797
798
799
800
801
802
803
804
805
/**
 * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * Releases @psb to its hardware queue.  A buffer whose exchange is still
 * busy (LPFC_SBUF_XBUSY) is parked on the abort list instead, since for
 * SLI4 the XRI cannot be reused until the abort completes.
 **/
static void
lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag = 0;

	psb->seg_cnt = 0;
	psb->prot_seg_cnt = 0;

	qp = psb->hdwq;
	if (psb->flags & LPFC_SBUF_XBUSY) {
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		/* Keep pCmd when waiting for the ABTS response */
		if (!phba->cfg_fcp_wait_abts_rsp)
			psb->pCmd = NULL;
		list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list);
		qp->abts_scsi_io_bufs++;
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
	} else {
		lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
	}
}
827
828
829
830
831
832
833
834
835
/**
 * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * Undoes the per-node qdepth accounting taken at allocation, then
 * dispatches to the SLI-rev specific release routine.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
{
	/* Drop the outstanding-command bump taken in lpfc_get_scsi_buf_* */
	if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
		atomic_dec(&psb->ndlp->cmd_pending);

	psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
	phba->lpfc_release_scsi_buf(phba, psb);
}
845
846
847
848
849
850
851
852
853
854static void
855lpfc_fcpcmd_to_iocb(u8 *data, struct fcp_cmnd *fcp_cmnd)
856{
857 int i, j;
858
859 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
860 i += sizeof(uint32_t), j++) {
861 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
862 }
863}
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
/**
 * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * Does the pci dma mapping for the scatter-gather list of the scsi cmnd
 * field of @lpfc_cmd for a device with SLI-3 interface spec.  Scans
 * through the sg elements and formats the bdes, and initializes the
 * IOCB fields that depend on the scsi command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment,
	 * use the single mapping, or neither.  Start the lpfc command prep
	 * by bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the
	 * first data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from
		 * dma_map_sg because this is a count of dma-mappings used
		 * to map the use_sg pages.  They are not guaranteed to be
		 * the same for those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9064 BLKGRD: %s: Too many sg segments"
					" from dma_map_sg. Config %d, seg_cnt"
					" %d\n", __func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements
		 * in any single scsi command.  Just run through the seg_cnt
		 * and format the bde's.  When using SLI-3 the driver will
		 * try to fit all the BDEs into the IOCB; if it can't then
		 * the BDEs get added to a BPL as it does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are
	 * reused.
	 */
	if (phba->sli_rev == 3 &&
	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a
			 * BPL.  This I/O has more than 3 BDE so the 1st data
			 * bde will be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/*
	 * Due to difference in data length between DIF/non-DIF paths,
	 * we need to set word 4 of IOCB here
	 */
	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
	lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	return 0;
}
1004
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS

/* Return BG_ERR_INIT if error injection is detected by Initiator */
#define BG_ERR_INIT 0x1
/* Return BG_ERR_TGT if error injection is detected by Target */
#define BG_ERR_TGT 0x2
/* Return BG_ERR_SWAP if swapping CSUM<->CRC is required for error injection */
#define BG_ERR_SWAP 0x10
/*
 * Return BG_ERR_CHECK if disabling Guard/Ref/App checking is required for
 * error injection
 */
#define BG_ERR_CHECK 0x20

/**
 * lpfc_bg_err_inject - Determine if we should inject an error
 * @phba: The Hba for which this call is being executed.
 * @sc: The SCSI command to examine
 * @reftag: (out) BlockGuard reference tag for transmitted data
 * @apptag: (out) BlockGuard application tag for transmitted data
 * @new_guard: (in) Value to replace CRC with if needed
 *
 * Debug-FS error-injection hook: depending on the configured injection
 * counters (lpfc_injerr_*), corrupts the ref tag, app tag and/or guard
 * for the matching LBA/NPortID/WWPN so that BlockGuard error paths can
 * be exercised.
 *
 * Returns BG_ERR_* bit mask or 0 if request ignored
 **/
static int
lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
{
	struct scatterlist *sgpe;	/* s/g prot entry */
	struct lpfc_io_buf *lpfc_cmd = NULL;
	struct scsi_dif_tuple *src = NULL;
	struct lpfc_nodelist *ndlp;
	struct lpfc_rport_data *rdata;
	uint32_t op = scsi_get_prot_op(sc);
	uint32_t blksize;
	uint32_t numblks;
	u32 lba;
	int rc = 0;
	int blockoff = 0;

	if (op == SCSI_PROT_NORMAL)
		return 0;

	sgpe = scsi_prot_sglist(sc);
	lba = t10_pi_ref_tag(sc->request);
	if (lba == LPFC_INVALID_REFTAG)
		return 0;

	/* First check if we need to match the LBA */
	if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
		blksize = lpfc_cmd_blksize(sc);
		numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;

		/* Make sure we have the right LBA if one is specified */
		if (phba->lpfc_injerr_lba < (u64)lba ||
		    (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
			return 0;
		if (sgpe) {
			blockoff = phba->lpfc_injerr_lba - (u64)lba;
			numblks = sg_dma_len(sgpe) /
				sizeof(struct scsi_dif_tuple);
			if (numblks < blockoff)
				blockoff = numblks;
		}
	}

	/* Next check if we need to match the remote NPortID or WWPN */
	rdata = lpfc_rport_data_from_scsi_device(sc->device);
	if (rdata && rdata->pnode) {
		ndlp = rdata->pnode;

		/* Make sure we have the right NPortID if one is specified */
		if (phba->lpfc_injerr_nportid &&
		    (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
			return 0;

		/*
		 * Make sure we have the right WWPN if one is specified.
		 * wwn[0] should be a non-zero NAA in a good WWPN.
		 */
		if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
		    (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
			sizeof(struct lpfc_name)) != 0))
			return 0;
	}

	/* Setup a ptr to the protection data if the SCSI host provides it */
	if (sgpe) {
		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		src += blockoff;
		lpfc_cmd = (struct lpfc_io_buf *)sc->host_scribble;
	}

	/* Should we change the Reference Tag */
	if (reftag) {
		if (phba->lpfc_injerr_wref_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9076 BLKGRD: Injecting reftag error: "
					"write lba x%lx + x%x oldrefTag x%x\n",
					(unsigned long)lba, blockoff,
					be32_to_cpu(src->ref_tag));

					/*
					 * Save the old ref_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_REFTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->ref_tag;
					}
					src->ref_tag = cpu_to_be32(0xDEADBEEF);
					phba->lpfc_injerr_wref_cnt--;
					if (phba->lpfc_injerr_wref_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;

					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the error
				 * to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEADBEEF will be the reftag on the wire */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9078 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_wref_cnt--;
				if (phba->lpfc_injerr_wref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9077 BLKGRD: Injecting reftag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rref_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*reftag = 0xDEADBEEF;
				phba->lpfc_injerr_rref_cnt--;
				if (phba->lpfc_injerr_rref_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9079 BLKGRD: Injecting reftag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}

	/* Should we change the Application Tag */
	if (apptag) {
		if (phba->lpfc_injerr_wapp_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				if (src) {
					/*
					 * For WRITE_PASS, force the error
					 * to be sent on the wire. It should
					 * be detected by the Target.
					 * If blockoff != 0 error will be
					 * inserted in middle of the IO.
					 */
					lpfc_printf_log(phba, KERN_ERR,
							LOG_TRACE_EVENT,
					"9080 BLKGRD: Injecting apptag error: "
					"write lba x%lx + x%x oldappTag x%x\n",
					(unsigned long)lba, blockoff,
					be16_to_cpu(src->app_tag));

					/*
					 * Save the old app_tag so we can
					 * restore it on completion.
					 */
					if (lpfc_cmd) {
						lpfc_cmd->prot_data_type =
							LPFC_INJERR_APPTAG;
						lpfc_cmd->prot_data_segment =
							src;
						lpfc_cmd->prot_data =
							src->app_tag;
					}
					src->app_tag = cpu_to_be16(0xDEAD);
					phba->lpfc_injerr_wapp_cnt--;
					if (phba->lpfc_injerr_wapp_cnt == 0) {
						phba->lpfc_injerr_nportid = 0;
						phba->lpfc_injerr_lba =
							LPFC_INJERR_LBA_OFF;
						memset(&phba->lpfc_injerr_wwpn,
						  0, sizeof(struct lpfc_name));
					}
					rc = BG_ERR_TGT | BG_ERR_CHECK;
					break;
				}
				fallthrough;
			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				/* DEAD will be the apptag on the wire */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_TGT | BG_ERR_CHECK;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0813 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_wapp_cnt--;
				if (phba->lpfc_injerr_wapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0812 BLKGRD: Injecting apptag error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rapp_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				*apptag = 0xDEAD;
				phba->lpfc_injerr_rapp_cnt--;
				if (phba->lpfc_injerr_rapp_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}
				rc = BG_ERR_INIT;

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0814 BLKGRD: Injecting apptag error: "
					"read lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
	}


	/* Should we change the Guard Tag */
	if (new_guard) {
		if (phba->lpfc_injerr_wgrd_cnt) {
			switch (op) {
			case SCSI_PROT_WRITE_PASS:
				rc = BG_ERR_CHECK;
				fallthrough;

			case SCSI_PROT_WRITE_INSERT:
				/*
				 * For WRITE_INSERT, force the
				 * error to be sent on the wire. It should be
				 * detected by the Target.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc |= BG_ERR_TGT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0817 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			case SCSI_PROT_WRITE_STRIP:
				/*
				 * For WRITE_STRIP and WRITE_PASS,
				 * force the error on data
				 * being copied from SLI-Host to SLI-Port.
				 */
				phba->lpfc_injerr_wgrd_cnt--;
				if (phba->lpfc_injerr_wgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0816 BLKGRD: Injecting guard error: "
					"write lba x%lx\n", (unsigned long)lba);
				break;
			}
		}
		if (phba->lpfc_injerr_rgrd_cnt) {
			switch (op) {
			case SCSI_PROT_READ_INSERT:
			case SCSI_PROT_READ_STRIP:
			case SCSI_PROT_READ_PASS:
				/*
				 * For READ_STRIP and READ_PASS, force the
				 * error on data being read off the wire. It
				 * should force an IO error to the driver.
				 */
				phba->lpfc_injerr_rgrd_cnt--;
				if (phba->lpfc_injerr_rgrd_cnt == 0) {
					phba->lpfc_injerr_nportid = 0;
					phba->lpfc_injerr_lba =
						LPFC_INJERR_LBA_OFF;
					memset(&phba->lpfc_injerr_wwpn,
						0, sizeof(struct lpfc_name));
				}

				rc = BG_ERR_INIT | BG_ERR_SWAP;
				/* Signals the caller to swap CRC->CSUM */

				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0818 BLKGRD: Injecting guard error: "
					"read lba x%lx\n", (unsigned long)lba);
			}
		}
	}

	return rc;
}
#endif
1425#endif
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438static int
1439lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1440 uint8_t *txop, uint8_t *rxop)
1441{
1442 uint8_t ret = 0;
1443
1444 if (lpfc_cmd_guard_csum(sc)) {
1445 switch (scsi_get_prot_op(sc)) {
1446 case SCSI_PROT_READ_INSERT:
1447 case SCSI_PROT_WRITE_STRIP:
1448 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1449 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1450 break;
1451
1452 case SCSI_PROT_READ_STRIP:
1453 case SCSI_PROT_WRITE_INSERT:
1454 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1455 *txop = BG_OP_IN_NODIF_OUT_CRC;
1456 break;
1457
1458 case SCSI_PROT_READ_PASS:
1459 case SCSI_PROT_WRITE_PASS:
1460 *rxop = BG_OP_IN_CRC_OUT_CSUM;
1461 *txop = BG_OP_IN_CSUM_OUT_CRC;
1462 break;
1463
1464 case SCSI_PROT_NORMAL:
1465 default:
1466 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1467 "9063 BLKGRD: Bad op/guard:%d/IP combination\n",
1468 scsi_get_prot_op(sc));
1469 ret = 1;
1470 break;
1471
1472 }
1473 } else {
1474 switch (scsi_get_prot_op(sc)) {
1475 case SCSI_PROT_READ_STRIP:
1476 case SCSI_PROT_WRITE_INSERT:
1477 *rxop = BG_OP_IN_CRC_OUT_NODIF;
1478 *txop = BG_OP_IN_NODIF_OUT_CRC;
1479 break;
1480
1481 case SCSI_PROT_READ_PASS:
1482 case SCSI_PROT_WRITE_PASS:
1483 *rxop = BG_OP_IN_CRC_OUT_CRC;
1484 *txop = BG_OP_IN_CRC_OUT_CRC;
1485 break;
1486
1487 case SCSI_PROT_READ_INSERT:
1488 case SCSI_PROT_WRITE_STRIP:
1489 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1490 *txop = BG_OP_IN_CRC_OUT_NODIF;
1491 break;
1492
1493 case SCSI_PROT_NORMAL:
1494 default:
1495 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1496 "9075 BLKGRD: Bad op/guard:%d/CRC combination\n",
1497 scsi_get_prot_op(sc));
1498 ret = 1;
1499 break;
1500 }
1501 }
1502
1503 return ret;
1504}
1505
1506#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518static int
1519lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1520 uint8_t *txop, uint8_t *rxop)
1521{
1522 uint8_t ret = 0;
1523
1524 if (lpfc_cmd_guard_csum(sc)) {
1525 switch (scsi_get_prot_op(sc)) {
1526 case SCSI_PROT_READ_INSERT:
1527 case SCSI_PROT_WRITE_STRIP:
1528 *rxop = BG_OP_IN_NODIF_OUT_CRC;
1529 *txop = BG_OP_IN_CRC_OUT_NODIF;
1530 break;
1531
1532 case SCSI_PROT_READ_STRIP:
1533 case SCSI_PROT_WRITE_INSERT:
1534 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1535 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1536 break;
1537
1538 case SCSI_PROT_READ_PASS:
1539 case SCSI_PROT_WRITE_PASS:
1540 *rxop = BG_OP_IN_CSUM_OUT_CRC;
1541 *txop = BG_OP_IN_CRC_OUT_CSUM;
1542 break;
1543
1544 case SCSI_PROT_NORMAL:
1545 default:
1546 break;
1547
1548 }
1549 } else {
1550 switch (scsi_get_prot_op(sc)) {
1551 case SCSI_PROT_READ_STRIP:
1552 case SCSI_PROT_WRITE_INSERT:
1553 *rxop = BG_OP_IN_CSUM_OUT_NODIF;
1554 *txop = BG_OP_IN_NODIF_OUT_CSUM;
1555 break;
1556
1557 case SCSI_PROT_READ_PASS:
1558 case SCSI_PROT_WRITE_PASS:
1559 *rxop = BG_OP_IN_CSUM_OUT_CSUM;
1560 *txop = BG_OP_IN_CSUM_OUT_CSUM;
1561 break;
1562
1563 case SCSI_PROT_READ_INSERT:
1564 case SCSI_PROT_WRITE_STRIP:
1565 *rxop = BG_OP_IN_NODIF_OUT_CSUM;
1566 *txop = BG_OP_IN_CSUM_OUT_NODIF;
1567 break;
1568
1569 case SCSI_PROT_NORMAL:
1570 default:
1571 break;
1572 }
1573 }
1574
1575 return ret;
1576}
1577#endif
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
/**
 * lpfc_bg_setup_bpl - Build a BPL for a BlockGuard I/O with no DIF buffer
 * @phba: HBA the command is issued through.
 * @sc: SCSI command being prepared.
 * @bpl: BPL entry array to fill (caller has already positioned it).
 * @datasegcnt: number of mapped data scatterlist entries.
 *
 * Writes a PDE5 (reference tag) + PDE6 (opcodes / check enables)
 * protection descriptor pair, followed by one 64-bit BDE per data
 * scatterlist segment.  Used when there is no separate protection
 * buffer (LPFC_PG_TYPE_NO_DIF) — presumably the HBA inserts or strips
 * the DIF data itself; confirm against the SLI spec.
 *
 * Return: number of BPL entries written, or 0 on failure (bad op/guard
 * combination or invalid reference tag — num_bde is still 0 at "out").
 */
static int
lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datasegcnt)
{
	struct scatterlist *sgde = NULL;
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	dma_addr_t physaddr;
	int i = 0, num_bde = 0, status;
	int datadir = sc->sc_data_direction;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;

	/* Map the SCSI protection op onto BlockGuard tx/rx opcodes */
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* Initial reference tag comes from the request's T10-PI seed */
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally corrupt tags/opcodes for error-injection testing */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* First descriptor: PDE5 carries the starting reference tag */
	pde5 = (struct lpfc_pde5 *) bpl;
	memset(pde5, 0, sizeof(struct lpfc_pde5));
	bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

	/* Endianness conversion after all word0 fields are set */
	pde5->word0 = cpu_to_le32(pde5->word0);
	pde5->reftag = cpu_to_le32(reftag);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;
	pde6 = (struct lpfc_pde6 *) bpl;

	/* Second descriptor: PDE6 with opcodes and check enables */
	memset(pde6, 0, sizeof(struct lpfc_pde6));
	bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
	bf_set(pde6_optx, pde6, txop);
	bf_set(pde6_oprx, pde6, rxop);

	/*
	 * Guard/ref checking is only enabled for reads here; "checking"
	 * may have been cleared above by the error injector.
	 */
	if (datadir == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);
	}
	bf_set(pde6_ai, pde6, 1);	/* auto-increment the ref tag */
	bf_set(pde6_ae, pde6, 0);
	bf_set(pde6_apptagval, pde6, 0);

	/* Endianness conversion for all three PDE6 words */
	pde6->word0 = cpu_to_le32(pde6->word0);
	pde6->word1 = cpu_to_le32(pde6->word1);
	pde6->word2 = cpu_to_le32(pde6->word2);

	/* advance bpl and increment bde count */
	num_bde++;
	bpl++;

	/* One 64-bit BDE per mapped data segment */
	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
		physaddr = sg_dma_address(sgde);
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = sg_dma_len(sgde);
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		num_bde++;
	}

out:
	return num_bde;
}
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
/**
 * lpfc_bg_setup_bpl_prot - Build a BPL for a BlockGuard I/O with DIF buffers
 * @phba: HBA the command is issued through.
 * @sc: SCSI command being prepared.
 * @bpl: BPL entry array to fill (caller has already positioned it).
 * @datacnt: number of mapped data scatterlist entries (unused directly;
 *           the data sglist is walked via sg_next()).
 * @protcnt: number of mapped protection scatterlist entries.
 *
 * For each protection group, emits a PDE5 + PDE6 descriptor pair, a PDE7
 * pointing at the protection (DIF) buffer, and then enough data BDEs to
 * cover protgrp_bytes of data, splitting data segments across protection
 * groups as needed.  A protection buffer that crosses a 4K boundary is
 * split into multiple groups (protgroup_offset tracks the resume point).
 *
 * Return: number of BPL entries written; 0 on invalid sglists or a
 * missing data segment; a value > phba->cfg_total_seg_cnt when the
 * descriptor space is exhausted (caller treats that as an error).
 */
static int
lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct ulp_bde64 *bpl, int datacnt, int protcnt)
{
	struct scatterlist *sgde = NULL;
	struct scatterlist *sgpe = NULL;
	struct lpfc_pde5 *pde5 = NULL;
	struct lpfc_pde6 *pde6 = NULL;
	struct lpfc_pde7 *pde7 = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	int datadir = sc->sc_data_direction;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t reftag;
	uint8_t txop, rxop;
	int num_bde = 0;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	/* Both a data and a protection sglist are required here */
	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9020 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command for pde*/
	blksize = lpfc_cmd_blksize(sc);
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally corrupt tags/opcodes for error-injection testing */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if (num_bde >= (phba->cfg_total_seg_cnt - 2))
			return num_bde + 3;

		/* setup PDE5 with what we have */
		pde5 = (struct lpfc_pde5 *) bpl;
		memset(pde5, 0, sizeof(struct lpfc_pde5));
		bf_set(pde5_type, pde5, LPFC_PDE5_DESCRIPTOR);

		/* Endianness conversion if necessary for PDE5 */
		pde5->word0 = cpu_to_le32(pde5->word0);
		pde5->reftag = cpu_to_le32(reftag);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;
		pde6 = (struct lpfc_pde6 *) bpl;

		/* setup PDE6 with the rest of the info */
		memset(pde6, 0, sizeof(struct lpfc_pde6));
		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
		bf_set(pde6_optx, pde6, txop);
		bf_set(pde6_oprx, pde6, rxop);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(pde6_ce, pde6, checking);
		else
			bf_set(pde6_ce, pde6, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(pde6_re, pde6, checking);
		else
			bf_set(pde6_re, pde6, 0);

		bf_set(pde6_ai, pde6, 1);	/* auto-increment ref tag */
		bf_set(pde6_ae, pde6, 0);
		bf_set(pde6_apptagval, pde6, 0);

		/* Endianness conversion if necessary for PDE6 */
		pde6->word0 = cpu_to_le32(pde6->word0);
		pde6->word1 = cpu_to_le32(pde6->word1);
		pde6->word2 = cpu_to_le32(pde6->word2);

		/* advance bpl and increment bde count */
		num_bde++;
		bpl++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF tuple size (8 bytes) */
		BUG_ON(protgroup_len % 8);

		pde7 = (struct lpfc_pde7 *) bpl;
		memset(pde7, 0, sizeof(struct lpfc_pde7));
		bf_set(pde7_type, pde7, LPFC_PDE7_DESCRIPTOR);

		pde7->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
		pde7->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* Split the protection group if it crosses a 4K boundary */
		if ((pde7->addrLow & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (pde7->addrLow & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* Whole protection segment consumed */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_bde++;

		/* Emit data BDEs until protgrp_bytes of data are covered */
		pgdone = 0;
		subtotal = 0;
		while (!pgdone) {
			/* Check to see if we ran out of space */
			if (num_bde >= phba->cfg_total_seg_cnt)
				return num_bde + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9065 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}
			bpl++;
			dataphysaddr = sg_dma_address(sgde) + split_offset;
			bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));

			remainder = sg_dma_len(sgde) - split_offset;

			if ((subtotal + remainder) <= protgrp_bytes) {
				/* Rest of this data segment fits the group */
				bpl->tus.f.bdeSize = remainder;
				split_offset = 0;

				if ((subtotal + remainder) == protgrp_bytes)
					pgdone = 1;
			} else {
				/* Segment spills into the next prot group */
				bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
				split_offset += bpl->tus.f.bdeSize;
			}

			subtotal += bpl->tus.f.bdeSize;

			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			else
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);

			num_bde++;
			curr_data++;

			/* Split segment: stay on it for the next group */
			if (split_offset)
				break;

			/* Move to the next s/g segment if possible */
			sgde = sg_next(sgde);

		}

		if (protgroup_offset) {
			/* Same prot segment continues in the next group */
			reftag += protgrp_blks;
			bpl++;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);
			bpl++;

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* curr_prot > protcnt should be impossible */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9054 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);
out:

	return num_bde;
}
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
/**
 * lpfc_bg_setup_sgl - Build an SLI4 SGL for a BlockGuard I/O, no DIF buffer
 * @phba: HBA the command is issued through.
 * @sc: SCSI command being prepared.
 * @sgl: SGE array to fill (caller has already positioned it).
 * @datasegcnt: number of mapped data scatterlist entries.
 * @lpfc_cmd: driver IO buffer, used to borrow extra SGL pages (LSP chaining).
 *
 * Writes one DISEED SGE (ref tag, opcodes, check enables) followed by a
 * data SGE per scatterlist segment.  When the SGE array would overflow a
 * page boundary (tracked by j vs phba->border_sge_num), an LSP SGE is
 * inserted that chains to an extra per-hardware-queue SGL page.
 *
 * Return: number of SGEs written, or 0 on failure (bad op/guard combo,
 * invalid ref tag, or no spare SGL page available).
 */
static int
lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datasegcnt,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL;
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t physaddr;
	int i = 0, num_sge = 0, status;
	uint32_t reftag;
	uint8_t txop, rxop;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;
	int j;
	bool lsp_just_set = false;

	/* Map the SCSI protection op onto BlockGuard tx/rx opcodes */
	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* Initial reference tag comes from the request's T10-PI seed */
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally corrupt tags/opcodes for error-injection testing */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	/* setup DISEED with what we have */
	diseed = (struct sli4_sge_diseed *) sgl;
	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

	/* Endianness conversion if necessary */
	diseed->ref_tag = cpu_to_le32(reftag);
	diseed->ref_tag_tran = diseed->ref_tag;

	/*
	 * Guard/ref checking only configured for reads here; "checking"
	 * may have been cleared above by the error injector.
	 */
	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD))
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);
	}

	/* setup DISEED with the rest of the info */
	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);	/* auto-increment */
	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

	/* Endianness conversion if necessary for DISEED */
	diseed->word2 = cpu_to_le32(diseed->word2);
	diseed->word3 = cpu_to_le32(diseed->word3);

	/* advance sgl and increment sge count */
	num_sge++;
	sgl++;

	/* One data SGE per segment, with LSP chaining at page borders */
	sgde = scsi_sglist(sc);
	j = 3;
	for (i = 0; i < datasegcnt; i++) {
		/* clear it */
		sgl->word2 = 0;

		/* Need an LSP before crossing the SGL page border
		 * (unless this is the last segment or one was just set).
		 */
		if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
		    ((datasegcnt - 1) != i)) {
			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				lpfc_cmd->seg_cnt = 0;
				return 0;
			}
			sgl->addr_lo = cpu_to_le32(putPaddrLow(
					sgl_xtra->dma_phys_sgl));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(
					sgl_xtra->dma_phys_sgl));

		} else {
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		}

		if (!(bf_get(lpfc_sli4_sge_type, sgl) & LPFC_SGE_TYPE_LSP)) {
			/* Plain data SGE */
			if ((datasegcnt - 1) == i)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			physaddr = sg_dma_address(sgde);
			dma_len = sg_dma_len(sgde);
			sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
			sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));

			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(dma_len);

			dma_offset += dma_len;
			sgde = sg_next(sgde);

			sgl++;
			num_sge++;
			lsp_just_set = false;

		} else {
			/* LSP: jump into the freshly borrowed SGL page and
			 * replay this segment index (i is decremented).
			 */
			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			i = i - 1;

			lsp_just_set = true;
		}

		j++;

	}

out:
	return num_sge;
}
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
/**
 * lpfc_bg_setup_sgl_prot - Build an SLI4 SGL for a BlockGuard I/O with DIF
 * @phba: HBA the command is issued through.
 * @sc: SCSI command being prepared.
 * @sgl: SGE array to fill (caller has already positioned it).
 * @datacnt: number of mapped data scatterlist entries (unused directly;
 *           the data sglist is walked via sg_next()).
 * @protcnt: number of mapped protection scatterlist entries.
 * @lpfc_cmd: driver IO buffer, used to borrow extra SGL pages (LSP chaining).
 *
 * For each protection group, emits a DISEED SGE, a DIF SGE pointing at
 * the protection buffer, and data SGEs covering protgrp_bytes of data,
 * splitting data segments across groups as needed.  A protection buffer
 * crossing a 4K boundary is split (protgroup_offset tracks the resume
 * point).  LSP SGEs chain extra SGL pages when the array fills up
 * (tracked by j vs phba->border_sge_num).
 *
 * Return: number of SGEs written; 0 on invalid sglists, missing data
 * segment, or no spare SGL page; a value > phba->cfg_total_seg_cnt when
 * descriptor space is exhausted on a non-xpsgl config.
 */
static int
lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
		struct sli4_sge *sgl, int datacnt, int protcnt,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgde = NULL;
	struct scatterlist *sgpe = NULL;
	struct sli4_sge_diseed *diseed = NULL;
	dma_addr_t dataphysaddr, protphysaddr;
	unsigned short curr_data = 0, curr_prot = 0;
	unsigned int split_offset;
	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
	unsigned int protgrp_blks, protgrp_bytes;
	unsigned int remainder, subtotal;
	int status;
	unsigned char pgdone = 0, alldone = 0;
	unsigned blksize;
	uint32_t reftag;
	uint8_t txop, rxop;
	uint32_t dma_len;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t rc;
#endif
	uint32_t checking = 1;
	uint32_t dma_offset = 0;
	int num_sge = 0, j = 2;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	sgpe = scsi_prot_sglist(sc);
	sgde = scsi_sglist(sc);

	/* Both a data and a protection sglist are required here */
	if (!sgpe || !sgde) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"9082 Invalid s/g entry: data=x%px prot=x%px\n",
				sgpe, sgde);
		return 0;
	}

	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
	if (status)
		goto out;

	/* extract some info from the scsi command */
	blksize = lpfc_cmd_blksize(sc);
	reftag = t10_pi_ref_tag(sc->request);
	if (reftag == LPFC_INVALID_REFTAG)
		goto out;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Optionally corrupt tags/opcodes for error-injection testing */
	rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
	if (rc) {
		if (rc & BG_ERR_SWAP)
			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
		if (rc & BG_ERR_CHECK)
			checking = 0;
	}
#endif

	split_offset = 0;
	do {
		/* Check to see if we ran out of space */
		if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
		    !(phba->cfg_xpsgl))
			return num_sge + 3;

		/* DISEED and DIF have to be together: chain to a new SGL
		 * page if either of the next 3 slots would hit the border.
		 */
		if (!((j + 1) % phba->border_sge_num) ||
		    !((j + 2) % phba->border_sge_num) ||
		    !((j + 3) % phba->border_sge_num)) {
			sgl->word2 = 0;

			/* set LSP type */
			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP);

			sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);

			if (unlikely(!sgl_xtra)) {
				goto out;
			} else {
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						sgl_xtra->dma_phys_sgl));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));
			}

			sgl->word2 = cpu_to_le32(sgl->word2);
			sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);

			sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			j = 0;
		}

		/* setup DISEED with what we have */
		diseed = (struct sli4_sge_diseed *) sgl;
		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);

		/* Endianness conversion if necessary */
		diseed->ref_tag = cpu_to_le32(reftag);
		diseed->ref_tag_tran = diseed->ref_tag;

		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_GUARD)) {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);

		} else {
			bf_set(lpfc_sli4_sge_dif_ce, diseed, 0);

			/* When guard checking is off and both sides were
			 * going to use CRC, fall back to raw mode so no
			 * guard work is done at all.
			 */
			if (txop == BG_OP_IN_CRC_OUT_CRC) {
				txop = BG_OP_RAW_MODE;
				rxop = BG_OP_RAW_MODE;
			}
		}


		if (lpfc_cmd_protect(sc, LPFC_CHECK_PROTECT_REF))
			bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
		else
			bf_set(lpfc_sli4_sge_dif_re, diseed, 0);

		/* setup DISEED with the rest of the info */
		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);

		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);

		/* Endianness conversion if necessary for DISEED */
		diseed->word2 = cpu_to_le32(diseed->word2);
		diseed->word3 = cpu_to_le32(diseed->word3);

		/* advance sgl and increment sge count */
		num_sge++;

		sgl++;
		j++;

		/* setup the first BDE that points to protection buffer */
		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;

		/* must be integer multiple of the DIF tuple size (8 bytes) */
		BUG_ON(protgroup_len % 8);

		/* Next SGE describes the protection (DIF) buffer */
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;

		protgrp_blks = protgroup_len / 8;
		protgrp_bytes = protgrp_blks * blksize;

		/* Split the protection group if it crosses a 4K boundary */
		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
			protgroup_offset += protgroup_remainder;
			protgrp_blks = protgroup_remainder / 8;
			protgrp_bytes = protgrp_blks * blksize;
		} else {
			/* Whole protection segment consumed */
			protgroup_offset = 0;
			curr_prot++;
		}

		num_sge++;

		/* Emit data SGEs until protgrp_bytes of data are covered */
		pgdone = 0;
		subtotal = 0;

		sgl++;
		j++;

		while (!pgdone) {
			/* Check to see if we ran out of space */
			if ((num_sge >= phba->cfg_total_seg_cnt) &&
			    !phba->cfg_xpsgl)
				return num_sge + 1;

			if (!sgde) {
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9086 BLKGRD:%s Invalid data segment\n",
						__func__);
				return 0;
			}

			/* Chain to a new SGL page at the border */
			if (!((j + 1) % phba->border_sge_num)) {
				sgl->word2 = 0;

				/* set LSP type */
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_LSP);

				sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
								 lpfc_cmd);

				if (unlikely(!sgl_xtra)) {
					goto out;
				} else {
					sgl->addr_lo = cpu_to_le32(
					  putPaddrLow(sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(
					  putPaddrHigh(sgl_xtra->dma_phys_sgl));
				}

				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						     phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
			} else {
				dataphysaddr = sg_dma_address(sgde) +
								   split_offset;

				remainder = sg_dma_len(sgde) - split_offset;

				if ((subtotal + remainder) <= protgrp_bytes) {
					/* rest of segment fits the group */
					dma_len = remainder;
					split_offset = 0;

					if ((subtotal + remainder) ==
								  protgrp_bytes)
						pgdone = 1;
				} else {
					/* Segment spills into the next
					 * protection group.
					 */
					dma_len = protgrp_bytes - subtotal;
					split_offset += dma_len;
				}

				subtotal += dma_len;

				sgl->word2 = 0;
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
								 dataphysaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
								 dataphysaddr));
				bf_set(lpfc_sli4_sge_last, sgl, 0);
				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);

				sgl->sge_len = cpu_to_le32(dma_len);
				dma_offset += dma_len;

				num_sge++;
				curr_data++;

				/* Split segment: stay on it for next group */
				if (split_offset) {
					sgl++;
					j++;
					break;
				}

				/* Move to the next s/g segment if possible */
				sgde = sg_next(sgde);

				sgl++;
			}

			j++;
		}

		if (protgroup_offset) {
			/* Same prot segment continues in the next group */
			reftag += protgrp_blks;
			continue;
		}

		/* are we done ? */
		if (curr_prot == protcnt) {
			/* mark the last SGE */
			sgl--;
			bf_set(lpfc_sli4_sge_last, sgl, 1);
			alldone = 1;
		} else if (curr_prot < protcnt) {
			/* advance to next prot buffer */
			sgpe = sg_next(sgpe);

			/* update the reference tag */
			reftag += protgrp_blks;
		} else {
			/* curr_prot > protcnt should be impossible */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9085 BLKGRD: bug in %s\n", __func__);
		}

	} while (!alldone);

out:

	return num_sge;
}
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496
2497static int
2498lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2499{
2500 int ret = LPFC_PG_TYPE_INVALID;
2501 unsigned char op = scsi_get_prot_op(sc);
2502
2503 switch (op) {
2504 case SCSI_PROT_READ_STRIP:
2505 case SCSI_PROT_WRITE_INSERT:
2506 ret = LPFC_PG_TYPE_NO_DIF;
2507 break;
2508 case SCSI_PROT_READ_INSERT:
2509 case SCSI_PROT_WRITE_STRIP:
2510 case SCSI_PROT_READ_PASS:
2511 case SCSI_PROT_WRITE_PASS:
2512 ret = LPFC_PG_TYPE_DIF_BUF;
2513 break;
2514 default:
2515 if (phba)
2516 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2517 "9021 Unsupported protection op:%d\n",
2518 op);
2519 break;
2520 }
2521 return ret;
2522}
2523
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534static int
2535lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2536 struct lpfc_io_buf *lpfc_cmd)
2537{
2538 struct scsi_cmnd *sc = lpfc_cmd->pCmd;
2539 int fcpdl;
2540
2541 fcpdl = scsi_bufflen(sc);
2542
2543
2544 if (sc->sc_data_direction == DMA_FROM_DEVICE) {
2545
2546 if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
2547 return fcpdl;
2548
2549 } else {
2550
2551 if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
2552 return fcpdl;
2553 }
2554
2555
2556
2557
2558
2559
2560 fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8;
2561
2562 return fcpdl;
2563}
2564
2565
2566
2567
2568
2569
2570
2571
2572
2573
2574
2575
2576
/**
 * lpfc_bg_scsi_prep_dma_buf_s3 - DMA-map and build BPLs for a SLI-3 DIF I/O
 * @phba: HBA the command is issued through.
 * @lpfc_cmd: driver IO buffer wrapping the SCSI command.
 *
 * DMA-maps the data (and, for DIF-buffer ops, the protection) scatterlist,
 * dispatches to lpfc_bg_setup_bpl() or lpfc_bg_setup_bpl_prot() based on
 * the protection group type, then fills in the IOCB's BDL size, FCP data
 * length (fcpDl, byte-swapped to wire order), fcpi_parm, and first-burst
 * clamp.  On failure, everything mapped so far is unmapped.
 *
 * Return: 0 on success, 1 on a DMA mapping failure (retryable), 2 when
 * the segment counts exceed what the HBA descriptors can hold.
 */
static int
lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
		struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = (struct ulp_bde64 *)lpfc_cmd->dma_sgl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	uint32_t num_bde = 0;
	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
	int prot_group_type = 0;
	int fcpdl;
	int ret = 1;
	struct lpfc_vport *vport = phba->pport;

	/* Skip the first two BPL entries (command and response BDEs) */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/* DMA-map the data scatterlist */
		datasegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_sglist(scsi_cmnd),
					scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!datasegcnt))
			return 1;

		lpfc_cmd->seg_cnt = datasegcnt;

		/* First check if data segment count from SCSI Layer is good */
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			ret = 2;
			goto err;
		}

		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);

		switch (prot_group_type) {
		case LPFC_PG_TYPE_NO_DIF:
			/* PDE5 + PDE6 plus one BDE per data segment must
			 * fit the descriptor space.
			 */
			if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
				ret = 2;
				goto err;
			}

			num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
					datasegcnt);
			/* we should have 2 or more entries in buffer list */
			if (num_bde < 2) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_DIF_BUF:
			/* DMA-map the protection scatterlist too */
			protsegcnt = dma_map_sg(&phba->pcidev->dev,
					scsi_prot_sglist(scsi_cmnd),
					scsi_prot_sg_count(scsi_cmnd), datadir);
			if (unlikely(!protsegcnt)) {
				scsi_dma_unmap(scsi_cmnd);
				return 1;
			}

			lpfc_cmd->prot_seg_cnt = protsegcnt;

			/* Worst case ~4 BPL entries per protection segment
			 * (PDE5 + PDE6 + PDE7 + data); plus the 2 cmd/rsp
			 * BDEs.  Reject if that cannot fit.
			 */
			if ((lpfc_cmd->prot_seg_cnt * 4) >
			    (phba->cfg_total_seg_cnt - 2)) {
				ret = 2;
				goto err;
			}

			num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
					datasegcnt, protsegcnt);

			/* we should have 3 or more entries in buffer list */
			if ((num_bde < 3) ||
			    (num_bde > phba->cfg_total_seg_cnt)) {
				ret = 2;
				goto err;
			}
			break;

		case LPFC_PG_TYPE_INVALID:
		default:
			scsi_dma_unmap(scsi_cmnd);
			lpfc_cmd->seg_cnt = 0;

			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9022 Unexpected protection group %i\n",
					prot_group_type);
			return 2;
		}
	}

	/*
	 * Finish initializing the IOCB: BDL covers the 2 fixed BDEs plus
	 * everything the setup routine wrote.
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
	iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;

	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
	/* byte-swap to wire (big-endian) order */
	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);

	/*
	 * fcpi_parm carries the expected transfer length for the
	 * completion-side length check.
	 */
	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;

	/* Clamp any pre-set first-burst XFER_RDY to the actual length */
	if (iocb_cmd->un.fcpi.fcpi_XRdy &&
	    (fcpdl < vport->cfg_first_burst_size))
		iocb_cmd->un.fcpi.fcpi_XRdy = fcpdl;

	return 0;
err:
	/* Unwind whatever was mapped before the failure */
	if (lpfc_cmd->seg_cnt)
		scsi_dma_unmap(scsi_cmnd);
	if (lpfc_cmd->prot_seg_cnt)
		dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     scsi_cmnd->sc_data_direction);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"9023 Cannot setup S/G List for HBA"
			"IO segs %d/%d BPL %d SCSI %d: %d %d\n",
			lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
			phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
			prot_group_type, num_bde);

	lpfc_cmd->seg_cnt = 0;
	lpfc_cmd->prot_seg_cnt = 0;
	return ret;
}
2735
2736
2737
2738
2739
2740
2741static uint16_t
2742lpfc_bg_crc(uint8_t *data, int count)
2743{
2744 uint16_t crc = 0;
2745 uint16_t x;
2746
2747 crc = crc_t10dif(data, count);
2748 x = cpu_to_be16(crc);
2749 return x;
2750}
2751
2752
2753
2754
2755
2756
2757static uint16_t
2758lpfc_bg_csum(uint8_t *data, int count)
2759{
2760 uint16_t ret;
2761
2762 ret = ip_compute_csum(data, count);
2763 return ret;
2764}
2765
2766
2767
2768
2769
/**
 * lpfc_calc_bg_err - Verify DIF tuples in software to pinpoint a BG error
 * @phba: HBA the command completed on.
 * @lpfc_cmd: IO buffer whose data/protection sglists are re-checked.
 *
 * Walks the protection sglist tuple-by-tuple alongside the data sglist,
 * recomputing the guard (CRC or IP checksum per lpfc_cmd_guard_csum())
 * and comparing ref tags against an incrementing expected value.  On the
 * first mismatch, fills the command's sense data (guard/ref/app-tag
 * ILLEGAL_REQUEST, ascq 0x1/0x3/0x2), sets DID_ABORT, bumps the matching
 * phba counter, and logs.  Defaults to reporting a guard error if the
 * walk never runs (err_type starts as BGS_GUARD_ERR_MASK).
 *
 * Ops with no host-visible protection data (READ_STRIP, WRITE_INSERT,
 * NORMAL) are skipped — note that skipping still reports a guard error
 * via the default err_type, which callers presumably expect; confirm
 * against the completion path.
 */
static void
lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scatterlist *sgpe; /* s/g prot entry */
	struct scatterlist *sgde; /* s/g data entry */
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct scsi_dif_tuple *src = NULL;
	uint8_t *data_src = NULL;
	uint16_t guard_tag;
	uint16_t start_app_tag, app_tag;
	uint32_t start_ref_tag, ref_tag;
	int prot, protsegcnt;
	int err_type, len, data_len;
	int chk_ref, chk_app, chk_guard;
	uint16_t sum;
	unsigned blksize;

	/* Default outcome if the tuple walk finds nothing specific */
	err_type = BGS_GUARD_ERR_MASK;
	sum = 0;
	guard_tag = 0;

	/* No host-visible protection data for these ops */
	prot = scsi_get_prot_op(cmd);
	if ((prot == SCSI_PROT_READ_STRIP) ||
	    (prot == SCSI_PROT_WRITE_INSERT) ||
	    (prot == SCSI_PROT_NORMAL))
		goto out;

	/* Only ref-tag checking is unconditionally on; guard checking is
	 * enabled per-segment below, app-tag checking is off.
	 */
	chk_ref = 1;
	chk_app = 0;
	chk_guard = 0;

	/* There are multiple possible protection buffer segments */
	sgpe = scsi_prot_sglist(cmd);
	protsegcnt = lpfc_cmd->prot_seg_cnt;

	if (sgpe && protsegcnt) {

		/* Walk protection tuples and data blocks in lock-step */
		sgde = scsi_sglist(cmd);
		blksize = lpfc_cmd_blksize(cmd);
		data_src = (uint8_t *)sg_virt(sgde);
		data_len = sgde->length;
		/* Only guard-check segments that are block-aligned */
		if ((data_len & (blksize - 1)) == 0)
			chk_guard = 1;

		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
		start_ref_tag = t10_pi_ref_tag(cmd->request);
		if (start_ref_tag == LPFC_INVALID_REFTAG)
			goto out;
		start_app_tag = src->app_tag;
		len = sgpe->length;
		while (src && protsegcnt) {
			while (len) {

				/*
				 * T10-PI escape values disable checking for
				 * this block; still advance the expected
				 * ref tag.
				 */
				if ((src->ref_tag == T10_PI_REF_ESCAPE) ||
				    (src->app_tag == T10_PI_APP_ESCAPE)) {
					start_ref_tag++;
					goto skipit;
				}

				/* First Guard Tag checking */
				if (chk_guard) {
					guard_tag = src->guard_tag;
					if (lpfc_cmd_guard_csum(cmd))
						sum = lpfc_bg_csum(data_src,
								   blksize);
					else
						sum = lpfc_bg_crc(data_src,
								  blksize);
					if ((guard_tag != sum)) {
						err_type = BGS_GUARD_ERR_MASK;
						goto out;
					}
				}

				/* Reference Tag checking */
				ref_tag = be32_to_cpu(src->ref_tag);
				if (chk_ref && (ref_tag != start_ref_tag)) {
					err_type = BGS_REFTAG_ERR_MASK;
					goto out;
				}
				start_ref_tag++;

				/* App Tag checking (chk_app is 0 today) */
				app_tag = src->app_tag;
				if (chk_app && (app_tag != start_app_tag)) {
					err_type = BGS_APPTAG_ERR_MASK;
					goto out;
				}
skipit:
				len -= sizeof(struct scsi_dif_tuple);
				if (len < 0)
					len = 0;
				src++;

				data_src += blksize;
				data_len -= blksize;

				/*
				 * Data segment exhausted: advance to the
				 * next one and re-evaluate guard-check
				 * eligibility.
				 */
				if (chk_guard && (data_len == 0)) {
					chk_guard = 0;
					sgde = sg_next(sgde);
					if (!sgde)
						goto out;

					data_src = (uint8_t *)sg_virt(sgde);
					data_len = sgde->length;
					if ((data_len & (blksize - 1)) == 0)
						chk_guard = 1;
				}
			}

			/* Goto the next protection data segment */
			sgpe = sg_next(sgpe);
			if (sgpe) {
				src = (struct scsi_dif_tuple *)sg_virt(sgpe);
				len = sgpe->length;
			} else {
				src = NULL;
			}
			protsegcnt--;
		}
	}
out:
	if (err_type == BGS_GUARD_ERR_MASK) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9069 BLKGRD: reftag %x grd_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				sum, guard_tag);

	} else if (err_type == BGS_REFTAG_ERR_MASK) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9066 BLKGRD: reftag %x ref_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				ref_tag, start_ref_tag);

	} else if (err_type == BGS_APPTAG_ERR_MASK) {
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9041 BLKGRD: reftag %x app_tag err %x != %x\n",
				t10_pi_ref_tag(cmd->request),
				app_tag, start_app_tag);
	}
}
2937
2938
2939
2940
2941
2942
2943
2944
2945
2946
2947
2948
2949
/**
 * lpfc_sli4_parse_bg_err - Determine if there was a BlockGuard error (SLI-4)
 * @phba: The hba for which this call is being executed.
 * @lpfc_cmd: The scsi cmnd buffer used.
 * @wcqe: The work completion queue entry that reported the DI status.
 *
 * This function checks for BlockGuard errors detected by the HBA.  In case
 * of errors, the ASC/ASCQ fields in the sense buffer will be set
 * accordingly, paired with ILLEGAL_REQUEST, to signal to the kernel that
 * the HBA detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found (sense data built)
 */
static int
lpfc_sli4_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int ret = 0;
	u32 status = bf_get(lpfc_wcqe_c_status, wcqe);
	u32 bghm = 0;
	u32 bgstat = 0;
	u64 failing_sector = 0;

	if (status == CQE_STATUS_DI_ERROR) {
		/* Translate the WCQE DI error bits into a bgstat mask */
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe))	/* Guard Check failed */
			bgstat |= BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe))	/* App Tag Check failed */
			bgstat |= BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe))	/* Ref Tag Check failed */
			bgstat |= BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			bgstat |= BGS_HI_WATER_MARK_PRESENT_MASK;
			bghm = wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!bgstat)
			bgstat |= (BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;
		/* ASC/ASCQ 0x10/0x1: LOGICAL BLOCK GUARD CHECK FAILED */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9059 BLKGRD: Guard Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;
		/* ASC/ASCQ 0x10/0x3: LOGICAL BLOCK REF TAG CHECK FAILED */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9060 BLKGRD: Ref Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;
		/* ASC/ASCQ 0x10/0x2: LOGICAL BLOCK APP TAG CHECK FAILED */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9062 BLKGRD: App Tag error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * Setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm holds the total bytes placed by the HBA; convert it
		 * to a block count for the failing-LBA arithmetic below.
		 */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* host buffer holds data only */
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* an 8-byte DIF tuple accompanies every block */
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information: failing LBA, big-endian */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9068 BLKGRD: Unknown error in cmd"
				" 0x%x lba 0x%llx blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				(unsigned long long)scsi_get_lba(cmd),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
	return ret;
}
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
/**
 * lpfc_parse_bg_err - Determine if there was a BlockGuard error (SLI-3)
 * @phba: The hba for which this call is being executed.
 * @lpfc_cmd: The scsi cmnd buffer used.
 * @pIocbOut: The response IOCB carrying the BlockGuard status fields.
 *
 * This function checks for BlockGuard errors detected by the HBA.  In case
 * of errors, the ASC/ASCQ fields in the sense buffer will be set
 * accordingly, paired with ILLEGAL_REQUEST, to signal to the kernel that
 * the HBA detected corruption.
 *
 * Returns:
 *  0 - No error found
 *  1 - BlockGuard error found (sense data built)
 * -1 - Internal error (bad profile, bad PDIF block)
 */
static int
lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
		  struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
	int ret = 0;
	uint32_t bghm = bgf->bghm;
	uint32_t bgstat = bgf->bgstat;
	uint64_t failing_sector = 0;

	if (lpfc_bgs_get_invalid_prof(bgstat)) {
		/* The HBA rejected the BlockGuard profile in the IOCB */
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9072 BLKGRD: Invalid BG Profile in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
		/* The HBA encountered an uninitialized DIF block */
		cmd->result = DID_ERROR << 16;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9073 BLKGRD: Invalid BG PDIF Block in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
		ret = (-1);
		goto out;
	}

	if (lpfc_bgs_get_guard_err(bgstat)) {
		ret = 1;
		/* ASC/ASCQ 0x10/0x1: LOGICAL BLOCK GUARD CHECK FAILED */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1);
		set_host_byte(cmd, DID_ABORT);
		phba->bg_guard_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9055 BLKGRD: Guard Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_reftag_err(bgstat)) {
		ret = 1;
		/* ASC/ASCQ 0x10/0x3: LOGICAL BLOCK REF TAG CHECK FAILED */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_reftag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9056 BLKGRD: Ref Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_apptag_err(bgstat)) {
		ret = 1;
		/* ASC/ASCQ 0x10/0x2: LOGICAL BLOCK APP TAG CHECK FAILED */
		scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2);
		set_host_byte(cmd, DID_ABORT);

		phba->bg_apptag_err_cnt++;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9061 BLKGRD: App Tag error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);
	}

	if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
		/*
		 * Setup sense data descriptor 0 per SPC-4 as an information
		 * field, and put the failing LBA in it.
		 * This code assumes there was also a guard/app/ref tag error
		 * indication.
		 */
		cmd->sense_buffer[7] = 0xc;   /* Additional sense length */
		cmd->sense_buffer[8] = 0;     /* Information descriptor type */
		cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
		cmd->sense_buffer[10] = 0x80; /* Validity bit */

		/* bghm holds the total bytes placed by the HBA; convert it
		 * to a block count for the failing-LBA arithmetic below.
		 */
		switch (scsi_get_prot_op(cmd)) {
		case SCSI_PROT_READ_INSERT:
		case SCSI_PROT_WRITE_STRIP:
			/* host buffer holds data only */
			bghm /= cmd->device->sector_size;
			break;
		case SCSI_PROT_READ_STRIP:
		case SCSI_PROT_WRITE_INSERT:
		case SCSI_PROT_READ_PASS:
		case SCSI_PROT_WRITE_PASS:
			/* an 8-byte DIF tuple accompanies every block */
			bghm /= (cmd->device->sector_size +
				sizeof(struct scsi_dif_tuple));
			break;
		}

		failing_sector = scsi_get_lba(cmd);
		failing_sector += bghm;

		/* Descriptor Information: failing LBA, big-endian */
		put_unaligned_be64(failing_sector, &cmd->sense_buffer[12]);
	}

	if (!ret) {
		/* No error was reported - problem in FW? */
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
				"9057 BLKGRD: Unknown error in cmd "
				"0x%x reftag 0x%x blk cnt 0x%x "
				"bgstat=x%x bghm=x%x\n", cmd->cmnd[0],
				t10_pi_ref_tag(cmd->request),
				blk_rq_sectors(cmd->request), bgstat, bghm);

		/* Calculate what type of error it was */
		lpfc_calc_bg_err(phba, lpfc_cmd);
	}
out:
	return ret;
}
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
3230
/**
 * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI-4 IF spec
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping for the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd and builds the SGL (including LSP chaining
 * when the list exceeds one SGL page) for a device with SLI-4 interface spec.
 *
 * Return codes:
 *	2 - Error - Do not retry
 *	1 - Error - Retry
 *	0 - Success
 */
static int
lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
	struct sli4_sge *first_data_sgl;
	struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
	struct lpfc_vport *vport = phba->pport;
	union lpfc_wqe128 *wqe = &pwqeq->wqe;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	uint32_t dma_len;
	uint32_t dma_offset = 0;
	int nseg, i, j;
	struct ulp_bde64 *bde;
	bool lsp_just_set = false;
	struct sli4_hybrid_sgl *sgl_xtra = NULL;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the sgl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data sge entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */
		nseg = scsi_dma_map(scsi_cmnd);
		if (unlikely(nseg <= 0))
			return 1;
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 0);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl += 1;
		first_data_sgl = sgl;
		lpfc_cmd->seg_cnt = nseg;
		if (!phba->cfg_xpsgl &&
		    lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"9074 BLKGRD:"
					" %s: Too many sg segments from "
					"dma_map_sg. Config %d, seg_cnt %d\n",
					__func__, phba->cfg_sg_seg_cnt,
					lpfc_cmd->seg_cnt);
			WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
			lpfc_cmd->seg_cnt = 0;
			scsi_dma_unmap(scsi_cmnd);
			return 2;
		}

		/*
		 * The driver established a maximum scatter-gather segment
		 * count during probe that limits the number of sg elements in
		 * any single scsi command.  Just run through the seg_cnt and
		 * format the sge's.  When a segment boundary within a page of
		 * SGEs is reached, a List Segment Pointer (LSP) SGE is
		 * inserted to chain to an extra SGL page obtained from the
		 * per-hardware-queue pool.
		 */
		sgel = scsi_sglist(scsi_cmnd);
		j = 2;
		for (i = 0; i < nseg; i++) {
			sgl->word2 = 0;
			if ((num_bde + 1) == nseg) {
				/* last entry of the list */
				bf_set(lpfc_sli4_sge_last, sgl, 1);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
			} else {
				bf_set(lpfc_sli4_sge_last, sgl, 0);

				/* do we need to expand the segment */
				if (!lsp_just_set &&
				    !((j + 1) % phba->border_sge_num) &&
				    ((nseg - 1) != i)) {
					/* set LSP type */
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_LSP);

					sgl_xtra = lpfc_get_sgl_per_hdwq(
							phba, lpfc_cmd);

					if (unlikely(!sgl_xtra)) {
						lpfc_cmd->seg_cnt = 0;
						scsi_dma_unmap(scsi_cmnd);
						return 1;
					}
					sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       sgl_xtra->dma_phys_sgl));
					sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       sgl_xtra->dma_phys_sgl));

				} else {
					bf_set(lpfc_sli4_sge_type, sgl,
					       LPFC_SGE_TYPE_DATA);
				}
			}

			/* NOTE(review): LSP is tested here with a bitwise AND
			 * against the type value rather than an equality
			 * compare; assumes LPFC_SGE_TYPE_DATA carries no bits
			 * of LPFC_SGE_TYPE_LSP -- confirm against lpfc_hw4.h.
			 */
			if (!(bf_get(lpfc_sli4_sge_type, sgl) &
				      LPFC_SGE_TYPE_LSP)) {
				if ((nseg - 1) == i)
					bf_set(lpfc_sli4_sge_last, sgl, 1);

				physaddr = sg_dma_address(sgel);
				dma_len = sg_dma_len(sgel);
				sgl->addr_lo = cpu_to_le32(putPaddrLow(
						       physaddr));
				sgl->addr_hi = cpu_to_le32(putPaddrHigh(
						       physaddr));

				bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(dma_len);

				dma_offset += dma_len;
				sgel = sg_next(sgel);

				sgl++;
				lsp_just_set = false;

			} else {
				/* LSP entry: point at the chained SGL page and
				 * re-process the current data segment there.
				 */
				sgl->word2 = cpu_to_le32(sgl->word2);
				sgl->sge_len = cpu_to_le32(
						phba->cfg_sg_dma_buf_size);

				sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
				i = i - 1;

				lsp_just_set = true;
			}

			j++;
		}

		/*
		 * Setup the first Payload BDE. For FCoE we just key off
		 * Performance Hints, for FC we use lpfc_enable_pbde.
		 * We populate words 13-15 of IOCB/WQE.
		 */
		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			bde->addrLow = first_data_sgl->addr_lo;
			bde->addrHigh = first_data_sgl->addr_hi;
			bde->tus.f.bdeSize =
					le32_to_cpu(first_data_sgl->sge_len);
			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
			bde->tus.w = cpu_to_le32(bde->tus.w);

		} else {
			memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
		}
	} else {
		/* No data: mark the fcp_rsp map entry as the last SGE */
		sgl += 1;
		/* clear the last flag in the fcp_rsp map entry */
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);

		if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
		    phba->cfg_enable_pbde) {
			bde = (struct ulp_bde64 *)
				&wqe->words[13];
			memset(bde, 0, (sizeof(uint32_t) * 3));
		}
	}

	/* Word 11 */
	if (phba->cfg_enable_pbde)
		bf_set(wqe_pbde, &wqe->generic.wqe_com, 1);

	/*
	 * Finish initializing those WQE fields that are dependent on the
	 * scsi_cmnd request_buffer.  fcpDl is kept in wire (big-endian)
	 * order.
	 */
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));

	/* First-burst applies to native FC writes when configured */
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    vport->cfg_first_burst_size &&
	    scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
		u32 init_len, total_len;

		total_len = be32_to_cpu(fcp_cmnd->fcpDl);
		init_len = min(total_len, vport->cfg_first_burst_size);

		/* Word 4 & 5 */
		wqe->fcp_iwrite.initial_xfer_len = init_len;
		wqe->fcp_iwrite.total_xfer_len = total_len;
	} else {
		/* Word 4 */
		wqe->fcp_iwrite.total_xfer_len =
			be32_to_cpu(fcp_cmnd->fcpDl);
	}

	/*
	 * If the OAS driver feature is enabled and the lun is enabled for
	 * OAS, set the oas iocb related flags.
	 */
	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
		scsi_cmnd->device->hostdata)->oas_enabled) {
		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
		lpfc_cmd->cur_iocbq.priority = ((struct lpfc_device_data *)
			scsi_cmnd->device->hostdata)->priority;

		/* Word 10 */
		bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
		bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);

		if (lpfc_cmd->cur_iocbq.priority)
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (lpfc_cmd->cur_iocbq.priority << 1));
		else
			bf_set(wqe_ccp, &wqe->generic.wqe_com,
			       (phba->cfg_XLanePriority << 1));
	}

	return 0;
}
3461
3462
3463
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473
3474
3475static int
3476lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3477 struct lpfc_io_buf *lpfc_cmd)
3478{
3479 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
3480 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
3481 struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
3482 struct lpfc_iocbq *pwqeq = &lpfc_cmd->cur_iocbq;
3483 union lpfc_wqe128 *wqe = &pwqeq->wqe;
3484 uint32_t num_sge = 0;
3485 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
3486 int prot_group_type = 0;
3487 int fcpdl;
3488 int ret = 1;
3489 struct lpfc_vport *vport = phba->pport;
3490
3491
3492
3493
3494
3495 if (scsi_sg_count(scsi_cmnd)) {
3496
3497
3498
3499
3500
3501
3502 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3503 scsi_sglist(scsi_cmnd),
3504 scsi_sg_count(scsi_cmnd), datadir);
3505 if (unlikely(!datasegcnt))
3506 return 1;
3507
3508 sgl += 1;
3509
3510 sgl->word2 = le32_to_cpu(sgl->word2);
3511 bf_set(lpfc_sli4_sge_last, sgl, 0);
3512 sgl->word2 = cpu_to_le32(sgl->word2);
3513
3514 sgl += 1;
3515 lpfc_cmd->seg_cnt = datasegcnt;
3516
3517
3518 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3519 !phba->cfg_xpsgl) {
3520 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3521 ret = 2;
3522 goto err;
3523 }
3524
3525 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3526
3527 switch (prot_group_type) {
3528 case LPFC_PG_TYPE_NO_DIF:
3529
3530 if (((lpfc_cmd->seg_cnt + 1) >
3531 phba->cfg_total_seg_cnt) &&
3532 !phba->cfg_xpsgl) {
3533 ret = 2;
3534 goto err;
3535 }
3536
3537 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3538 datasegcnt, lpfc_cmd);
3539
3540
3541 if (num_sge < 2) {
3542 ret = 2;
3543 goto err;
3544 }
3545 break;
3546
3547 case LPFC_PG_TYPE_DIF_BUF:
3548
3549
3550
3551
3552
3553 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3554 scsi_prot_sglist(scsi_cmnd),
3555 scsi_prot_sg_count(scsi_cmnd), datadir);
3556 if (unlikely(!protsegcnt)) {
3557 scsi_dma_unmap(scsi_cmnd);
3558 return 1;
3559 }
3560
3561 lpfc_cmd->prot_seg_cnt = protsegcnt;
3562
3563
3564
3565
3566 if (((lpfc_cmd->prot_seg_cnt * 3) >
3567 (phba->cfg_total_seg_cnt - 2)) &&
3568 !phba->cfg_xpsgl) {
3569 ret = 2;
3570 goto err;
3571 }
3572
3573 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3574 datasegcnt, protsegcnt, lpfc_cmd);
3575
3576
3577 if (num_sge < 3 ||
3578 (num_sge > phba->cfg_total_seg_cnt &&
3579 !phba->cfg_xpsgl)) {
3580 ret = 2;
3581 goto err;
3582 }
3583 break;
3584
3585 case LPFC_PG_TYPE_INVALID:
3586 default:
3587 scsi_dma_unmap(scsi_cmnd);
3588 lpfc_cmd->seg_cnt = 0;
3589
3590 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3591 "9083 Unexpected protection group %i\n",
3592 prot_group_type);
3593 return 2;
3594 }
3595 }
3596
3597 switch (scsi_get_prot_op(scsi_cmnd)) {
3598 case SCSI_PROT_WRITE_STRIP:
3599 case SCSI_PROT_READ_STRIP:
3600 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
3601 break;
3602 case SCSI_PROT_WRITE_INSERT:
3603 case SCSI_PROT_READ_INSERT:
3604 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
3605 break;
3606 case SCSI_PROT_WRITE_PASS:
3607 case SCSI_PROT_READ_PASS:
3608 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
3609 break;
3610 }
3611
3612 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3613 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
3614
3615
3616 if (!(phba->hba_flag & HBA_FCOE_MODE) &&
3617 vport->cfg_first_burst_size &&
3618 scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) {
3619 u32 init_len, total_len;
3620
3621 total_len = be32_to_cpu(fcp_cmnd->fcpDl);
3622 init_len = min(total_len, vport->cfg_first_burst_size);
3623
3624
3625 wqe->fcp_iwrite.initial_xfer_len = init_len;
3626 wqe->fcp_iwrite.total_xfer_len = total_len;
3627 } else {
3628
3629 wqe->fcp_iwrite.total_xfer_len =
3630 be32_to_cpu(fcp_cmnd->fcpDl);
3631 }
3632
3633
3634
3635
3636
3637 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3638 scsi_cmnd->device->hostdata)->oas_enabled) {
3639 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3640
3641
3642 bf_set(wqe_oas, &wqe->generic.wqe_com, 1);
3643 bf_set(wqe_ccpe, &wqe->generic.wqe_com, 1);
3644 bf_set(wqe_ccp, &wqe->generic.wqe_com,
3645 (phba->cfg_XLanePriority << 1));
3646 }
3647
3648
3649 if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_PASS)
3650 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
3651 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_STRIP)
3652 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
3653 else if (lpfc_cmd->cur_iocbq.iocb_flag & LPFC_IO_DIF_INSERT)
3654 bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
3655
3656 lpfc_cmd->cur_iocbq.iocb_flag &= ~(LPFC_IO_DIF_PASS |
3657 LPFC_IO_DIF_STRIP | LPFC_IO_DIF_INSERT);
3658
3659 return 0;
3660err:
3661 if (lpfc_cmd->seg_cnt)
3662 scsi_dma_unmap(scsi_cmnd);
3663 if (lpfc_cmd->prot_seg_cnt)
3664 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3665 scsi_prot_sg_count(scsi_cmnd),
3666 scsi_cmnd->sc_data_direction);
3667
3668 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3669 "9084 Cannot setup S/G List for HBA"
3670 "IO segs %d/%d SGL %d SCSI %d: %d %d\n",
3671 lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt,
3672 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3673 prot_group_type, num_sge);
3674
3675 lpfc_cmd->seg_cnt = 0;
3676 lpfc_cmd->prot_seg_cnt = 0;
3677 return ret;
3678}
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691
3692static inline int
3693lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3694{
3695 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3696}
3697
3698
3699
3700
3701
3702
3703
3704
3705
3706
3707
3708
3709
3710
3711static inline int
3712lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3713{
3714 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3715}
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730static inline int
3731lpfc_scsi_prep_cmnd_buf(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
3732 uint8_t tmo)
3733{
3734 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3735}
3736
3737
3738
3739
3740
3741
3742
3743
3744
3745
3746
/**
 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @fcpi_parm: FCP initiator parameter (HBA byte count for reads).
 *
 * This function posts an event (queue-full/busy, check-condition on a
 * READ_10/WRITE_10, or read-check error) to the worker thread when the
 * completed SCSI command indicates one of those conditions.  Commands that
 * match none of the cases post nothing.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_io_buf *lpfc_cmd, uint32_t fcpi_parm) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	if (!pnode)
		return;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
	    (cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		/* Check-condition with sense data on a READ_10/WRITE_10 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param
		 * and there is valid fcpi_parm, then there is a read_check
		 * error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	/* Queue the event for the worker thread and wake it */
	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}
3830
3831
3832
3833
3834
3835
3836
3837
3838
3839static void
3840lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3841{
3842
3843
3844
3845
3846
3847
3848 if (psb->seg_cnt > 0)
3849 scsi_dma_unmap(psb->pCmd);
3850 if (psb->prot_seg_cnt > 0)
3851 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3852 scsi_prot_sg_count(psb->pCmd),
3853 psb->pCmd->sc_data_direction);
3854}
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
/**
 * lpfc_handle_fcp_err - FCP response handler
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_io_buf data structure.
 * @fcpi_parm: FCP Initiator parameter (HBA-reported transfer count).
 *
 * This routine is called to process a completion with status field
 * IOSTAT_FCP_RSP_ERROR.  It sets the result field of the scsi command
 * based upon the SCSI status and FCP residual/response information, and
 * posts a scsi error event when appropriate.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd,
		    uint32_t fcpi_parm)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t fcpDl;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if (resp_info & RSP_LEN_VALID) {
		/* Sanity-check the response length before using rspInfo3 */
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
					 "2719 Invalid response length: "
					 "tgt x%x lun x%llx cmnd x%x rsplen "
					 "x%x\n", cmnd->device->id,
					 cmnd->device->lun, cmnd->cmnd[0],
					 rsplen);
			host_status = DID_ERROR;
			goto out;
		}
		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2757 Protocol failure detected during "
				 "processing of FCP I/O op: "
				 "tgt x%x lun x%llx cmnd x%x rspInfo3 x%x\n",
				 cmnd->device->id,
				 cmnd->device->lun, cmnd->cmnd[0],
				 fcprsp->rspInfo3);
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		/* Copy (capped) sense data; it follows the response data */
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
		  rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	/* special handling for under run conditions */
	if (!scsi_status && (resp_info & RESID_UNDER)) {
		/* don't log under runs if fcp set... */
		if (vport->cfg_log_verbose & LOG_FCP)
			logit = LOG_FCP_ERROR;
		/* unless operator says so */
		if (vport->cfg_log_verbose & LOG_FCP_UNDER)
			logit = LOG_FCP_UNDER;
	}

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "9024 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	scsi_set_resid(cmnd, 0);
	fcpDl = be32_to_cpu(fcpcmd->fcpDl);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
				 "9025 FCP Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 fcpDl,
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run, check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if (fcpi_parm && (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "9026 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 fcpDl,
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is
		 * at least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "9027 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "9028 FCP command x%x residual overrun error. "
				 "Data: x%x x%x\n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if (fcpi_parm) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "9029 FCP %s Check Error Data: "
				 "x%x x%x x%x x%x x%x\n",
				 ((cmnd->sc_data_direction == DMA_FROM_DEVICE) ?
				 "Read" : "Write"),
				 fcpDl, be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0], scsi_status);

		/* There is some issue with the LPe12000 that causes it
		 * to miscalculate the fcpi_parm and falsely trip this
		 * recovery logic.  Detect this case and don't error when true.
		 */
		if (fcpi_parm > fcpDl)
			goto out;

		switch (scsi_status) {
		case SAM_STAT_GOOD:
		case SAM_STAT_CHECK_CONDITION:
			/* Fabric dropped a data frame. Fail any successful
			 * command in which we detected dropped frames.
			 * A status of good or some check conditions could
			 * be considered a successful command.
			 */
			host_status = DID_ERROR;
			break;
		}
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = host_status << 16 | scsi_status;
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
}
4039
4040
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050static void
4051lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4052 struct lpfc_wcqe_complete *wcqe)
4053{
4054 struct lpfc_io_buf *lpfc_cmd =
4055 (struct lpfc_io_buf *)pwqeIn->context1;
4056 struct lpfc_vport *vport = pwqeIn->vport;
4057 struct lpfc_rport_data *rdata;
4058 struct lpfc_nodelist *ndlp;
4059 struct scsi_cmnd *cmd;
4060 unsigned long flags;
4061 struct lpfc_fast_path_event *fast_path_evt;
4062 struct Scsi_Host *shost;
4063 u32 logit = LOG_FCP;
4064 u32 status, idx;
4065 unsigned long iflags = 0;
4066 u8 wait_xb_clr = 0;
4067
4068
4069 if (!lpfc_cmd) {
4070 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4071 "9032 Null lpfc_cmd pointer. No "
4072 "release, skip completion\n");
4073 return;
4074 }
4075
4076 rdata = lpfc_cmd->rdata;
4077 ndlp = rdata->pnode;
4078
4079 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4080
4081
4082
4083
4084
4085
4086 spin_lock_irqsave(&phba->hbalock, iflags);
4087 lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_EXCHANGE_BUSY;
4088 spin_unlock_irqrestore(&phba->hbalock, iflags);
4089 }
4090
4091
4092 spin_lock(&lpfc_cmd->buf_lock);
4093
4094
4095 cmd = lpfc_cmd->pCmd;
4096 if (!cmd) {
4097 lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
4098 "9042 I/O completion: Not an active IO\n");
4099 spin_unlock(&lpfc_cmd->buf_lock);
4100 lpfc_release_scsi_buf(phba, lpfc_cmd);
4101 return;
4102 }
4103 idx = lpfc_cmd->cur_iocbq.hba_wqidx;
4104 if (phba->sli4_hba.hdwq)
4105 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4106
4107#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4108 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4109 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4110#endif
4111 shost = cmd->device->host;
4112
4113 status = bf_get(lpfc_wcqe_c_status, wcqe);
4114 lpfc_cmd->status = (status & LPFC_IOCB_STATUS_MASK);
4115 lpfc_cmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
4116
4117 lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
4118 if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
4119 lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
4120 if (phba->cfg_fcp_wait_abts_rsp)
4121 wait_xb_clr = 1;
4122 }
4123
4124#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
4125 if (lpfc_cmd->prot_data_type) {
4126 struct scsi_dif_tuple *src = NULL;
4127
4128 src = (struct scsi_dif_tuple *)lpfc_cmd->prot_data_segment;
4129
4130
4131
4132
4133 switch (lpfc_cmd->prot_data_type) {
4134 case LPFC_INJERR_REFTAG:
4135 src->ref_tag =
4136 lpfc_cmd->prot_data;
4137 break;
4138 case LPFC_INJERR_APPTAG:
4139 src->app_tag =
4140 (uint16_t)lpfc_cmd->prot_data;
4141 break;
4142 case LPFC_INJERR_GUARD:
4143 src->guard_tag =
4144 (uint16_t)lpfc_cmd->prot_data;
4145 break;
4146 default:
4147 break;
4148 }
4149
4150 lpfc_cmd->prot_data = 0;
4151 lpfc_cmd->prot_data_type = 0;
4152 lpfc_cmd->prot_data_segment = NULL;
4153 }
4154#endif
4155 if (unlikely(lpfc_cmd->status)) {
4156 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
4157 (lpfc_cmd->result & IOERR_DRVR_MASK))
4158 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
4159 else if (lpfc_cmd->status >= IOSTAT_CNT)
4160 lpfc_cmd->status = IOSTAT_DEFAULT;
4161 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR &&
4162 !lpfc_cmd->fcp_rsp->rspStatus3 &&
4163 (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) &&
4164 !(vport->cfg_log_verbose & LOG_FCP_UNDER))
4165 logit = 0;
4166 else
4167 logit = LOG_FCP | LOG_FCP_UNDER;
4168 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4169 "9034 FCP cmd x%x failed <%d/%lld> "
4170 "status: x%x result: x%x "
4171 "sid: x%x did: x%x oxid: x%x "
4172 "Data: x%x x%x x%x\n",
4173 cmd->cmnd[0],
4174 cmd->device ? cmd->device->id : 0xffff,
4175 cmd->device ? cmd->device->lun : 0xffff,
4176 lpfc_cmd->status, lpfc_cmd->result,
4177 vport->fc_myDID,
4178 (ndlp) ? ndlp->nlp_DID : 0,
4179 lpfc_cmd->cur_iocbq.sli4_xritag,
4180 wcqe->parameter, wcqe->total_data_placed,
4181 lpfc_cmd->cur_iocbq.iotag);
4182 }
4183
4184 switch (lpfc_cmd->status) {
4185 case IOSTAT_SUCCESS:
4186 cmd->result = DID_OK << 16;
4187 break;
4188 case IOSTAT_FCP_RSP_ERROR:
4189 lpfc_handle_fcp_err(vport, lpfc_cmd,
4190 pwqeIn->wqe.fcp_iread.total_xfer_len -
4191 wcqe->total_data_placed);
4192 break;
4193 case IOSTAT_NPORT_BSY:
4194 case IOSTAT_FABRIC_BSY:
4195 cmd->result = DID_TRANSPORT_DISRUPTED << 16;
4196 fast_path_evt = lpfc_alloc_fast_evt(phba);
4197 if (!fast_path_evt)
4198 break;
4199 fast_path_evt->un.fabric_evt.event_type =
4200 FC_REG_FABRIC_EVENT;
4201 fast_path_evt->un.fabric_evt.subcategory =
4202 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
4203 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
4204 if (ndlp) {
4205 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
4206 &ndlp->nlp_portname,
4207 sizeof(struct lpfc_name));
4208 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
4209 &ndlp->nlp_nodename,
4210 sizeof(struct lpfc_name));
4211 }
4212 fast_path_evt->vport = vport;
4213 fast_path_evt->work_evt.evt =
4214 LPFC_EVT_FASTPATH_MGMT_EVT;
4215 spin_lock_irqsave(&phba->hbalock, flags);
4216 list_add_tail(&fast_path_evt->work_evt.evt_listp,
4217 &phba->work_list);
4218 spin_unlock_irqrestore(&phba->hbalock, flags);
4219 lpfc_worker_wake_up(phba);
4220 lpfc_printf_vlog(vport, KERN_WARNING, logit,
4221 "9035 Fabric/Node busy FCP cmd x%x failed"
4222 " <%d/%lld> "
4223 "status: x%x result: x%x "
4224 "sid: x%x did: x%x oxid: x%x "
4225 "Data: x%x x%x x%x\n",
4226 cmd->cmnd[0],
4227 cmd->device ? cmd-><