24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/interrupt.h>
27#include <linux/delay.h>
28#include <linux/slab.h>
29#include <linux/lockdep.h>
30
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi_transport_fc.h>
36#include <scsi/fc/fc_fs.h>
37#include <linux/aer.h>
38#include <linux/crash_dump.h>
39#ifdef CONFIG_X86
40#include <asm/set_memory.h>
41#endif
42
43#include "lpfc_hw4.h"
44#include "lpfc_hw.h"
45#include "lpfc_sli.h"
46#include "lpfc_sli4.h"
47#include "lpfc_nl.h"
48#include "lpfc_disc.h"
49#include "lpfc.h"
50#include "lpfc_scsi.h"
51#include "lpfc_nvme.h"
52#include "lpfc_crtn.h"
53#include "lpfc_logmsg.h"
54#include "lpfc_compat.h"
55#include "lpfc_debugfs.h"
56#include "lpfc_vport.h"
57#include "lpfc_version.h"
58
59
60typedef enum _lpfc_iocb_type {
61 LPFC_UNKNOWN_IOCB,
62 LPFC_UNSOL_IOCB,
63 LPFC_SOL_IOCB,
64 LPFC_ABORT_IOCB
65} lpfc_iocb_type;
66
67
68
69static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
70 uint32_t);
71static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 uint8_t *, uint32_t *);
73static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 struct lpfc_iocbq *);
75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 struct hbq_dmabuf *);
77static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 struct hbq_dmabuf *dmabuf);
79static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
80 struct lpfc_queue *cq, struct lpfc_cqe *cqe);
81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
82 int);
83static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
84 struct lpfc_queue *eq,
85 struct lpfc_eqe *eqe);
86static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
87static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
88static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
89static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
90 struct lpfc_queue *cq,
91 struct lpfc_cqe *cqe);
92
93union lpfc_wqe128 lpfc_iread_cmd_template;
94union lpfc_wqe128 lpfc_iwrite_cmd_template;
95union lpfc_wqe128 lpfc_icmnd_cmd_template;
96
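/**
 * lpfc_get_iocb_from_iocbq - Return the iocb embedded in an iocbq
 * @iocbq: driver iocb object
 *
 * Returns a pointer to the IOCB_t carried inside @iocbq.
 */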
97static IOCB_t *
98lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
99{
100 return &iocbq->iocb;
101}
102
103
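/**
 * lpfc_wqe_cmd_template - Initialize the FCP WQE command templates
 *
 * Zeroes and pre-fills the global 128-byte WQE templates for the
 * FCP_IREAD64, FCP_IWRITE64 and FCP_ICMND64 commands with the fields
 * that do not change from one I/O to the next (command code, class,
 * CT, IOD, lenloc, command type, CQ id, and so on), leaving only the
 * per-command words for the I/O fast path to fill in.
 */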
104void lpfc_wqe_cmd_template(void)
105{
106 union lpfc_wqe128 *wqe;
107
108
109 wqe = &lpfc_iread_cmd_template;
110 memset(wqe, 0, sizeof(union lpfc_wqe128));
111
112
113
114
115
116
117
118
119
120
121
122
123 bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
124 bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
125 bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
126 bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
127
128
129
130
131
132
133 bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
134 bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
135 bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
136 bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
137 bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
138
139
140 bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
141 bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
142 bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);
143
144
145
146
147
148
149 wqe = &lpfc_iwrite_cmd_template;
150 memset(wqe, 0, sizeof(union lpfc_wqe128));
151
152
153
154
155
156
157
158
159
160
161
162
163 bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
164 bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
165 bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
166 bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
167
168
169
170
171
172
173 bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
174 bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
175 bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
176 bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
177 bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
178
179
180 bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
181 bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
182 bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);
183
184
185
186
187
188
189 wqe = &lpfc_icmnd_cmd_template;
190 memset(wqe, 0, sizeof(union lpfc_wqe128));
191
192
193
194
195
196
197
198
199
200
201 bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
202 bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
203 bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
204 bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
205
206
207
208
209
210
211 bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
212 bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
213 bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
214 bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
215 bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
216
217
218 bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
219 bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
220 bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
221
222
223}
224
225#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
226
227
228
229
230
231
232
233
234
235
236
237
238
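/**
 * lpfc_sli4_pcimem_bcopy - Copy SLI4 queue entries as 64-bit words
 * @srcp: source buffer
 * @destp: destination buffer (typically a host-memory queue entry)
 * @cnt: number of bytes to copy
 *
 * On 64-bit little-endian builds, copies @cnt bytes eight bytes at a
 * time with no byte swapping; other configurations map this onto
 * lpfc_sli_pcimem_bcopy() via the macro below.
 */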
239static void
240lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
241{
242 uint64_t *src = srcp;
243 uint64_t *dest = destp;
244 int i;
245
246 for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
247 *dest++ = *src++;
248}
249#else
250#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
251#endif
252
253
254
255
256
257
258
259
260
261
262
263
264
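/**
 * lpfc_sli4_wq_put - Post a Work Queue Entry on a SLI4 Work Queue
 * @q: the work queue to post to
 * @wqe: the work queue entry to post
 *
 * Copies @wqe into the next free entry of @q, optionally mirrors the
 * entry into the queue's DPP register area, and then rings the WQ
 * doorbell in whichever format (list or ring) the queue was created
 * with.
 *
 * Returns 0 on success, -ENOMEM if @q is NULL, -EBUSY if the queue is
 * full, or -EINVAL for an unknown doorbell format.
 */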
265static int
266lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
267{
268 union lpfc_wqe *temp_wqe;
269 struct lpfc_register doorbell;
270 uint32_t host_index;
271 uint32_t idx;
272 uint32_t i = 0;
273 uint8_t *tmp;
274 u32 if_type;
275
276
277 if (unlikely(!q))
278 return -ENOMEM;
279
280 temp_wqe = lpfc_sli4_qe(q, q->host_index);
281
282
283 idx = ((q->host_index + 1) % q->entry_count);
284 if (idx == q->hba_index) {
285 q->WQ_overflow++;
286 return -EBUSY;
287 }
288 q->WQ_posted++;
289
290 if (!((q->host_index + 1) % q->notify_interval))
291 bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
292 else
293 bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
294 if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
295 bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
296 lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
297 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
298
299 tmp = (uint8_t *)temp_wqe;
300#ifdef __raw_writeq
301 for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
302 __raw_writeq(*((uint64_t *)(tmp + i)),
303 q->dpp_regaddr + i);
304#else
305 for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
306 __raw_writel(*((uint32_t *)(tmp + i)),
307 q->dpp_regaddr + i);
308#endif
309 }
310
311 wmb();
312
313
314 host_index = q->host_index;
315
316 q->host_index = idx;
317
318
319 doorbell.word0 = 0;
320 if (q->db_format == LPFC_DB_LIST_FORMAT) {
321 if (q->dpp_enable && q->phba->cfg_enable_dpp) {
322 bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
323 bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
324 bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
325 q->dpp_id);
326 bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
327 q->queue_id);
328 } else {
329 bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
330 bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
331
332
333 if_type = bf_get(lpfc_sli_intf_if_type,
334 &q->phba->sli4_hba.sli_intf);
335 if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
336 bf_set(lpfc_wq_db_list_fm_index, &doorbell,
337 host_index);
338 }
339 } else if (q->db_format == LPFC_DB_RING_FORMAT) {
340 bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
341 bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
342 } else {
343 return -EINVAL;
344 }
345 writel(doorbell.word0, q->db_regaddr);
346
347 return 0;
348}
349
350
351
352
353
354
355
356
357
358
359
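/**
 * lpfc_sli4_wq_release - Update a Work Queue's hba index
 * @q: the work queue
 * @index: the hba index reported by the HBA
 *
 * Records how far the HBA has consumed entries from @q so later posts
 * can detect a full queue.
 */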
360static void
361lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
362{
363
364 if (unlikely(!q))
365 return;
366
367 q->hba_index = index;
368}
369
370
371
372
373
374
375
376
377
378
379
380
381
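/**
 * lpfc_sli4_mq_put - Post a Mailbox Queue Entry on a SLI4 MQ
 * @q: the mailbox queue to post to
 * @mqe: the mailbox queue entry to post
 *
 * Copies @mqe into the next free entry of @q, caches a pointer to it in
 * phba->mbox, advances the host index, and rings the MQ doorbell.
 *
 * Returns 0 on success, or -ENOMEM if @q is NULL or full.
 */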
382static uint32_t
383lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
384{
385 struct lpfc_mqe *temp_mqe;
386 struct lpfc_register doorbell;
387
388
389 if (unlikely(!q))
390 return -ENOMEM;
391 temp_mqe = lpfc_sli4_qe(q, q->host_index);
392
393
394 if (((q->host_index + 1) % q->entry_count) == q->hba_index)
395 return -ENOMEM;
396 lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
397
398 q->phba->mbox = (MAILBOX_t *)temp_mqe;
399
400
401 q->host_index = ((q->host_index + 1) % q->entry_count);
402
403
404 doorbell.word0 = 0;
405 bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
406 bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
407 writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
408 return 0;
409}
410
411
412
413
414
415
416
417
418
419
420
421static uint32_t
422lpfc_sli4_mq_release(struct lpfc_queue *q)
423{
424
425 if (unlikely(!q))
426 return 0;
427
428
429 q->phba->mbox = NULL;
430 q->hba_index = ((q->hba_index + 1) % q->entry_count);
431 return 1;
432}
433
434
435
436
437
438
439
440
441
442
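/**
 * lpfc_sli4_eq_get - Get the next valid Event Queue Entry
 * @q: the event queue
 *
 * Returns the EQE at the current host index if its valid bit matches
 * the queue's expected phase, or NULL when no new event is pending.
 * A memory barrier keeps the EQE payload from being read before the
 * valid bit has been checked.
 */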
443static struct lpfc_eqe *
444lpfc_sli4_eq_get(struct lpfc_queue *q)
445{
446 struct lpfc_eqe *eqe;
447
448
449 if (unlikely(!q))
450 return NULL;
451 eqe = lpfc_sli4_qe(q, q->host_index);
452
453
454 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
455 return NULL;
456
457
458
459
460
461
462
463
464
465
466 mb();
467 return eqe;
468}
469
470
471
472
473
474
475void
476lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
477{
478 struct lpfc_register doorbell;
479
480 doorbell.word0 = 0;
481 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
482 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
483 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
484 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
485 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
486 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
487}
488
489
490
491
492
493
494void
495lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
496{
497 struct lpfc_register doorbell;
498
499 doorbell.word0 = 0;
500 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
501 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
502}
503
504
505
506
507
508
509
510
511
512
513
514
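/**
 * lpfc_sli4_write_eq_db - Write an Event Queue doorbell
 * @phba: adapter instance
 * @q: the event queue
 * @count: number of EQEs being released back to the HBA
 * @arm: true to re-arm the EQ for further interrupts
 *
 * Releases @count entries and optionally re-arms @q; the companion
 * *_if6_* routine below does the same for SLI4 interface type 6
 * doorbell layouts. With INTx interrupts the doorbell is read back
 * when re-arming to flush the PCI write.
 */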
515void
516lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
517 uint32_t count, bool arm)
518{
519 struct lpfc_register doorbell;
520
521
522 if (unlikely(!q || (count == 0 && !arm)))
523 return;
524
525
526 doorbell.word0 = 0;
527 if (arm) {
528 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
529 bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
530 }
531 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
532 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
533 bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
534 (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
535 bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
536 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
537
538 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
539 readl(q->phba->sli4_hba.EQDBregaddr);
540}
541
542
543
544
545
546
547
548
549
550
551
552
553void
554lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
555 uint32_t count, bool arm)
556{
557 struct lpfc_register doorbell;
558
559
560 if (unlikely(!q || (count == 0 && !arm)))
561 return;
562
563
564 doorbell.word0 = 0;
565 if (arm)
566 bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
567 bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
568 bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
569 writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
570
571 if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
572 readl(q->phba->sli4_hba.EQDBregaddr);
573}
574
575static void
576__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
577 struct lpfc_eqe *eqe)
578{
579 if (!phba->sli4_hba.pc_sli4_params.eqav)
580 bf_set_le32(lpfc_eqe_valid, eqe, 0);
581
582 eq->host_index = ((eq->host_index + 1) % eq->entry_count);
583
584
585 if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
586 eq->qe_valid = (eq->qe_valid) ? 0 : 1;
587}
588
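/**
 * lpfc_sli4_eqcq_flush - Drain an EQ and all of its child CQs
 * @phba: adapter instance
 * @eq: the event queue to flush
 *
 * Consumes every pending EQE on @eq and, for each one, finds the child
 * completion queue it points at and consumes all of that queue's
 * pending CQEs, re-arming each CQ and finally the EQ itself.
 */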
589static void
590lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
591{
592 struct lpfc_eqe *eqe = NULL;
593 u32 eq_count = 0, cq_count = 0;
594 struct lpfc_cqe *cqe = NULL;
595 struct lpfc_queue *cq = NULL, *childq = NULL;
596 int cqid = 0;
597
598
599 eqe = lpfc_sli4_eq_get(eq);
600 while (eqe) {
601
602 cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
603 cq = NULL;
604
605 list_for_each_entry(childq, &eq->child_list, list) {
606 if (childq->queue_id == cqid) {
607 cq = childq;
608 break;
609 }
610 }
611
612 if (cq) {
613 cqe = lpfc_sli4_cq_get(cq);
614 while (cqe) {
615 __lpfc_sli4_consume_cqe(phba, cq, cqe);
616 cq_count++;
617 cqe = lpfc_sli4_cq_get(cq);
618 }
619
620 phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
621 LPFC_QUEUE_REARM);
622 cq_count = 0;
623 }
624 __lpfc_sli4_consume_eqe(phba, eq, eqe);
625 eq_count++;
626 eqe = lpfc_sli4_eq_get(eq);
627 }
628
629
630 phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
631}
632
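/**
 * lpfc_sli4_process_eq - Process the entries on an Event Queue
 * @phba: adapter instance
 * @eq: the event queue to service
 * @rearm: LPFC_QUEUE_REARM or LPFC_QUEUE_NOARM for the final doorbell
 *
 * Claims @eq (backing off if another context already owns it), handles
 * and consumes EQEs up to eq->max_proc_limit, releases the consumed
 * entries in batches of eq->notify_interval, and finishes by writing
 * the EQ doorbell with the requested arm setting.
 *
 * Returns the number of EQEs processed.
 */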
633static int
634lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
635 uint8_t rearm)
636{
637 struct lpfc_eqe *eqe;
638 int count = 0, consumed = 0;
639
640 if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
641 goto rearm_and_exit;
642
643 eqe = lpfc_sli4_eq_get(eq);
644 while (eqe) {
645 lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
646 __lpfc_sli4_consume_eqe(phba, eq, eqe);
647
648 consumed++;
649 if (!(++count % eq->max_proc_limit))
650 break;
651
652 if (!(count % eq->notify_interval)) {
653 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
654 LPFC_QUEUE_NOARM);
655 consumed = 0;
656 }
657
658 eqe = lpfc_sli4_eq_get(eq);
659 }
660 eq->EQ_processed += count;
661
662
663 if (count > eq->EQ_max_eqe)
664 eq->EQ_max_eqe = count;
665
666 xchg(&eq->queue_claimed, 0);
667
668rearm_and_exit:
669
670 phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);
671
672 return count;
673}
674
675
676
677
678
679
680
681
682
683
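/**
 * lpfc_sli4_cq_get - Get the next valid Completion Queue Entry
 * @q: the completion queue
 *
 * Returns the CQE at the current host index if its valid bit matches
 * the queue's expected phase, or NULL when no new completion is
 * pending. A memory barrier orders the valid-bit check before any use
 * of the CQE payload.
 */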
684static struct lpfc_cqe *
685lpfc_sli4_cq_get(struct lpfc_queue *q)
686{
687 struct lpfc_cqe *cqe;
688
689
690 if (unlikely(!q))
691 return NULL;
692 cqe = lpfc_sli4_qe(q, q->host_index);
693
694
695 if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
696 return NULL;
697
698
699
700
701
702
703
704
705
706 mb();
707 return cqe;
708}
709
710static void
711__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
712 struct lpfc_cqe *cqe)
713{
714 if (!phba->sli4_hba.pc_sli4_params.cqav)
715 bf_set_le32(lpfc_cqe_valid, cqe, 0);
716
717 cq->host_index = ((cq->host_index + 1) % cq->entry_count);
718
719
720 if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
721 cq->qe_valid = (cq->qe_valid) ? 0 : 1;
722}
723
724
725
726
727
728
729
730
731
732
733
734
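/**
 * lpfc_sli4_write_cq_db - Write a Completion Queue doorbell
 * @phba: adapter instance
 * @q: the completion queue
 * @count: number of CQEs being released back to the HBA
 * @arm: true to re-arm the CQ
 *
 * Does nothing when @q is NULL or there is neither anything to release
 * nor a request to re-arm.
 */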
735void
736lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
737 uint32_t count, bool arm)
738{
739 struct lpfc_register doorbell;
740
741
742 if (unlikely(!q || (count == 0 && !arm)))
743 return;
744
745
746 doorbell.word0 = 0;
747 if (arm)
748 bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
749 bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
750 bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
751 bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
752 (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
753 bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
754 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
755}
756
757
758
759
760
761
762
763
764
765
766
767
768void
769lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
770 uint32_t count, bool arm)
771{
772 struct lpfc_register doorbell;
773
774
775 if (unlikely(!q || (count == 0 && !arm)))
776 return;
777
778
779 doorbell.word0 = 0;
780 if (arm)
781 bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
782 bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
783 bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
784 writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
785}
786
787
788
789
790
791
792
793
794
795
796
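/**
 * lpfc_sli4_rq_put - Post a Receive Queue Entry pair
 * @hq: the header receive queue
 * @dq: the data receive queue
 * @hrqe: RQE describing the header buffer
 * @drqe: RQE describing the data buffer
 *
 * Copies @hrqe and @drqe into the next slots of the paired header/data
 * receive queues and rings the RQ doorbell once every
 * hq->notify_interval buffers have been posted.
 *
 * Returns the index the pair was posted at, -ENOMEM if either queue is
 * NULL, -EINVAL on a queue type or index mismatch, or -EBUSY if the
 * queues are full.
 */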
797int
798lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
799 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
800{
801 struct lpfc_rqe *temp_hrqe;
802 struct lpfc_rqe *temp_drqe;
803 struct lpfc_register doorbell;
804 int hq_put_index;
805 int dq_put_index;
806
807
808 if (unlikely(!hq) || unlikely(!dq))
809 return -ENOMEM;
810 hq_put_index = hq->host_index;
811 dq_put_index = dq->host_index;
812 temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
813 temp_drqe = lpfc_sli4_qe(dq, dq_put_index);
814
815 if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
816 return -EINVAL;
817 if (hq_put_index != dq_put_index)
818 return -EINVAL;
819
820 if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
821 return -EBUSY;
822 lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
823 lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
824
825
826 hq->host_index = ((hq_put_index + 1) % hq->entry_count);
827 dq->host_index = ((dq_put_index + 1) % dq->entry_count);
828 hq->RQ_buf_posted++;
829
830
831 if (!(hq->host_index % hq->notify_interval)) {
832 doorbell.word0 = 0;
833 if (hq->db_format == LPFC_DB_RING_FORMAT) {
834 bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
835 hq->notify_interval);
836 bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
837 } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
838 bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
839 hq->notify_interval);
840 bf_set(lpfc_rq_db_list_fm_index, &doorbell,
841 hq->host_index);
842 bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
843 } else {
844 return -EINVAL;
845 }
846 writel(doorbell.word0, hq->db_regaddr);
847 }
848 return hq_put_index;
849}
850
851
852
853
854
855
856
857
858
859
860static uint32_t
861lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
862{
863
864 if (unlikely(!hq) || unlikely(!dq))
865 return 0;
866
867 if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
868 return 0;
869 hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
870 dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
871 return 1;
872}
873
874
875
876
877
878
879
880
881
882
883
884static inline IOCB_t *
885lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
886{
887 return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
888 pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
889}
890
891
892
893
894
895
896
897
898
899
900
901static inline IOCB_t *
902lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
903{
904 return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
905 pring->sli.sli3.rspidx * phba->iocb_rsp_size);
906}
907
908
909
910
911
912
913
914
915
916
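/**
 * __lpfc_sli_get_iocbq - Allocate an iocb object from the iocb pool
 * @phba: adapter instance
 *
 * Removes the head of phba->lpfc_iocb_list and updates the outstanding
 * iocb counters. Must be called with the hbalock held (lockdep
 * checked).
 *
 * Returns the iocb object, or NULL if the pool is empty.
 */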
917struct lpfc_iocbq *
918__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
919{
920 struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
921 struct lpfc_iocbq * iocbq = NULL;
922
923 lockdep_assert_held(&phba->hbalock);
924
925 list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
926 if (iocbq)
927 phba->iocb_cnt++;
928 if (phba->iocb_cnt > phba->iocb_max)
929 phba->iocb_max = phba->iocb_cnt;
930 return iocbq;
931}
932
933
934
935
936
937
938
939
940
941
942
943
944
945struct lpfc_sglq *
946__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
947{
948 struct lpfc_sglq *sglq;
949
950 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
951 phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
952 return sglq;
953}
954
955
956
957
958
959
960
961
962
963
964
965
966
967struct lpfc_sglq *
968__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
969{
970 struct lpfc_sglq *sglq;
971
972 sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
973 return sglq;
974}
975
976
977
978
979
980
981
982
983void
984lpfc_clr_rrq_active(struct lpfc_hba *phba,
985 uint16_t xritag,
986 struct lpfc_node_rrq *rrq)
987{
988 struct lpfc_nodelist *ndlp = NULL;
989
990
991 if (rrq->vport)
992 ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
993
994 if (!ndlp)
995 goto out;
996
997 if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
998 rrq->send_rrq = 0;
999 rrq->xritag = 0;
1000 rrq->rrq_stop_time = 0;
1001 }
1002out:
1003 mempool_free(rrq, phba->rrq_pool);
1004}
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
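/**
 * lpfc_handle_rrq_active - Service the adapter's active RRQ list
 * @phba: adapter instance
 *
 * Moves every RRQ whose stop time has passed onto a local list,
 * restarts the RRQ timer for the earliest remaining expiry, and then
 * either sends an RRQ ELS for each expired entry or simply clears its
 * active-XRI state when no RRQ needs to be (or could be) sent.
 */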
1020void
1021lpfc_handle_rrq_active(struct lpfc_hba *phba)
1022{
1023 struct lpfc_node_rrq *rrq;
1024 struct lpfc_node_rrq *nextrrq;
1025 unsigned long next_time;
1026 unsigned long iflags;
1027 LIST_HEAD(send_rrq);
1028
1029 spin_lock_irqsave(&phba->hbalock, iflags);
1030 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1031 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1032 list_for_each_entry_safe(rrq, nextrrq,
1033 &phba->active_rrq_list, list) {
1034 if (time_after(jiffies, rrq->rrq_stop_time))
1035 list_move(&rrq->list, &send_rrq);
1036 else if (time_before(rrq->rrq_stop_time, next_time))
1037 next_time = rrq->rrq_stop_time;
1038 }
1039 spin_unlock_irqrestore(&phba->hbalock, iflags);
1040 if ((!list_empty(&phba->active_rrq_list)) &&
1041 (!(phba->pport->load_flag & FC_UNLOADING)))
1042 mod_timer(&phba->rrq_tmr, next_time);
1043 list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
1044 list_del(&rrq->list);
1045 if (!rrq->send_rrq) {
1046
1047 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1048 } else if (lpfc_send_rrq(phba, rrq)) {
1049
1050
1051
1052 lpfc_clr_rrq_active(phba, rrq->xritag,
1053 rrq);
1054 }
1055 }
1056}
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067struct lpfc_node_rrq *
1068lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
1069{
1070 struct lpfc_hba *phba = vport->phba;
1071 struct lpfc_node_rrq *rrq;
1072 struct lpfc_node_rrq *nextrrq;
1073 unsigned long iflags;
1074
1075 if (phba->sli_rev != LPFC_SLI_REV4)
1076 return NULL;
1077 spin_lock_irqsave(&phba->hbalock, iflags);
1078 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1079 if (rrq->vport == vport && rrq->xritag == xri &&
1080 rrq->nlp_DID == did){
1081 list_del(&rrq->list);
1082 spin_unlock_irqrestore(&phba->hbalock, iflags);
1083 return rrq;
1084 }
1085 }
1086 spin_unlock_irqrestore(&phba->hbalock, iflags);
1087 return NULL;
1088}
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
1102 struct lpfc_hba *phba = vport->phba;
1103 struct lpfc_node_rrq *rrq;
1104 struct lpfc_node_rrq *nextrrq;
1105 unsigned long iflags;
1106 LIST_HEAD(rrq_list);
1107
1108 if (phba->sli_rev != LPFC_SLI_REV4)
1109 return;
1110 if (!ndlp) {
1111 lpfc_sli4_vport_delete_els_xri_aborted(vport);
1112 lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
1113 }
1114 spin_lock_irqsave(&phba->hbalock, iflags);
1115 list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1116 if (rrq->vport != vport)
1117 continue;
1118
1119 if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
1120 list_move(&rrq->list, &rrq_list);
1121
1122 }
1123 spin_unlock_irqrestore(&phba->hbalock, iflags);
1124
1125 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1126 list_del(&rrq->list);
1127 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1128 }
1129}
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141int
1142lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1143 uint16_t xritag)
1144{
1145 if (!ndlp)
1146 return 0;
1147 if (!ndlp->active_rrqs_xri_bitmap)
1148 return 0;
1149 if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1150 return 1;
1151 else
1152 return 0;
1153}
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
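/**
 * lpfc_set_rrq_active - Mark an XRI as having an outstanding RRQ
 * @phba: adapter instance
 * @ndlp: node the exchange was with
 * @xritag: XRI to quarantine
 * @rxid: responder exchange id to carry in the RRQ
 * @send_rrq: nonzero if an RRQ ELS should actually be sent
 *
 * Sets the XRI's bit in the node's active-RRQ bitmap, allocates an
 * lpfc_node_rrq tracking entry, and queues it on the adapter's active
 * RRQ list, waking the worker thread if the list was previously empty.
 *
 * Returns 0 on success, or -EINVAL if RRQs are disabled, the port is
 * unloading, the XRI is already marked active, or no memory is
 * available.
 */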
1170int
1171lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1172 uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
1173{
1174 unsigned long iflags;
1175 struct lpfc_node_rrq *rrq;
1176 int empty;
1177
1178 if (!ndlp)
1179 return -EINVAL;
1180
1181 if (!phba->cfg_enable_rrq)
1182 return -EINVAL;
1183
1184 spin_lock_irqsave(&phba->hbalock, iflags);
1185 if (phba->pport->load_flag & FC_UNLOADING) {
1186 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
1187 goto out;
1188 }
1189
1190 if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
1191 goto out;
1192
1193 if (!ndlp->active_rrqs_xri_bitmap)
1194 goto out;
1195
1196 if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1197 goto out;
1198
1199 spin_unlock_irqrestore(&phba->hbalock, iflags);
1200 rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
1201 if (!rrq) {
1202 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1203 "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
1204 " DID:0x%x Send:%d\n",
1205 xritag, rxid, ndlp->nlp_DID, send_rrq);
1206 return -EINVAL;
1207 }
1208 if (phba->cfg_enable_rrq == 1)
1209 rrq->send_rrq = send_rrq;
1210 else
1211 rrq->send_rrq = 0;
1212 rrq->xritag = xritag;
1213 rrq->rrq_stop_time = jiffies +
1214 msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
1215 rrq->nlp_DID = ndlp->nlp_DID;
1216 rrq->vport = ndlp->vport;
1217 rrq->rxid = rxid;
1218 spin_lock_irqsave(&phba->hbalock, iflags);
1219 empty = list_empty(&phba->active_rrq_list);
1220 list_add_tail(&rrq->list, &phba->active_rrq_list);
1221 phba->hba_flag |= HBA_RRQ_ACTIVE;
1222 if (empty)
1223 lpfc_worker_wake_up(phba);
1224 spin_unlock_irqrestore(&phba->hbalock, iflags);
1225 return 0;
1226out:
1227 spin_unlock_irqrestore(&phba->hbalock, iflags);
1228 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1229 "2921 Can't set rrq active xri:0x%x rxid:0x%x"
1230 " DID:0x%x Send:%d\n",
1231 xritag, rxid, ndlp->nlp_DID, send_rrq);
1232 return -EINVAL;
1233}
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
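/**
 * __lpfc_sli_get_els_sglq - Allocate an sglq for an ELS/CT iocb
 * @phba: adapter instance
 * @piocbq: iocb the sglq will be bound to
 *
 * Walks the free ELS sgl list for an entry whose XRI is not still
 * quarantined by an outstanding RRQ against the destination node;
 * entries that are still quarantined are put back on the tail of the
 * list. A usable sglq is recorded in the active sglq array and marked
 * SGL_ALLOCATED. Called with the owning ring lock held (lockdep
 * checked).
 *
 * Returns the sglq, or NULL if none is available.
 */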
1246static struct lpfc_sglq *
1247__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1248{
1249 struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
1250 struct lpfc_sglq *sglq = NULL;
1251 struct lpfc_sglq *start_sglq = NULL;
1252 struct lpfc_io_buf *lpfc_cmd;
1253 struct lpfc_nodelist *ndlp;
1254 struct lpfc_sli_ring *pring = NULL;
1255 int found = 0;
1256
1257 if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
1258 pring = phba->sli4_hba.nvmels_wq->pring;
1259 else
1260 pring = lpfc_phba_elsring(phba);
1261
1262 lockdep_assert_held(&pring->ring_lock);
1263
1264 if (piocbq->iocb_flag & LPFC_IO_FCP) {
1265 lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
1266 ndlp = lpfc_cmd->rdata->pnode;
1267 } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
1268 !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
1269 ndlp = piocbq->context_un.ndlp;
1270 } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
1271 if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
1272 ndlp = NULL;
1273 else
1274 ndlp = piocbq->context_un.ndlp;
1275 } else {
1276 ndlp = piocbq->context1;
1277 }
1278
1279 spin_lock(&phba->sli4_hba.sgl_list_lock);
1280 list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
1281 start_sglq = sglq;
1282 while (!found) {
1283 if (!sglq)
1284 break;
1285 if (ndlp && ndlp->active_rrqs_xri_bitmap &&
1286 test_bit(sglq->sli4_lxritag,
1287 ndlp->active_rrqs_xri_bitmap)) {
1288
1289
1290
1291 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1292 sglq = NULL;
1293 list_remove_head(lpfc_els_sgl_list, sglq,
1294 struct lpfc_sglq, list);
1295 if (sglq == start_sglq) {
1296 list_add_tail(&sglq->list, lpfc_els_sgl_list);
1297 sglq = NULL;
1298 break;
1299 } else
1300 continue;
1301 }
1302 sglq->ndlp = ndlp;
1303 found = 1;
1304 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1305 sglq->state = SGL_ALLOCATED;
1306 }
1307 spin_unlock(&phba->sli4_hba.sgl_list_lock);
1308 return sglq;
1309}
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321struct lpfc_sglq *
1322__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1323{
1324 struct list_head *lpfc_nvmet_sgl_list;
1325 struct lpfc_sglq *sglq = NULL;
1326
1327 lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1328
1329 lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1330
1331 list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1332 if (!sglq)
1333 return NULL;
1334 phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1335 sglq->state = SGL_ALLOCATED;
1336 return sglq;
1337}
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348struct lpfc_iocbq *
1349lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1350{
1351 struct lpfc_iocbq * iocbq = NULL;
1352 unsigned long iflags;
1353
1354 spin_lock_irqsave(&phba->hbalock, iflags);
1355 iocbq = __lpfc_sli_get_iocbq(phba);
1356 spin_unlock_irqrestore(&phba->hbalock, iflags);
1357 return iocbq;
1358}
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
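/**
 * __lpfc_sli_release_iocbq_s4 - SLI4 variant of iocb object release
 * @phba: adapter instance
 * @iocbq: iocb object being freed
 *
 * Returns the iocb's sglq (if it has one) to the proper pool: NVMET
 * sglqs go back to the NVMET list, sglqs whose exchange is still busy
 * go to the aborted-ELS list, and everything else returns to the free
 * ELS list (waking the worker thread if ELS commands are waiting on
 * the txq). The iocbq itself is then cleared from the 'iocb' member
 * onward and placed back on the adapter's iocb free list.
 */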
1379static void
1380__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1381{
1382 struct lpfc_sglq *sglq;
1383 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1384 unsigned long iflag = 0;
1385 struct lpfc_sli_ring *pring;
1386
1387 if (iocbq->sli4_xritag == NO_XRI)
1388 sglq = NULL;
1389 else
1390 sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1391
1392
1393 if (sglq) {
1394 if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1395 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1396 iflag);
1397 sglq->state = SGL_FREED;
1398 sglq->ndlp = NULL;
1399 list_add_tail(&sglq->list,
1400 &phba->sli4_hba.lpfc_nvmet_sgl_list);
1401 spin_unlock_irqrestore(
1402 &phba->sli4_hba.sgl_list_lock, iflag);
1403 goto out;
1404 }
1405
1406 if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1407 (sglq->state != SGL_XRI_ABORTED)) {
1408 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1409 iflag);
1410
1411
1412 if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1413 sglq->ndlp = NULL;
1414
1415 list_add(&sglq->list,
1416 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1417 spin_unlock_irqrestore(
1418 &phba->sli4_hba.sgl_list_lock, iflag);
1419 } else {
1420 spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1421 iflag);
1422 sglq->state = SGL_FREED;
1423 sglq->ndlp = NULL;
1424 list_add_tail(&sglq->list,
1425 &phba->sli4_hba.lpfc_els_sgl_list);
1426 spin_unlock_irqrestore(
1427 &phba->sli4_hba.sgl_list_lock, iflag);
1428 pring = lpfc_phba_elsring(phba);
1429
1430 if (pring && (!list_empty(&pring->txq)))
1431 lpfc_worker_wake_up(phba);
1432 }
1433 }
1434
1435out:
1436
1437
1438
1439 memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1440 iocbq->sli4_lxritag = NO_XRI;
1441 iocbq->sli4_xritag = NO_XRI;
1442 iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1443 LPFC_IO_NVME_LS);
1444 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1445}
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459static void
1460__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1461{
1462 size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1463
1464
1465
1466
1467 memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1468 iocbq->sli4_xritag = NO_XRI;
1469 list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1470}
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482static void
1483__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1484{
1485 lockdep_assert_held(&phba->hbalock);
1486
1487 phba->__lpfc_sli_release_iocbq(phba, iocbq);
1488 phba->iocb_cnt--;
1489}
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499void
1500lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1501{
1502 unsigned long iflags;
1503
1504
1505
1506
1507 spin_lock_irqsave(&phba->hbalock, iflags);
1508 __lpfc_sli_release_iocbq(phba, iocbq);
1509 spin_unlock_irqrestore(&phba->hbalock, iflags);
1510}
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
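/**
 * lpfc_sli_cancel_iocbs - Cancel a list of driver iocbs
 * @phba: adapter instance
 * @iocblist: list of iocbs to cancel
 * @ulpstatus: ULP status to complete each iocb with
 * @ulpWord4: ULP word 4 value to complete each iocb with
 *
 * Empties @iocblist, completing each entry with the given status: NVME
 * iocbs with a WQE completion handler are cancelled through
 * lpfc_nvme_cancel_iocb(), iocbs with an IOCB completion handler are
 * completed through it, and everything else is released straight back
 * to the iocb pool.
 */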
1524void
1525lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1526 uint32_t ulpstatus, uint32_t ulpWord4)
1527{
1528 struct lpfc_iocbq *piocb;
1529
1530 while (!list_empty(iocblist)) {
1531 list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1532 if (piocb->wqe_cmpl) {
1533 if (piocb->iocb_flag & LPFC_IO_NVME)
1534 lpfc_nvme_cancel_iocb(phba, piocb,
1535 ulpstatus, ulpWord4);
1536 else
1537 lpfc_sli_release_iocbq(phba, piocb);
1538
1539 } else if (piocb->iocb_cmpl) {
1540 piocb->iocb.ulpStatus = ulpstatus;
1541 piocb->iocb.un.ulpWord[4] = ulpWord4;
1542 (piocb->iocb_cmpl) (phba, piocb, piocb);
1543 } else {
1544 lpfc_sli_release_iocbq(phba, piocb);
1545 }
1546 }
1547 return;
1548}
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565static lpfc_iocb_type
1566lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1567{
1568 lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1569
1570 if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1571 return 0;
1572
1573 switch (iocb_cmnd) {
1574 case CMD_XMIT_SEQUENCE_CR:
1575 case CMD_XMIT_SEQUENCE_CX:
1576 case CMD_XMIT_BCAST_CN:
1577 case CMD_XMIT_BCAST_CX:
1578 case CMD_ELS_REQUEST_CR:
1579 case CMD_ELS_REQUEST_CX:
1580 case CMD_CREATE_XRI_CR:
1581 case CMD_CREATE_XRI_CX:
1582 case CMD_GET_RPI_CN:
1583 case CMD_XMIT_ELS_RSP_CX:
1584 case CMD_GET_RPI_CR:
1585 case CMD_FCP_IWRITE_CR:
1586 case CMD_FCP_IWRITE_CX:
1587 case CMD_FCP_IREAD_CR:
1588 case CMD_FCP_IREAD_CX:
1589 case CMD_FCP_ICMND_CR:
1590 case CMD_FCP_ICMND_CX:
1591 case CMD_FCP_TSEND_CX:
1592 case CMD_FCP_TRSP_CX:
1593 case CMD_FCP_TRECEIVE_CX:
1594 case CMD_FCP_AUTO_TRSP_CX:
1595 case CMD_ADAPTER_MSG:
1596 case CMD_ADAPTER_DUMP:
1597 case CMD_XMIT_SEQUENCE64_CR:
1598 case CMD_XMIT_SEQUENCE64_CX:
1599 case CMD_XMIT_BCAST64_CN:
1600 case CMD_XMIT_BCAST64_CX:
1601 case CMD_ELS_REQUEST64_CR:
1602 case CMD_ELS_REQUEST64_CX:
1603 case CMD_FCP_IWRITE64_CR:
1604 case CMD_FCP_IWRITE64_CX:
1605 case CMD_FCP_IREAD64_CR:
1606 case CMD_FCP_IREAD64_CX:
1607 case CMD_FCP_ICMND64_CR:
1608 case CMD_FCP_ICMND64_CX:
1609 case CMD_FCP_TSEND64_CX:
1610 case CMD_FCP_TRSP64_CX:
1611 case CMD_FCP_TRECEIVE64_CX:
1612 case CMD_GEN_REQUEST64_CR:
1613 case CMD_GEN_REQUEST64_CX:
1614 case CMD_XMIT_ELS_RSP64_CX:
1615 case DSSCMD_IWRITE64_CR:
1616 case DSSCMD_IWRITE64_CX:
1617 case DSSCMD_IREAD64_CR:
1618 case DSSCMD_IREAD64_CX:
1619 case CMD_SEND_FRAME:
1620 type = LPFC_SOL_IOCB;
1621 break;
1622 case CMD_ABORT_XRI_CN:
1623 case CMD_ABORT_XRI_CX:
1624 case CMD_CLOSE_XRI_CN:
1625 case CMD_CLOSE_XRI_CX:
1626 case CMD_XRI_ABORTED_CX:
1627 case CMD_ABORT_MXRI64_CN:
1628 case CMD_XMIT_BLS_RSP64_CX:
1629 type = LPFC_ABORT_IOCB;
1630 break;
1631 case CMD_RCV_SEQUENCE_CX:
1632 case CMD_RCV_ELS_REQ_CX:
1633 case CMD_RCV_SEQUENCE64_CX:
1634 case CMD_RCV_ELS_REQ64_CX:
1635 case CMD_ASYNC_STATUS:
1636 case CMD_IOCB_RCV_SEQ64_CX:
1637 case CMD_IOCB_RCV_ELS64_CX:
1638 case CMD_IOCB_RCV_CONT64_CX:
1639 case CMD_IOCB_RET_XRI64_CX:
1640 type = LPFC_UNSOL_IOCB;
1641 break;
1642 case CMD_IOCB_XMIT_MSEQ64_CR:
1643 case CMD_IOCB_XMIT_MSEQ64_CX:
1644 case CMD_IOCB_RCV_SEQ_LIST64_CX:
1645 case CMD_IOCB_RCV_ELS_LIST64_CX:
1646 case CMD_IOCB_CLOSE_EXTENDED_CN:
1647 case CMD_IOCB_ABORT_EXTENDED_CN:
1648 case CMD_IOCB_RET_HBQE64_CN:
1649 case CMD_IOCB_FCP_IBIDIR64_CR:
1650 case CMD_IOCB_FCP_IBIDIR64_CX:
1651 case CMD_IOCB_FCP_ITASKMGT64_CX:
1652 case CMD_IOCB_LOGENTRY_CN:
1653 case CMD_IOCB_LOGENTRY_ASYNC_CN:
1654 printk("%s - Unhandled SLI-3 Command x%x\n",
1655 __func__, iocb_cmnd);
1656 type = LPFC_UNKNOWN_IOCB;
1657 break;
1658 default:
1659 type = LPFC_UNKNOWN_IOCB;
1660 break;
1661 }
1662
1663 return type;
1664}
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677static int
1678lpfc_sli_ring_map(struct lpfc_hba *phba)
1679{
1680 struct lpfc_sli *psli = &phba->sli;
1681 LPFC_MBOXQ_t *pmb;
1682 MAILBOX_t *pmbox;
1683 int i, rc, ret = 0;
1684
1685 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1686 if (!pmb)
1687 return -ENOMEM;
1688 pmbox = &pmb->u.mb;
1689 phba->link_state = LPFC_INIT_MBX_CMDS;
1690 for (i = 0; i < psli->num_rings; i++) {
1691 lpfc_config_ring(phba, i, pmb);
1692 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1693 if (rc != MBX_SUCCESS) {
1694 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1695 "0446 Adapter failed to init (%d), "
1696 "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1697 "ring %d\n",
1698 rc, pmbox->mbxCommand,
1699 pmbox->mbxStatus, i);
1700 phba->link_state = LPFC_HBA_ERROR;
1701 ret = -ENXIO;
1702 break;
1703 }
1704 }
1705 mempool_free(pmb, phba->mbox_mem_pool);
1706 return ret;
1707}
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722static int
1723lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1724 struct lpfc_iocbq *piocb)
1725{
1726 if (phba->sli_rev == LPFC_SLI_REV4)
1727 lockdep_assert_held(&pring->ring_lock);
1728 else
1729 lockdep_assert_held(&phba->hbalock);
1730
1731 BUG_ON(!piocb);
1732
1733 list_add_tail(&piocb->list, &pring->txcmplq);
1734 piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1735 pring->txcmplq_cnt++;
1736
1737 if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1738 (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1739 (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1740 BUG_ON(!piocb->vport);
1741 if (!(piocb->vport->load_flag & FC_UNLOADING))
1742 mod_timer(&piocb->vport->els_tmofunc,
1743 jiffies +
1744 msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1745 }
1746
1747 return 0;
1748}
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760struct lpfc_iocbq *
1761lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1762{
1763 struct lpfc_iocbq *cmd_iocb;
1764
1765 lockdep_assert_held(&phba->hbalock);
1766
1767 list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1768 return cmd_iocb;
1769}
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785static IOCB_t *
1786lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1787{
1788 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1789 uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;
1790
1791 lockdep_assert_held(&phba->hbalock);
1792
1793 if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1794 (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1795 pring->sli.sli3.next_cmdidx = 0;
1796
1797 if (unlikely(pring->sli.sli3.local_getidx ==
1798 pring->sli.sli3.next_cmdidx)) {
1799
1800 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1801
1802 if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1803 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1804 "0315 Ring %d issue: portCmdGet %d "
1805 "is bigger than cmd ring %d\n",
1806 pring->ringno,
1807 pring->sli.sli3.local_getidx,
1808 max_cmd_idx);
1809
1810 phba->link_state = LPFC_HBA_ERROR;
1811
1812
1813
1814
1815 phba->work_ha |= HA_ERATT;
1816 phba->work_hs = HS_FFER3;
1817
1818 lpfc_worker_wake_up(phba);
1819
1820 return NULL;
1821 }
1822
1823 if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1824 return NULL;
1825 }
1826
1827 return lpfc_cmd_iocb(phba, pring);
1828}
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
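/**
 * lpfc_sli_next_iotag - Allocate an iotag and bind it to an iocb object
 * @phba: adapter instance
 * @iocbq: iocb object that needs an iotag
 *
 * Hands out the next free iotag and records @iocbq in the lookup array
 * so a completion can be matched back to its command. If the array is
 * exhausted it is grown by LPFC_IOCBQ_LOOKUP_INCREMENT entries; the
 * allocation is made with the hbalock dropped and the result is
 * re-checked once the lock is retaken.
 *
 * Returns the new iotag, or 0 if no iotag could be allocated.
 */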
1842uint16_t
1843lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1844{
1845 struct lpfc_iocbq **new_arr;
1846 struct lpfc_iocbq **old_arr;
1847 size_t new_len;
1848 struct lpfc_sli *psli = &phba->sli;
1849 uint16_t iotag;
1850
1851 spin_lock_irq(&phba->hbalock);
1852 iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
1854 psli->last_iotag = iotag;
1855 psli->iocbq_lookup[iotag] = iocbq;
1856 spin_unlock_irq(&phba->hbalock);
1857 iocbq->iotag = iotag;
1858 return iotag;
1859 } else if (psli->iocbq_lookup_len < (0xffff
1860 - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1861 new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1862 spin_unlock_irq(&phba->hbalock);
1863 new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1864 GFP_KERNEL);
1865 if (new_arr) {
1866 spin_lock_irq(&phba->hbalock);
1867 old_arr = psli->iocbq_lookup;
1868 if (new_len <= psli->iocbq_lookup_len) {
1869
1870 kfree(new_arr);
1871 iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
1873 psli->last_iotag = iotag;
1874 psli->iocbq_lookup[iotag] = iocbq;
1875 spin_unlock_irq(&phba->hbalock);
1876 iocbq->iotag = iotag;
1877 return iotag;
1878 }
1879 spin_unlock_irq(&phba->hbalock);
1880 return 0;
1881 }
1882 if (psli->iocbq_lookup)
1883 memcpy(new_arr, old_arr,
1884 ((psli->last_iotag + 1) *
1885 sizeof (struct lpfc_iocbq *)));
1886 psli->iocbq_lookup = new_arr;
1887 psli->iocbq_lookup_len = new_len;
1888 psli->last_iotag = iotag;
1889 psli->iocbq_lookup[iotag] = iocbq;
1890 spin_unlock_irq(&phba->hbalock);
1891 iocbq->iotag = iotag;
1892 kfree(old_arr);
1893 return iotag;
1894 }
1895 } else
1896 spin_unlock_irq(&phba->hbalock);
1897
1898 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1899 "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1900 psli->last_iotag);
1901
1902 return 0;
1903}
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
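/**
 * lpfc_sli_submit_iocb - Copy an iocb into a ring slot and notify the HBA
 * @phba: adapter instance
 * @pring: the SLI-3 ring being used
 * @iocb: pointer to the next free command slot on the ring
 * @nextiocb: the driver iocb to issue
 *
 * Fills in the io-tag (kept only when a completion handler is set),
 * copies the command onto the ring, adds it to the txcmplq when a
 * completion is expected (otherwise releases it immediately), and
 * updates the ring's put index so the HBA sees the new command.
 */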
1920static void
1921lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1922 IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1923{
1924
1925
1926
1927 nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1928
1929
1930 if (pring->ringno == LPFC_ELS_RING) {
1931 lpfc_debugfs_slow_ring_trc(phba,
1932 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1933 *(((uint32_t *) &nextiocb->iocb) + 4),
1934 *(((uint32_t *) &nextiocb->iocb) + 6),
1935 *(((uint32_t *) &nextiocb->iocb) + 7));
1936 }
1937
1938
1939
1940
1941 lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1942 wmb();
1943 pring->stats.iocb_cmd++;
1944
1945
1946
1947
1948
1949
1950 if (nextiocb->iocb_cmpl)
1951 lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1952 else
1953 __lpfc_sli_release_iocbq(phba, nextiocb);
1954
1955
1956
1957
1958
1959 pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1960 writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1961}
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975static void
1976lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1977{
1978 int ringno = pring->ringno;
1979
1980 pring->flag |= LPFC_CALL_RING_AVAILABLE;
1981
1982 wmb();
1983
1984
1985
1986
1987
1988 writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1989 readl(phba->CAregaddr);
1990
1991 pring->stats.iocb_cmd_full++;
1992}
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003static void
2004lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2005{
2006 int ringno = pring->ringno;
2007
2008
2009
2010
2011 if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2012 wmb();
2013 writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2014 readl(phba->CAregaddr);
2015 }
2016}
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027static void
2028lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2029{
2030 IOCB_t *iocb;
2031 struct lpfc_iocbq *nextiocb;
2032
2033 lockdep_assert_held(&phba->hbalock);
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043 if (lpfc_is_link_up(phba) &&
2044 (!list_empty(&pring->txq)) &&
2045 (pring->ringno != LPFC_FCP_RING ||
2046 phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2047
2048 while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2049 (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2050 lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2051
2052 if (iocb)
2053 lpfc_sli_update_ring(phba, pring);
2054 else
2055 lpfc_sli_update_full_ring(phba, pring);
2056 }
2057
2058 return;
2059}
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071static struct lpfc_hbq_entry *
2072lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2073{
2074 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2075
2076 lockdep_assert_held(&phba->hbalock);
2077
2078 if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2079 ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2080 hbqp->next_hbqPutIdx = 0;
2081
2082 if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2083 uint32_t raw_index = phba->hbq_get[hbqno];
2084 uint32_t getidx = le32_to_cpu(raw_index);
2085
2086 hbqp->local_hbqGetIdx = getidx;
2087
2088 if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2089 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2090 "1802 HBQ %d: local_hbqGetIdx "
2091 "%u is > than hbqp->entry_count %u\n",
2092 hbqno, hbqp->local_hbqGetIdx,
2093 hbqp->entry_count);
2094
2095 phba->link_state = LPFC_HBA_ERROR;
2096 return NULL;
2097 }
2098
2099 if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2100 return NULL;
2101 }
2102
2103 return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2104 hbqp->hbqPutIdx;
2105}
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116void
2117lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2118{
2119 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2120 struct hbq_dmabuf *hbq_buf;
2121 unsigned long flags;
2122 int i, hbq_count;
2123
2124 hbq_count = lpfc_sli_hbq_count();
2125
2126 spin_lock_irqsave(&phba->hbalock, flags);
2127 for (i = 0; i < hbq_count; ++i) {
2128 list_for_each_entry_safe(dmabuf, next_dmabuf,
2129 &phba->hbqs[i].hbq_buffer_list, list) {
2130 hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2131 list_del(&hbq_buf->dbuf.list);
2132 (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2133 }
2134 phba->hbqs[i].buffer_count = 0;
2135 }
2136
2137
2138 phba->hbq_in_use = 0;
2139 spin_unlock_irqrestore(&phba->hbalock, flags);
2140}
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154static int
2155lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2156 struct hbq_dmabuf *hbq_buf)
2157{
2158 lockdep_assert_held(&phba->hbalock);
2159 return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2160}
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173static int
2174lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2175 struct hbq_dmabuf *hbq_buf)
2176{
2177 struct lpfc_hbq_entry *hbqe;
2178 dma_addr_t physaddr = hbq_buf->dbuf.phys;
2179
2180 lockdep_assert_held(&phba->hbalock);
2181
2182 hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2183 if (hbqe) {
2184 struct hbq_s *hbqp = &phba->hbqs[hbqno];
2185
2186 hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2187 hbqe->bde.addrLow = le32_to_cpu(putPaddrLow(physaddr));
2188 hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2189 hbqe->bde.tus.f.bdeFlags = 0;
2190 hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2191 hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2192
2193 hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2194 writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2195
2196 readl(phba->hbq_put + hbqno);
2197 list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2198 return 0;
2199 } else
2200 return -ENOMEM;
2201}
2202
2203
2204
2205
2206
2207
2208
2209
2210
2211
2212
2213static int
2214lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2215 struct hbq_dmabuf *hbq_buf)
2216{
2217 int rc;
2218 struct lpfc_rqe hrqe;
2219 struct lpfc_rqe drqe;
2220 struct lpfc_queue *hrq;
2221 struct lpfc_queue *drq;
2222
2223 if (hbqno != LPFC_ELS_HBQ)
2224 return 1;
2225 hrq = phba->sli4_hba.hdr_rq;
2226 drq = phba->sli4_hba.dat_rq;
2227
2228 lockdep_assert_held(&phba->hbalock);
2229 hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2230 hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2231 drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2232 drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2233 rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2234 if (rc < 0)
2235 return rc;
2236 hbq_buf->tag = (rc | (hbqno << 16));
2237 list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2238 return 0;
2239}
2240
2241
2242static struct lpfc_hbq_init lpfc_els_hbq = {
2243 .rn = 1,
2244 .entry_count = 256,
2245 .mask_count = 0,
2246 .profile = 0,
2247 .ring_mask = (1 << LPFC_ELS_RING),
2248 .buffer_count = 0,
2249 .init_count = 40,
2250 .add_count = 40,
2251};
2252
2253
2254struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2255 &lpfc_els_hbq,
2256};
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266
2267
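/**
 * lpfc_sli_hbqbuf_fill_hbqs - Allocate and post buffers to an HBQ
 * @phba: adapter instance
 * @hbqno: HBQ to replenish
 * @count: number of buffers requested
 *
 * Allocates up to @count buffers (capped at the HBQ's configured entry
 * count) outside the lock, then posts them to the firmware one at a
 * time under the hbalock. Buffers that cannot be posted, or all of
 * them if the HBQs are no longer in use, are freed again.
 *
 * Returns the number of buffers actually posted.
 */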
2268static int
2269lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2270{
2271 uint32_t i, posted = 0;
2272 unsigned long flags;
2273 struct hbq_dmabuf *hbq_buffer;
2274 LIST_HEAD(hbq_buf_list);
2275 if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2276 return 0;
2277
2278 if ((phba->hbqs[hbqno].buffer_count + count) >
2279 lpfc_hbq_defs[hbqno]->entry_count)
2280 count = lpfc_hbq_defs[hbqno]->entry_count -
2281 phba->hbqs[hbqno].buffer_count;
2282 if (!count)
2283 return 0;
2284
2285 for (i = 0; i < count; i++) {
2286 hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2287 if (!hbq_buffer)
2288 break;
2289 list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2290 }
2291
2292 spin_lock_irqsave(&phba->hbalock, flags);
2293 if (!phba->hbq_in_use)
2294 goto err;
2295 while (!list_empty(&hbq_buf_list)) {
2296 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2297 dbuf.list);
2298 hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2299 (hbqno << 16));
2300 if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2301 phba->hbqs[hbqno].buffer_count++;
2302 posted++;
2303 } else
2304 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2305 }
2306 spin_unlock_irqrestore(&phba->hbalock, flags);
2307 return posted;
2308err:
2309 spin_unlock_irqrestore(&phba->hbalock, flags);
2310 while (!list_empty(&hbq_buf_list)) {
2311 list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2312 dbuf.list);
2313 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2314 }
2315 return 0;
2316}
2317
2318
2319
2320
2321
2322
2323
2324
2325
2326
2327int
2328lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2329{
2330 if (phba->sli_rev == LPFC_SLI_REV4)
2331 return 0;
2332 else
2333 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2334 lpfc_hbq_defs[qno]->add_count);
2335}
2336
2337
2338
2339
2340
2341
2342
2343
2344
2345
2346static int
2347lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2348{
2349 if (phba->sli_rev == LPFC_SLI_REV4)
2350 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2351 lpfc_hbq_defs[qno]->entry_count);
2352 else
2353 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2354 lpfc_hbq_defs[qno]->init_count);
2355}
2356
2357
2358
2359
2360
2361
2362
2363static struct hbq_dmabuf *
2364lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2365{
2366 struct lpfc_dmabuf *d_buf;
2367
2368 list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2369 if (!d_buf)
2370 return NULL;
2371 return container_of(d_buf, struct hbq_dmabuf, dbuf);
2372}
2373
2374
2375
2376
2377
2378
2379
2380
2381
2382static struct rqb_dmabuf *
2383lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2384{
2385 struct lpfc_dmabuf *h_buf;
2386 struct lpfc_rqb *rqbp;
2387
2388 rqbp = hrq->rqbp;
2389 list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2390 struct lpfc_dmabuf, list);
2391 if (!h_buf)
2392 return NULL;
2393 rqbp->buffer_count--;
2394 return container_of(h_buf, struct rqb_dmabuf, hbuf);
2395}
2396
2397
2398
2399
2400
2401
2402
2403
2404
2405
2406static struct hbq_dmabuf *
2407lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2408{
2409 struct lpfc_dmabuf *d_buf;
2410 struct hbq_dmabuf *hbq_buf;
2411 uint32_t hbqno;
2412
2413 hbqno = tag >> 16;
2414 if (hbqno >= LPFC_MAX_HBQS)
2415 return NULL;
2416
2417 spin_lock_irq(&phba->hbalock);
2418 list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2419 hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2420 if (hbq_buf->tag == tag) {
2421 spin_unlock_irq(&phba->hbalock);
2422 return hbq_buf;
2423 }
2424 }
2425 spin_unlock_irq(&phba->hbalock);
2426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2427 "1803 Bad hbq tag. Data: x%x x%x\n",
2428 tag, phba->hbqs[tag >> 16].buffer_count);
2429 return NULL;
2430}
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441void
2442lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2443{
2444 uint32_t hbqno;
2445
2446 if (hbq_buffer) {
2447 hbqno = hbq_buffer->tag >> 16;
2448 if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2449 (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2450 }
2451}
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462static int
2463lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2464{
2465 uint8_t ret;
2466
2467 switch (mbxCommand) {
2468 case MBX_LOAD_SM:
2469 case MBX_READ_NV:
2470 case MBX_WRITE_NV:
2471 case MBX_WRITE_VPARMS:
2472 case MBX_RUN_BIU_DIAG:
2473 case MBX_INIT_LINK:
2474 case MBX_DOWN_LINK:
2475 case MBX_CONFIG_LINK:
2476 case MBX_CONFIG_RING:
2477 case MBX_RESET_RING:
2478 case MBX_READ_CONFIG:
2479 case MBX_READ_RCONFIG:
2480 case MBX_READ_SPARM:
2481 case MBX_READ_STATUS:
2482 case MBX_READ_RPI:
2483 case MBX_READ_XRI:
2484 case MBX_READ_REV:
2485 case MBX_READ_LNK_STAT:
2486 case MBX_REG_LOGIN:
2487 case MBX_UNREG_LOGIN:
2488 case MBX_CLEAR_LA:
2489 case MBX_DUMP_MEMORY:
2490 case MBX_DUMP_CONTEXT:
2491 case MBX_RUN_DIAGS:
2492 case MBX_RESTART:
2493 case MBX_UPDATE_CFG:
2494 case MBX_DOWN_LOAD:
2495 case MBX_DEL_LD_ENTRY:
2496 case MBX_RUN_PROGRAM:
2497 case MBX_SET_MASK:
2498 case MBX_SET_VARIABLE:
2499 case MBX_UNREG_D_ID:
2500 case MBX_KILL_BOARD:
2501 case MBX_CONFIG_FARP:
2502 case MBX_BEACON:
2503 case MBX_LOAD_AREA:
2504 case MBX_RUN_BIU_DIAG64:
2505 case MBX_CONFIG_PORT:
2506 case MBX_READ_SPARM64:
2507 case MBX_READ_RPI64:
2508 case MBX_REG_LOGIN64:
2509 case MBX_READ_TOPOLOGY:
2510 case MBX_WRITE_WWN:
2511 case MBX_SET_DEBUG:
2512 case MBX_LOAD_EXP_ROM:
2513 case MBX_ASYNCEVT_ENABLE:
2514 case MBX_REG_VPI:
2515 case MBX_UNREG_VPI:
2516 case MBX_HEARTBEAT:
2517 case MBX_PORT_CAPABILITIES:
2518 case MBX_PORT_IOV_CONTROL:
2519 case MBX_SLI4_CONFIG:
2520 case MBX_SLI4_REQ_FTRS:
2521 case MBX_REG_FCFI:
2522 case MBX_UNREG_FCFI:
2523 case MBX_REG_VFI:
2524 case MBX_UNREG_VFI:
2525 case MBX_INIT_VPI:
2526 case MBX_INIT_VFI:
2527 case MBX_RESUME_RPI:
2528 case MBX_READ_EVENT_LOG_STATUS:
2529 case MBX_READ_EVENT_LOG:
2530 case MBX_SECURITY_MGMT:
2531 case MBX_AUTH_PORT:
2532 case MBX_ACCESS_VDATA:
2533 ret = mbxCommand;
2534 break;
2535 default:
2536 ret = MBX_SHUTDOWN;
2537 break;
2538 }
2539 return ret;
2540}
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553void
2554lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2555{
2556 unsigned long drvr_flag;
2557 struct completion *pmbox_done;
2558
2559
2560
2561
2562
2563 pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2564 spin_lock_irqsave(&phba->hbalock, drvr_flag);
2565 pmbox_done = (struct completion *)pmboxq->context3;
2566 if (pmbox_done)
2567 complete(pmbox_done);
2568 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2569 return;
2570}
2571
2572static void
2573__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2574{
2575 unsigned long iflags;
2576
2577 if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2578 lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2579 spin_lock_irqsave(&ndlp->lock, iflags);
2580 ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2581 ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2582 spin_unlock_irqrestore(&ndlp->lock, iflags);
2583 }
2584 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2585}
2586
2587
2588
2589
2590
2591
2592
2593
2594
2595
2596
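/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: adapter instance
 * @pmb: the completed mailbox command
 *
 * Frees any DMA buffer attached to the command and then does the
 * command-specific cleanup: a REG_LOGIN64 that completed successfully
 * but landed on the default handler has its RPI unregistered again
 * (unless the port is unloading), REG_VPI updates the vport's
 * registration state, UNREG_LOGIN finishes any deferred PLOGI or RPI
 * release, and node references taken for REG_LOGIN64, UNREG_LOGIN and
 * RESUME_RPI are dropped. Finally the mailbox object itself is freed.
 */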
2597void
2598lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2599{
2600 struct lpfc_vport *vport = pmb->vport;
2601 struct lpfc_dmabuf *mp;
2602 struct lpfc_nodelist *ndlp;
2603 struct Scsi_Host *shost;
2604 uint16_t rpi, vpi;
2605 int rc;
2606
2607 mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2608
2609 if (mp) {
2610 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2611 kfree(mp);
2612 }
2613
2614
2615
2616
2617
2618 if (!(phba->pport->load_flag & FC_UNLOADING) &&
2619 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2620 !pmb->u.mb.mbxStatus) {
2621 rpi = pmb->u.mb.un.varWords[0];
2622 vpi = pmb->u.mb.un.varRegLogin.vpi;
2623 if (phba->sli_rev == LPFC_SLI_REV4)
2624 vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2625 lpfc_unreg_login(phba, vpi, rpi, pmb);
2626 pmb->vport = vport;
2627 pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2628 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2629 if (rc != MBX_NOT_FINISHED)
2630 return;
2631 }
2632
2633 if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2634 !(phba->pport->load_flag & FC_UNLOADING) &&
2635 !pmb->u.mb.mbxStatus) {
2636 shost = lpfc_shost_from_vport(vport);
2637 spin_lock_irq(shost->host_lock);
2638 vport->vpi_state |= LPFC_VPI_REGISTERED;
2639 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2640 spin_unlock_irq(shost->host_lock);
2641 }
2642
2643 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2644 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2645 lpfc_nlp_put(ndlp);
2646 pmb->ctx_buf = NULL;
2647 pmb->ctx_ndlp = NULL;
2648 }
2649
2650 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2651 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2652
2653
2654 if (ndlp) {
2655 lpfc_printf_vlog(
2656 vport,
2657 KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2658 "1438 UNREG cmpl deferred mbox x%x "
2659 "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2660 ndlp->nlp_rpi, ndlp->nlp_DID,
2661 ndlp->nlp_flag, ndlp->nlp_defer_did,
2662 ndlp, vport->load_flag, kref_read(&ndlp->kref));
2663
2664 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2665 (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2666 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2667 ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2668 lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2669 } else {
2670 __lpfc_sli_rpi_release(vport, ndlp);
2671 }
2672
2673
2674
2675
2676
2677 lpfc_nlp_put(ndlp);
2678 pmb->ctx_ndlp = NULL;
2679 }
2680 }
2681
2682
2683 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2684 ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2685 lpfc_nlp_put(ndlp);
2686 }
2687
2688
2689 if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2690 (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2691 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2692 "2860 SLI authentication is required "
2693 "for INIT_LINK but has not done yet\n");
2694
2695 if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2696 lpfc_sli4_mbox_cmd_free(phba, pmb);
2697 else
2698 mempool_free(pmb, phba->mbox_mem_pool);
2699}
2700
2701
2702
2703
2704
2705
2706
2707
2708
2709
2710
2711
2712
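/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler used for unreg_rpi
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to driver mailbox object.
 *
 * Completion handler for UNREG_LOGIN mailbox commands on SLI4 if_type 2
 * or later ports. If a deferred event is pending on the node, the
 * unreg-in-progress flag is cleared and a PLOGI is issued; otherwise the
 * RPI is released. The node reference taken for the mailbox is dropped
 * and the mailbox is returned to the pool.
 **/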
2713void
2714lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2715{
2716 struct lpfc_vport *vport = pmb->vport;
2717 struct lpfc_nodelist *ndlp;
2718
2719 ndlp = pmb->ctx_ndlp;
2720 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2721 if (phba->sli_rev == LPFC_SLI_REV4 &&
2722 (bf_get(lpfc_sli_intf_if_type,
2723 &phba->sli4_hba.sli_intf) >=
2724 LPFC_SLI_INTF_IF_TYPE_2)) {
2725 if (ndlp) {
2726 lpfc_printf_vlog(
2727 vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2728 "0010 UNREG_LOGIN vpi:%x "
2729 "rpi:%x DID:%x defer x%x flg x%x "
2730 "x%px\n",
2731 vport->vpi, ndlp->nlp_rpi,
2732 ndlp->nlp_DID, ndlp->nlp_defer_did,
2733 ndlp->nlp_flag,
2734 ndlp);
2735 ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2736
2737
2738
2739
2740 if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2741 (ndlp->nlp_defer_did !=
2742 NLP_EVT_NOTHING_PENDING)) {
2743 lpfc_printf_vlog(
2744 vport, KERN_INFO, LOG_DISCOVERY,
2745 "4111 UNREG cmpl deferred "
2746 "clr x%x on "
2747 "NPort x%x Data: x%x x%px\n",
2748 ndlp->nlp_rpi, ndlp->nlp_DID,
2749 ndlp->nlp_defer_did, ndlp);
2750 ndlp->nlp_flag &= ~NLP_UNREG_INP;
2751 ndlp->nlp_defer_did =
2752 NLP_EVT_NOTHING_PENDING;
2753 lpfc_issue_els_plogi(
2754 vport, ndlp->nlp_DID, 0);
2755 } else {
2756 __lpfc_sli_rpi_release(vport, ndlp);
2757 }
2758 lpfc_nlp_put(ndlp);
2759 }
2760 }
2761 }
2762
2763 mempool_free(pmb, phba->mbox_mem_pool);
2764}
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
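/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * Called by the worker thread to process mailbox commands that have been
 * moved to the mboxq_cmpl list. Unknown commands put the HBA in error
 * state, MBXERR_NO_RESOURCES failures are retried, and each remaining
 * command has its completion handler invoked, if one is set. This
 * function always returns 0.
 **/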
2779int
2780lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2781{
2782 MAILBOX_t *pmbox;
2783 LPFC_MBOXQ_t *pmb;
2784 int rc;
2785 LIST_HEAD(cmplq);
2786
2787 phba->sli.slistat.mbox_event++;
2788
2789
2790 spin_lock_irq(&phba->hbalock);
2791 list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2792 spin_unlock_irq(&phba->hbalock);
2793
2794
2795 do {
2796 list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2797 if (pmb == NULL)
2798 break;
2799
2800 pmbox = &pmb->u.mb;
2801
2802 if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2803 if (pmb->vport) {
2804 lpfc_debugfs_disc_trc(pmb->vport,
2805 LPFC_DISC_TRC_MBOX_VPORT,
2806 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2807 (uint32_t)pmbox->mbxCommand,
2808 pmbox->un.varWords[0],
2809 pmbox->un.varWords[1]);
2810 }
2811 else {
2812 lpfc_debugfs_disc_trc(phba->pport,
2813 LPFC_DISC_TRC_MBOX,
2814 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2815 (uint32_t)pmbox->mbxCommand,
2816 pmbox->un.varWords[0],
2817 pmbox->un.varWords[1]);
2818 }
2819 }
2820
2821
2822
2823
2824 if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2825 MBX_SHUTDOWN) {
2826
2827 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2828 "(%d):0323 Unknown Mailbox command "
2829 "x%x (x%x/x%x) Cmpl\n",
2830 pmb->vport ? pmb->vport->vpi :
2831 LPFC_VPORT_UNKNOWN,
2832 pmbox->mbxCommand,
2833 lpfc_sli_config_mbox_subsys_get(phba,
2834 pmb),
2835 lpfc_sli_config_mbox_opcode_get(phba,
2836 pmb));
2837 phba->link_state = LPFC_HBA_ERROR;
2838 phba->work_hs = HS_FFER3;
2839 lpfc_handle_eratt(phba);
2840 continue;
2841 }
2842
2843 if (pmbox->mbxStatus) {
2844 phba->sli.slistat.mbox_stat_err++;
2845 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2846
2847 lpfc_printf_log(phba, KERN_INFO,
2848 LOG_MBOX | LOG_SLI,
2849 "(%d):0305 Mbox cmd cmpl "
2850 "error - RETRYing Data: x%x "
2851 "(x%x/x%x) x%x x%x x%x\n",
2852 pmb->vport ? pmb->vport->vpi :
2853 LPFC_VPORT_UNKNOWN,
2854 pmbox->mbxCommand,
2855 lpfc_sli_config_mbox_subsys_get(phba,
2856 pmb),
2857 lpfc_sli_config_mbox_opcode_get(phba,
2858 pmb),
2859 pmbox->mbxStatus,
2860 pmbox->un.varWords[0],
2861 pmb->vport ? pmb->vport->port_state :
2862 LPFC_VPORT_UNKNOWN);
2863 pmbox->mbxStatus = 0;
2864 pmbox->mbxOwner = OWN_HOST;
2865 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2866 if (rc != MBX_NOT_FINISHED)
2867 continue;
2868 }
2869 }
2870
2871
2872 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2873 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2874 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2875 "x%x x%x x%x\n",
2876 pmb->vport ? pmb->vport->vpi : 0,
2877 pmbox->mbxCommand,
2878 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2879 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2880 pmb->mbox_cmpl,
2881 *((uint32_t *) pmbox),
2882 pmbox->un.varWords[0],
2883 pmbox->un.varWords[1],
2884 pmbox->un.varWords[2],
2885 pmbox->un.varWords[3],
2886 pmbox->un.varWords[4],
2887 pmbox->un.varWords[5],
2888 pmbox->un.varWords[6],
2889 pmbox->un.varWords[7],
2890 pmbox->un.varWords[8],
2891 pmbox->un.varWords[9],
2892 pmbox->un.varWords[10]);
2893
2894 if (pmb->mbox_cmpl)
2895 pmb->mbox_cmpl(phba, pmb);
2896 } while (1);
2897 return 0;
2898}
2899
2900
2901
2902
2903
2904
2905
2906
2907
2908
2909
2910
2911
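/**
 * lpfc_sli_get_buff - Get the buffer associated with a buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * If the tag has QUE_BUFTAG_BIT set, the buffer is looked up among the
 * ring's tagged posted buffers; otherwise it is taken from the HBQ buffer
 * list. Returns a pointer to the DMA buffer on success or NULL if no
 * match is found.
 **/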
2912static struct lpfc_dmabuf *
2913lpfc_sli_get_buff(struct lpfc_hba *phba,
2914 struct lpfc_sli_ring *pring,
2915 uint32_t tag)
2916{
2917 struct hbq_dmabuf *hbq_entry;
2918
2919 if (tag & QUE_BUFTAG_BIT)
2920 return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2921 hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2922 if (!hbq_entry)
2923 return NULL;
2924 return &hbq_entry->dbuf;
2925}
2926
2927
2928
2929
2930
2931
2932
2933
2934
2935
2936
2937
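/**
 * lpfc_nvme_unsol_ls_handler - Process an unsolicited NVME LS request
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to the iocbq containing the received frame.
 *
 * Validates the driver/port state and the received frame header, looks up
 * the sending node, allocates an lpfc_async_xchg_ctx for the exchange and
 * passes it to the NVME target or initiator LS request handler. On any
 * failure the receive buffer is freed, an abort may be issued for the
 * exchange, and the context is released.
 **/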
2938static void
2939lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2940{
2941 struct lpfc_nodelist *ndlp;
2942 struct lpfc_dmabuf *d_buf;
2943 struct hbq_dmabuf *nvmebuf;
2944 struct fc_frame_header *fc_hdr;
2945 struct lpfc_async_xchg_ctx *axchg = NULL;
2946 char *failwhy = NULL;
2947 uint32_t oxid, sid, did, fctl, size;
2948 int ret = 1;
2949
2950 d_buf = piocb->context2;
2951
2952 nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2953 fc_hdr = nvmebuf->hbuf.virt;
2954 oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2955 sid = sli4_sid_from_fc_hdr(fc_hdr);
2956 did = sli4_did_from_fc_hdr(fc_hdr);
2957 fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2958 fc_hdr->fh_f_ctl[1] << 8 |
2959 fc_hdr->fh_f_ctl[2]);
2960 size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2961
2962 lpfc_nvmeio_data(phba, "NVME LS RCV: xri x%x sz %d from %06x\n",
2963 oxid, size, sid);
2964
2965 if (phba->pport->load_flag & FC_UNLOADING) {
2966 failwhy = "Driver Unloading";
2967 } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2968 failwhy = "NVME FC4 Disabled";
2969 } else if (!phba->nvmet_support && !phba->pport->localport) {
2970 failwhy = "No Localport";
2971 } else if (phba->nvmet_support && !phba->targetport) {
2972 failwhy = "No Targetport";
2973 } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2974 failwhy = "Bad NVME LS R_CTL";
2975 } else if (unlikely((fctl & 0x00FF0000) !=
2976 (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2977 failwhy = "Bad NVME LS F_CTL";
2978 } else {
2979 axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2980 if (!axchg)
2981 failwhy = "No CTX memory";
2982 }
2983
2984 if (unlikely(failwhy)) {
2985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2986 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2987 sid, oxid, failwhy);
2988 goto out_fail;
2989 }
2990
2991
2992 ndlp = lpfc_findnode_did(phba->pport, sid);
2993 if (!ndlp ||
2994 ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2995 (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2996 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2997 "6216 NVME Unsol rcv: No ndlp: "
2998 "NPort_ID x%x oxid x%x\n",
2999 sid, oxid);
3000 goto out_fail;
3001 }
3002
3003 axchg->phba = phba;
3004 axchg->ndlp = ndlp;
3005 axchg->size = size;
3006 axchg->oxid = oxid;
3007 axchg->sid = sid;
3008 axchg->wqeq = NULL;
3009 axchg->state = LPFC_NVME_STE_LS_RCV;
3010 axchg->entry_cnt = 1;
3011 axchg->rqb_buffer = (void *)nvmebuf;
3012 axchg->hdwq = &phba->sli4_hba.hdwq[0];
3013 axchg->payload = nvmebuf->dbuf.virt;
3014 INIT_LIST_HEAD(&axchg->list);
3015
3016 if (phba->nvmet_support) {
3017 ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3018 spin_lock_irq(&ndlp->lock);
3019 if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3020 ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3021 spin_unlock_irq(&ndlp->lock);
3022
3023
3024
3025
3026
3027 if (!lpfc_nlp_get(ndlp))
3028 goto out_fail;
3029
3030 lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3031 "6206 NVMET unsol ls_req ndlp x%px "
3032 "DID x%x xflags x%x refcnt %d\n",
3033 ndlp, ndlp->nlp_DID,
3034 ndlp->fc4_xpt_flags,
3035 kref_read(&ndlp->kref));
3036 } else {
3037 spin_unlock_irq(&ndlp->lock);
3038 }
3039 } else {
3040 ret = lpfc_nvme_handle_lsreq(phba, axchg);
3041 }
3042
3043
3044 if (!ret)
3045 return;
3046
3047out_fail:
3048 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3049 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3050 "NVMe%s handler failed %d\n",
3051 did, sid, oxid,
3052 (phba->nvmet_support) ? "T" : "I", ret);
3053
3054
3055 lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3056
3057
3058 if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3059 ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3060
3061 if (ret)
3062 kfree(axchg);
3063}
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076
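/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence.
 * @fch_r_ctl: the r_ctl of the first frame of the sequence.
 * @fch_type: the type of the first frame of the sequence.
 *
 * NVME frames are routed to the unsolicited LS handler. Otherwise the
 * sequence is delivered to the ring's unsolicited event handler, either
 * the profile-zero handler or the one registered for the matching r_ctl
 * and type. Returns 1 if the sequence was handled, 0 if no matching
 * handler was found.
 **/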
3077static int
3078lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3079 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3080 uint32_t fch_type)
3081{
3082 int i;
3083
3084 switch (fch_type) {
3085 case FC_TYPE_NVME:
3086 lpfc_nvme_unsol_ls_handler(phba, saveq);
3087 return 1;
3088 default:
3089 break;
3090 }
3091
3092
3093 if (pring->prt[0].profile) {
3094 if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3095 (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3096 saveq);
3097 return 1;
3098 }
3099
3100
3101 for (i = 0; i < pring->num_mask; i++) {
3102 if ((pring->prt[i].rctl == fch_r_ctl) &&
3103 (pring->prt[i].type == fch_type)) {
3104 if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3105 (pring->prt[i].lpfc_sli_rcv_unsol_event)
3106 (phba, pring, saveq);
3107 return 1;
3108 }
3109 }
3110 return 0;
3111}
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122
3123
3124
3125
3126
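/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * Called by the ring event handler when an unsolicited iocb is posted to
 * the response ring. Async status iocbs are routed to the ring's async
 * status handler and returned XRI buffers are freed. For received
 * sequences, the HBQ buffers referenced by the iocb are gathered,
 * continuation iocbs for the same exchange are chained, the r_ctl and
 * type are derived, and the sequence is handed to
 * lpfc_complete_unsol_iocb. Returns 1 when the iocb has been consumed,
 * 0 when more continuation iocbs are expected.
 **/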
3127static int
3128lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3129 struct lpfc_iocbq *saveq)
3130{
3131 IOCB_t * irsp;
3132 WORD5 * w5p;
3133 uint32_t Rctl, Type;
3134 struct lpfc_iocbq *iocbq;
3135 struct lpfc_dmabuf *dmzbuf;
3136
3137 irsp = &(saveq->iocb);
3138
3139 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3140 if (pring->lpfc_sli_rcv_async_status)
3141 pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3142 else
3143 lpfc_printf_log(phba,
3144 KERN_WARNING,
3145 LOG_SLI,
3146 "0316 Ring %d handler: unexpected "
3147 "ASYNC_STATUS iocb received evt_code "
3148 "0x%x\n",
3149 pring->ringno,
3150 irsp->un.asyncstat.evt_code);
3151 return 1;
3152 }
3153
3154 if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3155 (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3156 if (irsp->ulpBdeCount > 0) {
3157 dmzbuf = lpfc_sli_get_buff(phba, pring,
3158 irsp->un.ulpWord[3]);
3159 lpfc_in_buf_free(phba, dmzbuf);
3160 }
3161
3162 if (irsp->ulpBdeCount > 1) {
3163 dmzbuf = lpfc_sli_get_buff(phba, pring,
3164 irsp->unsli3.sli3Words[3]);
3165 lpfc_in_buf_free(phba, dmzbuf);
3166 }
3167
3168 if (irsp->ulpBdeCount > 2) {
3169 dmzbuf = lpfc_sli_get_buff(phba, pring,
3170 irsp->unsli3.sli3Words[7]);
3171 lpfc_in_buf_free(phba, dmzbuf);
3172 }
3173
3174 return 1;
3175 }
3176
3177 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3178 if (irsp->ulpBdeCount != 0) {
3179 saveq->context2 = lpfc_sli_get_buff(phba, pring,
3180 irsp->un.ulpWord[3]);
3181 if (!saveq->context2)
3182 lpfc_printf_log(phba,
3183 KERN_ERR,
3184 LOG_SLI,
3185 "0341 Ring %d Cannot find buffer for "
3186 "an unsolicited iocb. tag 0x%x\n",
3187 pring->ringno,
3188 irsp->un.ulpWord[3]);
3189 }
3190 if (irsp->ulpBdeCount == 2) {
3191 saveq->context3 = lpfc_sli_get_buff(phba, pring,
3192 irsp->unsli3.sli3Words[7]);
3193 if (!saveq->context3)
3194 lpfc_printf_log(phba,
3195 KERN_ERR,
3196 LOG_SLI,
3197 "0342 Ring %d Cannot find buffer for an"
3198 " unsolicited iocb. tag 0x%x\n",
3199 pring->ringno,
3200 irsp->unsli3.sli3Words[7]);
3201 }
3202 list_for_each_entry(iocbq, &saveq->list, list) {
3203 irsp = &(iocbq->iocb);
3204 if (irsp->ulpBdeCount != 0) {
3205 iocbq->context2 = lpfc_sli_get_buff(phba, pring,
3206 irsp->un.ulpWord[3]);
3207 if (!iocbq->context2)
3208 lpfc_printf_log(phba,
3209 KERN_ERR,
3210 LOG_SLI,
3211 "0343 Ring %d Cannot find "
3212 "buffer for an unsolicited iocb"
3213 ". tag 0x%x\n", pring->ringno,
3214 irsp->un.ulpWord[3]);
3215 }
3216 if (irsp->ulpBdeCount == 2) {
3217 iocbq->context3 = lpfc_sli_get_buff(phba, pring,
3218 irsp->unsli3.sli3Words[7]);
3219 if (!iocbq->context3)
3220 lpfc_printf_log(phba,
3221 KERN_ERR,
3222 LOG_SLI,
3223 "0344 Ring %d Cannot find "
3224 "buffer for an unsolicited "
3225 "iocb. tag 0x%x\n",
3226 pring->ringno,
3227 irsp->unsli3.sli3Words[7]);
3228 }
3229 }
3230 }
3231 if (irsp->ulpBdeCount != 0 &&
3232 (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3233 irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3234 int found = 0;
3235
3236
3237 list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3238 if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3239 saveq->iocb.unsli3.rcvsli3.ox_id) {
3240 list_add_tail(&saveq->list, &iocbq->list);
3241 found = 1;
3242 break;
3243 }
3244 }
3245 if (!found)
3246 list_add_tail(&saveq->clist,
3247 &pring->iocb_continue_saveq);
3248 if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3249 list_del_init(&iocbq->clist);
3250 saveq = iocbq;
3251 irsp = &(saveq->iocb);
3252 } else
3253 return 0;
3254 }
3255 if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3256 (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3257 (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3258 Rctl = FC_RCTL_ELS_REQ;
3259 Type = FC_TYPE_ELS;
3260 } else {
3261 w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3262 Rctl = w5p->hcsw.Rctl;
3263 Type = w5p->hcsw.Type;
3264
3265
3266 if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3267 (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3268 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3269 Rctl = FC_RCTL_ELS_REQ;
3270 Type = FC_TYPE_ELS;
3271 w5p->hcsw.Rctl = Rctl;
3272 w5p->hcsw.Type = Type;
3273 }
3274 }
3275
3276 if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3277 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3278 "0313 Ring %d handler: unexpected Rctl x%x "
3279 "Type x%x received\n",
3280 pring->ringno, Rctl, Type);
3281
3282 return 1;
3283}
3284
3285
3286
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297
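/**
 * lpfc_sli_iocbq_lookup - Find the command iocb for a response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to the response iocb.
 *
 * Looks up the command iocb by the iotag in the response, removes it from
 * the txcmplq under the ring lock (SLI4) or hbalock (SLI3) and returns it.
 * Returns NULL if the iotag is out of range or the command is no longer
 * on the txcmplq.
 **/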
3298static struct lpfc_iocbq *
3299lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3300 struct lpfc_sli_ring *pring,
3301 struct lpfc_iocbq *prspiocb)
3302{
3303 struct lpfc_iocbq *cmd_iocb = NULL;
3304 uint16_t iotag;
3305 spinlock_t *temp_lock = NULL;
3306 unsigned long iflag = 0;
3307
3308 if (phba->sli_rev == LPFC_SLI_REV4)
3309 temp_lock = &pring->ring_lock;
3310 else
3311 temp_lock = &phba->hbalock;
3312
3313 spin_lock_irqsave(temp_lock, iflag);
3314 iotag = prspiocb->iocb.ulpIoTag;
3315
3316 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3317 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3318 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3319
3320 list_del_init(&cmd_iocb->list);
3321 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3322 pring->txcmplq_cnt--;
3323 spin_unlock_irqrestore(temp_lock, iflag);
3324 return cmd_iocb;
3325 }
3326 }
3327
3328 spin_unlock_irqrestore(temp_lock, iflag);
3329 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3330 "0317 iotag x%x is out of "
3331 "range: max iotag x%x wd0 x%x\n",
3332 iotag, phba->sli.last_iotag,
3333 *(((uint32_t *) &prspiocb->iocb) + 7));
3334 return NULL;
3335}
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
3347
3348
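/**
 * lpfc_sli_iocbq_lookup_by_tag - Find a command iocb by iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * Same lookup as lpfc_sli_iocbq_lookup, but driven directly by an iotag
 * rather than a response iocb. Returns the command iocb removed from the
 * txcmplq, or NULL if the lookup fails.
 **/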
3349static struct lpfc_iocbq *
3350lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3351 struct lpfc_sli_ring *pring, uint16_t iotag)
3352{
3353 struct lpfc_iocbq *cmd_iocb = NULL;
3354 spinlock_t *temp_lock = NULL;
3355 unsigned long iflag = 0;
3356
3357 if (phba->sli_rev == LPFC_SLI_REV4)
3358 temp_lock = &pring->ring_lock;
3359 else
3360 temp_lock = &phba->hbalock;
3361
3362 spin_lock_irqsave(temp_lock, iflag);
3363 if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3364 cmd_iocb = phba->sli.iocbq_lookup[iotag];
3365 if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3366
3367 list_del_init(&cmd_iocb->list);
3368 cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3369 pring->txcmplq_cnt--;
3370 spin_unlock_irqrestore(temp_lock, iflag);
3371 return cmd_iocb;
3372 }
3373 }
3374
3375 spin_unlock_irqrestore(temp_lock, iflag);
3376 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3377 "0372 iotag x%x lookup error: max iotag (x%x) "
3378 "iocb_flag x%x\n",
3379 iotag, phba->sli.last_iotag,
3380 cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3381 return NULL;
3382}
3383
3384
3385
3386
3387
3388
3389
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399
3400
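/**
 * lpfc_sli_process_sol_iocb - Process a solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * Called by the ring event handler for a solicited iocb completion. The
 * matching command iocb is looked up, abort and exchange-busy state is
 * folded into the command and response status where needed (ELS ring
 * only), and the command's completion handler is invoked or the iocb is
 * released. This function always returns 1.
 **/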
3401static int
3402lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3403 struct lpfc_iocbq *saveq)
3404{
3405 struct lpfc_iocbq *cmdiocbp;
3406 int rc = 1;
3407 unsigned long iflag;
3408
3409 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3410 if (cmdiocbp) {
3411 if (cmdiocbp->iocb_cmpl) {
3412
3413
3414
3415
3416 if (saveq->iocb.ulpStatus &&
3417 (pring->ringno == LPFC_ELS_RING) &&
3418 (cmdiocbp->iocb.ulpCommand ==
3419 CMD_ELS_REQUEST64_CR))
3420 lpfc_send_els_failure_event(phba,
3421 cmdiocbp, saveq);
3422
3423
3424
3425
3426
3427 if (pring->ringno == LPFC_ELS_RING) {
3428 if ((phba->sli_rev < LPFC_SLI_REV4) &&
3429 (cmdiocbp->iocb_flag &
3430 LPFC_DRIVER_ABORTED)) {
3431 spin_lock_irqsave(&phba->hbalock,
3432 iflag);
3433 cmdiocbp->iocb_flag &=
3434 ~LPFC_DRIVER_ABORTED;
3435 spin_unlock_irqrestore(&phba->hbalock,
3436 iflag);
3437 saveq->iocb.ulpStatus =
3438 IOSTAT_LOCAL_REJECT;
3439 saveq->iocb.un.ulpWord[4] =
3440 IOERR_SLI_ABORTED;
3441
3442
3443
3444
3445
3446 spin_lock_irqsave(&phba->hbalock,
3447 iflag);
3448 saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
3449 spin_unlock_irqrestore(&phba->hbalock,
3450 iflag);
3451 }
3452 if (phba->sli_rev == LPFC_SLI_REV4) {
3453 if (saveq->iocb_flag &
3454 LPFC_EXCHANGE_BUSY) {
3455
3456
3457
3458
3459
3460
3461 spin_lock_irqsave(
3462 &phba->hbalock, iflag);
3463 cmdiocbp->iocb_flag |=
3464 LPFC_EXCHANGE_BUSY;
3465 spin_unlock_irqrestore(
3466 &phba->hbalock, iflag);
3467 }
3468 if (cmdiocbp->iocb_flag &
3469 LPFC_DRIVER_ABORTED) {
3470
3471
3472
3473
3474
3475 spin_lock_irqsave(
3476 &phba->hbalock, iflag);
3477 cmdiocbp->iocb_flag &=
3478 ~LPFC_DRIVER_ABORTED;
3479 spin_unlock_irqrestore(
3480 &phba->hbalock, iflag);
3481 cmdiocbp->iocb.ulpStatus =
3482 IOSTAT_LOCAL_REJECT;
3483 cmdiocbp->iocb.un.ulpWord[4] =
3484 IOERR_ABORT_REQUESTED;
3485
3486
3487
3488
3489
3490
3491 saveq->iocb.ulpStatus =
3492 IOSTAT_LOCAL_REJECT;
3493 saveq->iocb.un.ulpWord[4] =
3494 IOERR_SLI_ABORTED;
3495 spin_lock_irqsave(
3496 &phba->hbalock, iflag);
3497 saveq->iocb_flag |=
3498 LPFC_DELAY_MEM_FREE;
3499 spin_unlock_irqrestore(
3500 &phba->hbalock, iflag);
3501 }
3502 }
3503 }
3504 (cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
3505 } else
3506 lpfc_sli_release_iocbq(phba, cmdiocbp);
3507 } else {
3508
3509
3510
3511
3512
3513 if (pring->ringno != LPFC_ELS_RING) {
3514
3515
3516
3517
3518 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3519 "0322 Ring %d handler: "
3520 "unexpected completion IoTag x%x "
3521 "Data: x%x x%x x%x x%x\n",
3522 pring->ringno,
3523 saveq->iocb.ulpIoTag,
3524 saveq->iocb.ulpStatus,
3525 saveq->iocb.un.ulpWord[4],
3526 saveq->iocb.ulpCommand,
3527 saveq->iocb.ulpContext);
3528 }
3529 }
3530
3531 return rc;
3532}
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
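/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * Called when the port's response put index is outside the response ring.
 * It logs the error, marks the HBA as being in error state and wakes the
 * worker thread to process the host status error attention event.
 **/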
3544static void
3545lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3546{
3547 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3548
3549
3550
3551
3552 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3553 "0312 Ring %d handler: portRspPut %d "
3554 "is bigger than rsp ring %d\n",
3555 pring->ringno, le32_to_cpu(pgp->rspPutInx),
3556 pring->sli.sli3.numRiocb);
3557
3558 phba->link_state = LPFC_HBA_ERROR;
3559
3560
3561
3562
3563
3564 phba->work_ha |= HA_ERATT;
3565 phba->work_hs = HS_FFER3;
3566
3567 lpfc_worker_wake_up(phba);
3568
3569 return;
3570}
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
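/**
 * lpfc_poll_eratt - Error attention polling timer handler
 * @t: Pointer to the eratt_poll timer used to locate the HBA context.
 *
 * Updates the interrupts-per-second statistic, then checks the HBA for an
 * error attention. If one is found the worker thread is woken to handle
 * it; otherwise the polling timer is restarted for the next interval.
 **/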
3582void lpfc_poll_eratt(struct timer_list *t)
3583{
3584 struct lpfc_hba *phba;
3585 uint32_t eratt = 0;
3586 uint64_t sli_intr, cnt;
3587
3588 phba = from_timer(phba, t, eratt_poll);
3589
3590
3591 sli_intr = phba->sli.slistat.sli_intr;
3592
3593 if (phba->sli.slistat.sli_prev_intr > sli_intr)
3594 cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3595 sli_intr);
3596 else
3597 cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3598
3599
3600 do_div(cnt, phba->eratt_poll_interval);
3601 phba->sli.slistat.sli_ips = cnt;
3602
3603 phba->sli.slistat.sli_prev_intr = sli_intr;
3604
3605
3606 eratt = lpfc_sli_check_eratt(phba);
3607
3608 if (eratt)
3609
3610 lpfc_worker_wake_up(phba);
3611 else
3612
3613 mod_timer(&phba->eratt_poll,
3614 jiffies +
3615 msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3616 return;
3617}
3618
3619
3620
3621
3622
3623
3624
3625
3626
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
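/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on the FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Processes response iocbs on the FCP ring. Each response entry is copied
 * into a local iocb, matched to its command iocb and completed in place;
 * unsolicited iocbs and adapter messages are dispatched as well. Queue
 * depth ramp-down is triggered on IOERR_NO_RESOURCES errors, the response
 * get index is updated as entries are consumed, and the command ring is
 * resumed if the port requested attention. This function always
 * returns 1.
 **/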
3637int
3638lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3639 struct lpfc_sli_ring *pring, uint32_t mask)
3640{
3641 struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3642 IOCB_t *irsp = NULL;
3643 IOCB_t *entry = NULL;
3644 struct lpfc_iocbq *cmdiocbq = NULL;
3645 struct lpfc_iocbq rspiocbq;
3646 uint32_t status;
3647 uint32_t portRspPut, portRspMax;
3648 int rc = 1;
3649 lpfc_iocb_type type;
3650 unsigned long iflag;
3651 uint32_t rsp_cmpl = 0;
3652
3653 spin_lock_irqsave(&phba->hbalock, iflag);
3654 pring->stats.iocb_event++;
3655
3656
3657
3658
3659
3660 portRspMax = pring->sli.sli3.numRiocb;
3661 portRspPut = le32_to_cpu(pgp->rspPutInx);
3662 if (unlikely(portRspPut >= portRspMax)) {
3663 lpfc_sli_rsp_pointers_error(phba, pring);
3664 spin_unlock_irqrestore(&phba->hbalock, iflag);
3665 return 1;
3666 }
3667 if (phba->fcp_ring_in_use) {
3668 spin_unlock_irqrestore(&phba->hbalock, iflag);
3669 return 1;
3670 } else
3671 phba->fcp_ring_in_use = 1;
3672
3673 rmb();
3674 while (pring->sli.sli3.rspidx != portRspPut) {
3675
3676
3677
3678
3679
3680 entry = lpfc_resp_iocb(phba, pring);
3681 phba->last_completion_time = jiffies;
3682
3683 if (++pring->sli.sli3.rspidx >= portRspMax)
3684 pring->sli.sli3.rspidx = 0;
3685
3686 lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3687 (uint32_t *) &rspiocbq.iocb,
3688 phba->iocb_rsp_size);
3689 INIT_LIST_HEAD(&(rspiocbq.list));
3690 irsp = &rspiocbq.iocb;
3691
3692 type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3693 pring->stats.iocb_rsp++;
3694 rsp_cmpl++;
3695
3696 if (unlikely(irsp->ulpStatus)) {
3697
3698
3699
3700
3701 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3702 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3703 IOERR_NO_RESOURCES)) {
3704 spin_unlock_irqrestore(&phba->hbalock, iflag);
3705 phba->lpfc_rampdown_queue_depth(phba);
3706 spin_lock_irqsave(&phba->hbalock, iflag);
3707 }
3708
3709
3710 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3711 "0336 Rsp Ring %d error: IOCB Data: "
3712 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3713 pring->ringno,
3714 irsp->un.ulpWord[0],
3715 irsp->un.ulpWord[1],
3716 irsp->un.ulpWord[2],
3717 irsp->un.ulpWord[3],
3718 irsp->un.ulpWord[4],
3719 irsp->un.ulpWord[5],
3720 *(uint32_t *)&irsp->un1,
3721 *((uint32_t *)&irsp->un1 + 1));
3722 }
3723
3724 switch (type) {
3725 case LPFC_ABORT_IOCB:
3726 case LPFC_SOL_IOCB:
3727
3728
3729
3730
3731 if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3732 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3733 "0333 IOCB cmd 0x%x"
3734 " processed. Skipping"
3735 " completion\n",
3736 irsp->ulpCommand);
3737 break;
3738 }
3739
3740 spin_unlock_irqrestore(&phba->hbalock, iflag);
3741 cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3742 &rspiocbq);
3743 spin_lock_irqsave(&phba->hbalock, iflag);
3744 if (unlikely(!cmdiocbq))
3745 break;
3746 if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3747 cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3748 if (cmdiocbq->iocb_cmpl) {
3749 spin_unlock_irqrestore(&phba->hbalock, iflag);
3750 (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3751 &rspiocbq);
3752 spin_lock_irqsave(&phba->hbalock, iflag);
3753 }
3754 break;
3755 case LPFC_UNSOL_IOCB:
3756 spin_unlock_irqrestore(&phba->hbalock, iflag);
3757 lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3758 spin_lock_irqsave(&phba->hbalock, iflag);
3759 break;
3760 default:
3761 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3762 char adaptermsg[LPFC_MAX_ADPTMSG];
3763 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3764 memcpy(&adaptermsg[0], (uint8_t *) irsp,
3765 MAX_MSG_DATA);
3766 dev_warn(&((phba->pcidev)->dev),
3767 "lpfc%d: %s\n",
3768 phba->brd_no, adaptermsg);
3769 } else {
3770
3771 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3772 "0334 Unknown IOCB command "
3773 "Data: x%x, x%x x%x x%x x%x\n",
3774 type, irsp->ulpCommand,
3775 irsp->ulpStatus,
3776 irsp->ulpIoTag,
3777 irsp->ulpContext);
3778 }
3779 break;
3780 }
3781
3782
3783
3784
3785
3786
3787
3788 writel(pring->sli.sli3.rspidx,
3789 &phba->host_gp[pring->ringno].rspGetInx);
3790
3791 if (pring->sli.sli3.rspidx == portRspPut)
3792 portRspPut = le32_to_cpu(pgp->rspPutInx);
3793 }
3794
3795 if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3796 pring->stats.iocb_rsp_full++;
3797 status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3798 writel(status, phba->CAregaddr);
3799 readl(phba->CAregaddr);
3800 }
3801 if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3802 pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3803 pring->stats.iocb_cmd_empty++;
3804
3805
3806 pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3807 lpfc_sli_resume_iocb(phba, pring);
3808
3809 if ((pring->lpfc_sli_cmd_available))
3810 (pring->lpfc_sli_cmd_available) (phba, pring);
3811
3812 }
3813
3814 phba->fcp_ring_in_use = 0;
3815 spin_unlock_irqrestore(&phba->hbalock, iflag);
3816 return rc;
3817}
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829
3830
3831
3832
3833
3834
3835
3836
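/**
 * lpfc_sli_sp_handle_rspiocb - Handle a slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to the driver response iocb object.
 *
 * Accumulates response iocbs on the ring's continuation queue until the
 * last entry of a sequence (ulpLe set) arrives, then dispatches the whole
 * sequence as a solicited, unsolicited, abort or unknown completion and
 * releases the iocbs when done. Returns the passed-in iocb while a
 * sequence is still being accumulated, or NULL after a complete sequence
 * has been processed.
 **/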
3837static struct lpfc_iocbq *
3838lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3839 struct lpfc_iocbq *rspiocbp)
3840{
3841 struct lpfc_iocbq *saveq;
3842 struct lpfc_iocbq *cmdiocbp;
3843 struct lpfc_iocbq *next_iocb;
3844 IOCB_t *irsp = NULL;
3845 uint32_t free_saveq;
3846 uint8_t iocb_cmd_type;
3847 lpfc_iocb_type type;
3848 unsigned long iflag;
3849 int rc;
3850
3851 spin_lock_irqsave(&phba->hbalock, iflag);
3852
3853 list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3854 pring->iocb_continueq_cnt++;
3855
3856
3857 irsp = &rspiocbp->iocb;
3858 if (irsp->ulpLe) {
3859
3860
3861
3862
3863 free_saveq = 1;
3864 saveq = list_get_first(&pring->iocb_continueq,
3865 struct lpfc_iocbq, list);
3866 irsp = &(saveq->iocb);
3867 list_del_init(&pring->iocb_continueq);
3868 pring->iocb_continueq_cnt = 0;
3869
3870 pring->stats.iocb_rsp++;
3871
3872
3873
3874
3875
3876 if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3877 ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3878 IOERR_NO_RESOURCES)) {
3879 spin_unlock_irqrestore(&phba->hbalock, iflag);
3880 phba->lpfc_rampdown_queue_depth(phba);
3881 spin_lock_irqsave(&phba->hbalock, iflag);
3882 }
3883
3884 if (irsp->ulpStatus) {
3885
3886 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3887 "0328 Rsp Ring %d error: "
3888 "IOCB Data: "
3889 "x%x x%x x%x x%x "
3890 "x%x x%x x%x x%x "
3891 "x%x x%x x%x x%x "
3892 "x%x x%x x%x x%x\n",
3893 pring->ringno,
3894 irsp->un.ulpWord[0],
3895 irsp->un.ulpWord[1],
3896 irsp->un.ulpWord[2],
3897 irsp->un.ulpWord[3],
3898 irsp->un.ulpWord[4],
3899 irsp->un.ulpWord[5],
3900 *(((uint32_t *) irsp) + 6),
3901 *(((uint32_t *) irsp) + 7),
3902 *(((uint32_t *) irsp) + 8),
3903 *(((uint32_t *) irsp) + 9),
3904 *(((uint32_t *) irsp) + 10),
3905 *(((uint32_t *) irsp) + 11),
3906 *(((uint32_t *) irsp) + 12),
3907 *(((uint32_t *) irsp) + 13),
3908 *(((uint32_t *) irsp) + 14),
3909 *(((uint32_t *) irsp) + 15));
3910 }
3911
3912
3913
3914
3915
3916
3917
3918 iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3919 type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3920 switch (type) {
3921 case LPFC_SOL_IOCB:
3922 spin_unlock_irqrestore(&phba->hbalock, iflag);
3923 rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3924 spin_lock_irqsave(&phba->hbalock, iflag);
3925 break;
3926
3927 case LPFC_UNSOL_IOCB:
3928 spin_unlock_irqrestore(&phba->hbalock, iflag);
3929 rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3930 spin_lock_irqsave(&phba->hbalock, iflag);
3931 if (!rc)
3932 free_saveq = 0;
3933 break;
3934
3935 case LPFC_ABORT_IOCB:
3936 cmdiocbp = NULL;
3937 if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
3938 spin_unlock_irqrestore(&phba->hbalock, iflag);
3939 cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3940 saveq);
3941 spin_lock_irqsave(&phba->hbalock, iflag);
3942 }
3943 if (cmdiocbp) {
3944
3945 if (cmdiocbp->iocb_cmpl) {
3946 spin_unlock_irqrestore(&phba->hbalock,
3947 iflag);
3948 (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3949 saveq);
3950 spin_lock_irqsave(&phba->hbalock,
3951 iflag);
3952 } else
3953 __lpfc_sli_release_iocbq(phba,
3954 cmdiocbp);
3955 }
3956 break;
3957
3958 case LPFC_UNKNOWN_IOCB:
3959 if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3960 char adaptermsg[LPFC_MAX_ADPTMSG];
3961 memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3962 memcpy(&adaptermsg[0], (uint8_t *)irsp,
3963 MAX_MSG_DATA);
3964 dev_warn(&((phba->pcidev)->dev),
3965 "lpfc%d: %s\n",
3966 phba->brd_no, adaptermsg);
3967 } else {
3968
3969 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3970 "0335 Unknown IOCB "
3971 "command Data: x%x "
3972 "x%x x%x x%x\n",
3973 irsp->ulpCommand,
3974 irsp->ulpStatus,
3975 irsp->ulpIoTag,
3976 irsp->ulpContext);
3977 }
3978 break;
3979 }
3980
3981 if (free_saveq) {
3982 list_for_each_entry_safe(rspiocbp, next_iocb,
3983 &saveq->list, list) {
3984 list_del_init(&rspiocbp->list);
3985 __lpfc_sli_release_iocbq(phba, rspiocbp);
3986 }
3987 __lpfc_sli_release_iocbq(phba, saveq);
3988 }
3989 rspiocbp = NULL;
3990 }
3991 spin_unlock_irqrestore(&phba->hbalock, iflag);
3992 return rspiocbp;
3993}
3994
3995
3996
3997
3998
3999
4000
4001
4002
4003
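/**
 * lpfc_sli_handle_slow_ring_event - Wrapper for slow ring event handling
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Invokes the SLI revision specific slow-path ring event handler through
 * the lpfc_sli_handle_slow_ring_event function pointer in the HBA object.
 **/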
4004void
4005lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4006 struct lpfc_sli_ring *pring, uint32_t mask)
4007{
4008 phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4009}
4010
4011
4012
4013
4014
4015
4016
4017
4018
4019
4020
4021
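/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 slow (non-FCP) ring events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * Processes response entries on a slow (non-FCP) ring for SLI3 ports.
 * Each entry is copied into a driver iocb and handed to
 * lpfc_sli_sp_handle_rspiocb. If the port's response put index is beyond
 * the ring size, the HBA is placed in error state and error attention
 * handling is invoked.
 **/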
4022static void
4023lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4024 struct lpfc_sli_ring *pring, uint32_t mask)
4025{
4026 struct lpfc_pgp *pgp;
4027 IOCB_t *entry;
4028 IOCB_t *irsp = NULL;
4029 struct lpfc_iocbq *rspiocbp = NULL;
4030 uint32_t portRspPut, portRspMax;
4031 unsigned long iflag;
4032 uint32_t status;
4033
4034 pgp = &phba->port_gp[pring->ringno];
4035 spin_lock_irqsave(&phba->hbalock, iflag);
4036 pring->stats.iocb_event++;
4037
4038
4039
4040
4041
4042 portRspMax = pring->sli.sli3.numRiocb;
4043 portRspPut = le32_to_cpu(pgp->rspPutInx);
4044 if (portRspPut >= portRspMax) {
4045
4046
4047
4048
4049 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4050 "0303 Ring %d handler: portRspPut %d "
4051 "is bigger than rsp ring %d\n",
4052 pring->ringno, portRspPut, portRspMax);
4053
4054 phba->link_state = LPFC_HBA_ERROR;
4055 spin_unlock_irqrestore(&phba->hbalock, iflag);
4056
4057 phba->work_hs = HS_FFER3;
4058 lpfc_handle_eratt(phba);
4059
4060 return;
4061 }
4062
4063 rmb();
4064 while (pring->sli.sli3.rspidx != portRspPut) {
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
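  /*
   * Build a completion list and call the appropriate handler: fetch the
   * next response entry, copy it into a free driver iocb, queue it on
   * the ring's continuation list and advance the local response index.
   */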
4078 entry = lpfc_resp_iocb(phba, pring);
4079
4080 phba->last_completion_time = jiffies;
4081 rspiocbp = __lpfc_sli_get_iocbq(phba);
4082 if (rspiocbp == NULL) {
4083 printk(KERN_ERR "%s: out of buffers! Failing "
4084 "completion.\n", __func__);
4085 break;
4086 }
4087
4088 lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4089 phba->iocb_rsp_size);
4090 irsp = &rspiocbp->iocb;
4091
4092 if (++pring->sli.sli3.rspidx >= portRspMax)
4093 pring->sli.sli3.rspidx = 0;
4094
4095 if (pring->ringno == LPFC_ELS_RING) {