#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/crash_dump.h>
#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"

static enum cpuhp_state lpfc_cpuhp_state;
/* Used when mapping IRQ vectors in a driver centric manner */
static uint32_t lpfc_present_cpu;

static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
static void lpfc_cpuhp_add(struct lpfc_hba *phba);
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
#define LPFC_NVMET_BUF_POST 254
static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
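		/* The first time through, convert the ASCII license key to
		 * big-endian 32-bit words in place; it is copied into the
		 * READ_NVPARM mailbox below in that byte order.
		 */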
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char*)mb->un.varRDnvp.rsvd3, 0,
			sizeof (mb->un.varRDnvp.rsvd3));
		memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
			 sizeof (licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}

	/*
	 * Clear all option bits except LPFC_SLI3_BG_ENABLED,
	 * which was already set in lpfc_get_cfgparam()
	 */
	phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;

	/* If the sli feature level is less then 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
						sizeof (phba->RandomData));

	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;
	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;

		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);

	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for driver's configuring asynchronous event
 * mailbox command to the device. If the mailbox command returns successfully,
 * it will set internal async event support flag to 1; otherwise, it will
 * set internal async event support flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for dump mailbox command for getting
 * wake up parameters. When this command complete, the response contain
 * Option rom version of the HBA. This function translate the version number
 * into a human readable string and store it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";

	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contain option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}

/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *   None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the FAWWPN_PARAM_CHG
	 * flag to note that a fabric-assigned WWPN change has occurred
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
	    memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
		   sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}

/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is not
	 * over heated any more.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NPIV.  */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *)pmb->ctx_buf;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->ctx_buf = NULL;
	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
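		/* Each pass consumes one WWN byte and emits two characters
		 * ('0'-'9' or 'a'-'f'), so the 12-character serial number
		 * is built from six bytes of the node name.
		 */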
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}

	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
	if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth,
				mb->un.varRdConfig.max_xri);
		phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);

	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;
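	/* When the FCP ring is serviced by polling, leave its ring-0
	 * interrupt disabled.
	 */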
	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}

/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}

/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;

	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
	     !(phba->lmt & LMT_64Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *		0 - success
 *		Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}

/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}

/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 * rspiocb which got deferred
 *
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}

/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 *
 * Return codes
 *   void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}

/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;
	struct lpfc_iocbq *piocb, *next_iocb;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or DOA
			 * Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_for_each_entry_safe(piocb, next_iocb,
					 &pring->txcmplq, list)
			piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}

/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bring
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *psb, *psb_next;
	struct lpfc_async_xchg_ctx *ctxp, *ctxp_next;
	struct lpfc_sli4_hdw_queue *qp;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	struct lpfc_sglq *sglq_entry = NULL;
	int cnt, idx;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */

	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;

	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			 &phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);

	/* abts_xxxx_buf_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock_irq(&phba->hbalock);
	cnt = 0;
	for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
		qp = &phba->sli4_hba.hdwq[idx];

		spin_lock(&qp->abts_io_buf_list_lock);
		list_splice_init(&qp->lpfc_abts_io_buf_list,
				 &aborts);

		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
			cnt++;
		}
		spin_lock(&qp->io_buf_list_put_lock);
		list_splice_init(&aborts, &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs += qp->abts_scsi_io_bufs;
		qp->put_io_bufs += qp->abts_nvme_io_bufs;
		qp->abts_scsi_io_bufs = 0;
		qp->abts_nvme_io_bufs = 0;
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->abts_io_buf_list_lock);
	}
	spin_unlock_irq(&phba->hbalock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return cnt;
}

/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bring down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}

/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: timer context used to obtain the pointer to lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}

/*
 * lpfc_idle_stat_delay_work - idle_stat tracking
 *
 * This routine tracks per-cq idle_stat and determines polling decisions.
 *
 * Return codes:
 *   None
 **/
static void
lpfc_idle_stat_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba,
					     idle_stat_delay_work);
	struct lpfc_queue *cq;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_idle_stat *idle_stat;
	u32 i, idle_percent;
	u64 wall, wall_idle, diff_wall, diff_idle, busy_time;

	if (phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		/* get_cpu_idle_time returns values as running counters. Thus,
		 * to know the amount for this period, the prior counter values
		 * need to be subtracted from the current counter values.
		 * From there, the idle time stat can be calculated as a
		 * percentage of 100 - the sum of the other consumption times.
		 */
		wall_idle = get_cpu_idle_time(i, &wall, 1);
		diff_idle = wall_idle - idle_stat->prev_idle;
		diff_wall = wall - idle_stat->prev_wall;

		if (diff_wall <= diff_idle)
			busy_time = 0;
		else
			busy_time = diff_wall - diff_idle;

		idle_percent = div64_u64(100 * busy_time, diff_wall);
		idle_percent = 100 - idle_percent;

		if (idle_percent < 15)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;

		idle_stat->prev_idle = wall_idle;
		idle_stat->prev_wall = wall;
	}

requeue:
	schedule_delayed_work(&phba->idle_stat_delay_work,
			      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}

static void
lpfc_hb_eq_delay_work(struct work_struct *work)
{
	struct lpfc_hba *phba = container_of(to_delayed_work(work),
					     struct lpfc_hba, eq_delay_work);
	struct lpfc_eq_intr_info *eqi, *eqi_new;
	struct lpfc_queue *eq, *eq_next;
	unsigned char *ena_delay = NULL;
	uint32_t usdelay;
	int i;

	if (!phba->cfg_auto_imax || phba->pport->load_flag & FC_UNLOADING)
		return;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    phba->pport->fc_flag & FC_OFFLINE_MODE)
		goto requeue;

	ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
			    GFP_KERNEL);
	if (!ena_delay)
		goto requeue;

	for (i = 0; i < phba->cfg_irq_chann; i++) {
		/* Get the EQ corresponding to the IRQ vector */
		eq = phba->sli4_hba.hba_eq_hdl[i].eq;
		if (!eq)
			continue;
		if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) {
			eq->q_flag &= ~HBA_EQ_DELAY_CHK;
			ena_delay[eq->last_cpu] = 1;
		}
	}

	for_each_present_cpu(i) {
		eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
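		/* Scale the new coalescing delay with the interrupt rate:
		 * one LPFC_EQ_DELAY_STEP per 1024 interrupts counted in the
		 * last window, capped at LPFC_MAX_AUTO_EQ_DELAY.
		 */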
		if (ena_delay[i]) {
			usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP;
			if (usdelay > LPFC_MAX_AUTO_EQ_DELAY)
				usdelay = LPFC_MAX_AUTO_EQ_DELAY;
		} else {
			usdelay = 0;
		}

		eqi->icnt = 0;

		list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) {
			if (unlikely(eq->last_cpu != i)) {
				eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
						      eq->last_cpu);
				list_move_tail(&eq->cpu_list, &eqi_new->list);
				continue;
			}
			if (usdelay != eq->q_mode)
				lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
							 usdelay);
		}
	}

	kfree(ena_delay);

requeue:
	queue_delayed_work(phba->wq, &phba->eq_delay_work,
			   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
}

/**
 * lpfc_hb_mxp_handler - Multi-XRI pools handler to adjust XRI distribution
 * @phba: pointer to lpfc hba data structure.
 *
 * For each heartbeat, this routine does some heuristic methods to adjust
 * XRI distribution. The goal is to fully utilize free XRIs.
 **/
static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
{
	u32 i;
	u32 hwq_count;

	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		/* Adjust XRIs in private pool */
		lpfc_adjust_pvt_pool_count(phba, i);

		/* Adjust high watermark */
		lpfc_adjust_high_watermark(phba, i);

#ifdef LPFC_MXP_STAT
		/* Snapshot pbl, pvt and busy count */
		lpfc_snapshot_mxp(phba, i);
#endif
	}
}

/**
 * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * If a HB mbox is not already in progress, this routine will allocate
 * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
 * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
 **/
int
lpfc_issue_hb_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int retval;

	/* Is a Heartbeat mbox already in progress */
	if (phba->hba_flag & HBA_HBEAT_INP)
		return 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENXIO;
	}
	phba->hba_flag |= HBA_HBEAT_INP;

	return 0;
}

/**
 * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * The heartbeat timer (every 5 sec) will fire. If the HBA_HBEAT_TMO
 * flag is set, it will force a MBX_HEARTBEAT mbox command, regardless
 * of the value of lpfc_enable_hba_heartbeat.
 *
 * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
 * try to issue a MBX_HEARTBEAT mbox command.
 **/
void
lpfc_issue_hb_tmo(struct lpfc_hba *phba)
{
	if (phba->cfg_enable_hba_heartbeat)
		return;
	phba->hba_flag |= HBA_HBEAT_TMO;
}

/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt
 * context or by processing slow-ring or fast-ring events within the HBA
 * timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply
 * resets the timer for the next timeout period. If lpfc heart-beat mailbox
 * command is configured and there is no heart-beat mailbox command
 * outstanding, a heart-beat mailbox is issued and the timer set properly.
 * Otherwise, if a heart-beat mailbox command has been outstanding past the
 * timeout window, the HBA shall be put to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_dmabuf *buf_ptr;
	int retval = 0;
	int i, tmo;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);

	if (phba->cfg_xri_rebalancing) {
		/* Multi-XRI pools handler */
		lpfc_hb_mxp_handler(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_change_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;

	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
		spin_lock_irq(&phba->pport->work_port_lock);
		if (time_after(phba->last_completion_time +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
				jiffies)) {
			spin_unlock_irq(&phba->pport->work_port_lock);
			if (phba->hba_flag & HBA_HBEAT_INP)
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			else
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			goto out;
		}
		spin_unlock_irq(&phba->pport->work_port_lock);

		/* Check if a MBX_HEARTBEAT is already in progress */
		if (phba->hba_flag & HBA_HBEAT_INP) {
			/*
			 * If heart beat timeout called with HBA_HBEAT_INP set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0459 Adapter heartbeat still outstanding: "
				"last compl time was %d ms.\n",
				jiffies_to_msecs(jiffies
					 - phba->last_completion_time));
			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
		} else {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {

				retval = lpfc_issue_hb_mbox(phba);
				if (retval) {
					tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
					goto out;
				}
				phba->skipped_hb = 0;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					" updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
	} else {
		/* Check to see if we want to force a MBX_HEARTBEAT */
		if (phba->hba_flag & HBA_HBEAT_TMO) {
			retval = lpfc_issue_hb_mbox(phba);
			if (retval)
				tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
			else
				tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
			goto out;
		}
		tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
	}
out:
	mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}

/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}

/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}

/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0479 Deferred Adapter Hardware Error "
			"Data: x%x x%x x%x\n",
			phba->work_hs, phba->work_status[0],
			phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggered erratt. That could cause the I/Os
	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the
	 * SCSI layer retry it after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear.*/
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * first write to the host attention register clear the
	 * host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}

static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}

/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggered erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}

/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: send reset/port recovery message.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for the
 * port status register (ERR, RDY, RN) bits before proceeding with the
 * function reset.
 **/
static int
lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
			    bool en_rn_msg)
{
	int rc;
	uint32_t intr_mode;

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		/*
		 * On error status condition, driver need to wait for port
		 * ready before performing reset.
		 */
		rc = lpfc_sli4_pdev_status_reg_wait(phba);
		if (rc)
			return rc;
	}

	/* need reset: attempt for port recovery */
	if (en_rn_msg)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2887 Reset Needed: Attempting Port "
				"Recovery...\n");

	/* If we are no wait, the HBA has been reset and is not
	 * functional, thus we should clear LPFC_SLI_ACTIVE flag.
	 */
	if (mbx_action == LPFC_MBX_NO_WAIT) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_offline_prep(phba, mbx_action);
	lpfc_sli_flush_io_rings(phba);
	lpfc_offline(phba);
	/* release interrupt for possible resource change */
	lpfc_sli4_disable_intr(phba);
	rc = lpfc_sli_brdrestart(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6309 Failed to restart board\n");
		return rc;
	}
	/* request and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3175 Failed to enable interrupt\n");
		return -EIO;
	}
	phba->intr_mode = intr_mode;
	rc = lpfc_online(phba);
	if (rc == 0)
		lpfc_unblock_mgmt_io(phba);

	return rc;
}

/**
 * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the SLI4 HBA hardware error attention
 * conditions.
 **/
static void
lpfc_handle_eratt_s4(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	uint32_t event_data;
	struct Scsi_Host *shost;
	uint32_t if_type;
	struct lpfc_register portstat_reg = {0};
	uint32_t reg_err1, reg_err2;
	uint32_t uerrlo_reg, uemasklo_reg;
	uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
	bool en_rn_msg = true;
	struct temp_event temp_event_data;
	struct lpfc_register portsmphr_reg;
	int rc, i;

	/* If the pci channel is offline, ignore possible errors, since
	 * we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3166 pci channel is offline\n");
		lpfc_sli4_offline_eratt(phba);
		return;
	}

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UERRLOregaddr,
				&uerrlo_reg);
		pci_rd_rc2 = lpfc_readl(
				phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
				&uemasklo_reg);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
			return;
		if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7623 Checking UE recoverable");

		for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
			if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				       &portsmphr_reg.word0))
				continue;

			smphr_port_status = bf_get(lpfc_port_smphr_port_status,
						   &portsmphr_reg);
			if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
			    LPFC_PORT_SEM_UE_RECOVERABLE)
				break;
			/*Sleep for 1sec*/
			msleep(1000);
		}

		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"4827 smphr_port_status x%x : Waited %dSec",
				smphr_port_status, i);

		/* Recoverable UE, reset the HBA device */
		if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
		    LPFC_PORT_SEM_UE_RECOVERABLE) {
			for (i = 0; i < 20; i++) {
				msleep(1000);
				if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
				    &portsmphr_reg.word0) &&
				    (LPFC_POST_STAGE_PORT_READY ==
				     bf_get(lpfc_port_smphr_port_status,
				     &portsmphr_reg))) {
					rc = lpfc_sli4_port_sta_fn_reset(phba,
						LPFC_MBX_NO_WAIT, en_rn_msg);
					if (rc == 0)
						return;
					lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"4215 Failed to recover UE");
					break;
				}
			}
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"7624 Firmware not ready: Failing UE recovery,"
				" waited %dSec", i);
		phba->link_state = LPFC_HBA_ERROR;
		break;

	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		pci_rd_rc1 = lpfc_readl(
				phba->sli4_hba.u.if_type2.STATUSregaddr,
				&portstat_reg.word0);
		/* consider PCI bus read error as pci_channel_offline */
		if (pci_rd_rc1 == -EIO) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3151 PCI bus read access failure: x%x\n",
				readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
		reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
		if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2889 Port Overtemperature event, "
					"taking port offline Data: x%x x%x\n",
					reg_err1, reg_err2);

			phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
			temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
			temp_event_data.event_code = LPFC_CRIT_TEMP;
			temp_event_data.data = 0xFFFFFFFF;

			shost = lpfc_shost_from_vport(phba->pport);
			fc_host_post_vendor_event(shost, fc_get_event_number(),
						  sizeof(temp_event_data),
						  (char *)&temp_event_data,
						  SCSI_NL_VID_TYPE_PCI
						  | PCI_VENDOR_ID_EMULEX);

			spin_lock_irq(&phba->hbalock);
			phba->over_temp_state = HBA_OVER_TEMP;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli4_offline_eratt(phba);
			return;
		}
		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3143 Port Down: Firmware Update "
					"Detected\n");
			en_rn_msg = false;
		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3144 Port Down: Debug Dump\n");
		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			 reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3145 Port Down: Provisioning\n");

		/* If resets are disabled then leave the HBA alone and return */
		if (!phba->cfg_enable_hba_reset)
			return;

		/* Check port status register for function reset */
		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
						 en_rn_msg);
		if (rc == 0) {
			/* don't report event on forced debug dump */
			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
			    reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
				return;
			else
				break;
		}
		/* fall through for not able to recover */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3152 Unrecoverable error\n");
		phba->link_state = LPFC_HBA_ERROR;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
			"3123 Report dump event to upper layer\n");
	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	event_data = FC_REG_DUMP_EVENT;
	shost = lpfc_shost_from_vport(vport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(event_data), (char *) &event_data,
				  SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}

/**
 * lpfc_handle_eratt - Wrapper func for handling hba error attention
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 hba error attention handling
 * routine from the API jump table function pointer from the lpfc HBA
 * data structure.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
void
lpfc_handle_eratt(struct lpfc_hba *phba)
{
	(*phba->lpfc_handle_eratt)(phba);
}

/**
 * lpfc_handle_latt - The HBA link event handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked from the worker thread to handle a HBA host
 * attention link event. SLI3 only.
 **/
void
lpfc_handle_latt(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	volatile uint32_t control;
	struct lpfc_dmabuf *mp;
	int rc = 0;

	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = 1;
		goto lpfc_handle_latt_err_exit;
	}

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto lpfc_handle_latt_free_pmb;
	}

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		rc = 3;
		goto lpfc_handle_latt_free_mp;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	psli->slistat.link_event++;
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = vport;
	/* Block ELS IOCBs until we have processed this mbox command */
	phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
	rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		rc = 4;
		goto lpfc_handle_latt_free_mbuf;
	}

	/* Clear Link Attention in HA REG */
	spin_lock_irq(&phba->hbalock);
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;

lpfc_handle_latt_free_mbuf:
	phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
	kfree(mp);
lpfc_handle_latt_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
lpfc_handle_latt_err_exit:
	/* Enable Link attention interrupts */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Clear Link Attention in HA REG */
	writel(HA_LATT, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	lpfc_linkdown(phba);
	phba->link_state = LPFC_HBA_ERROR;

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);

	return;
}

/**
 * lpfc_parse_vpd - Parse VPD (Vital Product Data)
 * @phba: pointer to lpfc hba data structure.
 * @vpd: pointer to the vital product data.
 * @len: length of the vital product data in bytes.
 *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. In this routine, the ModelName, ProgramType and
 * ModelDesc, etc. fields of the phba data structure will be populated.
 *
 * Return codes
 *   0 - pointer to the VPD passed in is NULL
 *   1 - success
 **/
int
lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
{
	uint8_t lenlo, lenhi;
	int Length;
	int i, j;
	int finished = 0;
	int index = 0;

	if (!vpd)
		return 0;

	/* Vital Product */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0455 Vital Product Data: x%x x%x x%x x%x\n",
			(uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
			(uint32_t) vpd[3]);
	while (!finished && (index < (len - 4))) {
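		/* PCI VPD resource tags: 0x82 (identifier string) and 0x91
		 * (read/write VPD-W) are skipped over, 0x90 (read-only
		 * VPD-R) is parsed for its keywords, and 0x78 is the end tag.
		 */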
		switch (vpd[index]) {
		case 0x82:
		case 0x91:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			i = ((((unsigned short)lenhi) << 8) + lenlo);
			index += i;
			break;
		case 0x90:
			index += 1;
			lenlo = vpd[index];
			index += 1;
			lenhi = vpd[index];
			index += 1;
			Length = ((((unsigned short)lenhi) << 8) + lenlo);
			if (Length > len - index)
				Length = len - index;
			while (Length > 0) {
			/* Look for Serial Number */
			if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->SerialNumber[j++] = vpd[index++];
					if (j == 31)
						break;
				}
				phba->SerialNumber[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
				phba->vpd_flag |= VPD_MODEL_DESC;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelDesc[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ModelDesc[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
				phba->vpd_flag |= VPD_MODEL_NAME;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ModelName[j++] = vpd[index++];
					if (j == 79)
						break;
				}
				phba->ModelName[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
				phba->vpd_flag |= VPD_PROGRAM_TYPE;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					phba->ProgramType[j++] = vpd[index++];
					if (j == 255)
						break;
				}
				phba->ProgramType[j] = 0;
				continue;
			}
			else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
				phba->vpd_flag |= VPD_PORT;
				index += 2;
				i = vpd[index];
				index += 1;
				j = 0;
				Length -= (3+i);
				while(i--) {
					if ((phba->sli_rev == LPFC_SLI_REV4) &&
					    (phba->sli4_hba.pport_name_sta ==
					     LPFC_SLI4_PPNAME_GET)) {
						j++;
						index++;
					} else
						phba->Port[j++] = vpd[index++];
					if (j == 19)
						break;
				}
				if ((phba->sli_rev != LPFC_SLI_REV4) ||
				    (phba->sli4_hba.pport_name_sta ==
				     LPFC_SLI4_PPNAME_NON))
					phba->Port[j] = 0;
				continue;
			}
			else {
				index += 2;
				i = vpd[index];
				index += 1;
				index += i;
				Length -= (3 + i);
			}
			}
			finished = 0;
			break;
		case 0x78:
			finished = 1;
			break;
		default:
			index ++;
			break;
		}
	}

	return(1);
}

/**
 * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
 * @phba: pointer to lpfc hba data structure.
 * @mdp: pointer to the data structure to hold the derived model name.
 * @descp: pointer to the data structure to hold the derived description.
 *
 * This routine retrieves HBA's description based on its registered PCI device
 * ID. The @descp passed into this function points to an array of 256 chars.
 * It shall be returned with the model name, maximum speed, and the host bus
 * type. The @mdp passed into this function points to an array of 80 chars.
 * When the description fails to be generated, "<Unknown>" is used instead.
 **/
static void
lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
{
	lpfc_vpd_t *vp;
	uint16_t dev_id = phba->pcidev->device;
	int max_speed;
	int GE = 0;
	int oneConnect = 0; /* default is not a oneConnect */
	struct {
		char *name;
		char *bus;
		char *function;
	} m = {"<Unknown>", "", ""};

	if (mdp && mdp[0] != '\0'
		&& descp && descp[0] != '\0')
		return;
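	/* Derive the highest supported link speed in Gb from the
	 * link-speed mask (lmt) bits.
	 */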
	if (phba->lmt & LMT_64Gb)
		max_speed = 64;
	else if (phba->lmt & LMT_32Gb)
		max_speed = 32;
	else if (phba->lmt & LMT_16Gb)
		max_speed = 16;
	else if (phba->lmt & LMT_10Gb)
		max_speed = 10;
	else if (phba->lmt & LMT_8Gb)
		max_speed = 8;
	else if (phba->lmt & LMT_4Gb)
		max_speed = 4;
	else if (phba->lmt & LMT_2Gb)
		max_speed = 2;
	else if (phba->lmt & LMT_1Gb)
		max_speed = 1;
	else
		max_speed = 0;

	vp = &phba->vpd;

	switch (dev_id) {
	case PCI_DEVICE_ID_FIREFLY:
		m = (typeof(m)){"LP6000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_SUPERFLY:
		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
			m = (typeof(m)){"LP7000", "PCI", ""};
		else
			m = (typeof(m)){"LP7000E", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_DRAGONFLY:
		m = (typeof(m)){"LP8000", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_CENTAUR:
		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
			m = (typeof(m)){"LP9002", "PCI", ""};
		else
			m = (typeof(m)){"LP9000", "PCI", ""};
		m.function = "Obsolete, Unsupported Fibre Channel Adapter";
		break;
	case PCI_DEVICE_ID_RFLY:
		m = (typeof(m)){"LP952", "PCI",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PEGASUS:
		m = (typeof(m)){"LP9802", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_THOR:
		m = (typeof(m)){"LP10000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_VIPER:
		m = (typeof(m)){"LPX1000", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_PFLY:
		m = (typeof(m)){"LP982", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_TFLY:
		m = (typeof(m)){"LP1050", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS:
		m = (typeof(m)){"LP11000", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_SCSP:
		m = (typeof(m)){"LP11000-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_HELIOS_DCSP:
		m = (typeof(m)){"LP11002-SP", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE:
		m = (typeof(m)){"LPe1000", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_SCSP:
		m = (typeof(m)){"LPe1000-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_NEPTUNE_DCSP:
		m = (typeof(m)){"LPe1002-SP", "PCIe",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BMID:
		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_BSMB:
		m = (typeof(m)){"LP111", "PCI-X2",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_SCSP:
		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZEPHYR_DCSP:
		m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
		GE = 1;
		break;
	case PCI_DEVICE_ID_ZMID:
		m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_ZSMB:
		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP101:
		m = (typeof(m)){"LP101", "PCI-X",
				"Obsolete, Unsupported Fibre Channel Adapter"};
		break;
	case PCI_DEVICE_ID_LP10000S:
2509 m = (typeof(m)){"LP10000-S", "PCI",
2510 "Obsolete, Unsupported Fibre Channel Adapter"};
2511 break;
2512 case PCI_DEVICE_ID_LP11000S:
2513 m = (typeof(m)){"LP11000-S", "PCI-X2",
2514 "Obsolete, Unsupported Fibre Channel Adapter"};
2515 break;
2516 case PCI_DEVICE_ID_LPE11000S:
2517 m = (typeof(m)){"LPe11000-S", "PCIe",
2518 "Obsolete, Unsupported Fibre Channel Adapter"};
2519 break;
2520 case PCI_DEVICE_ID_SAT:
2521 m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
2522 break;
2523 case PCI_DEVICE_ID_SAT_MID:
2524 m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
2525 break;
2526 case PCI_DEVICE_ID_SAT_SMB:
2527 m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
2528 break;
2529 case PCI_DEVICE_ID_SAT_DCSP:
2530 m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
2531 break;
2532 case PCI_DEVICE_ID_SAT_SCSP:
2533 m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
2534 break;
2535 case PCI_DEVICE_ID_SAT_S:
2536 m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
2537 break;
2538 case PCI_DEVICE_ID_HORNET:
2539 m = (typeof(m)){"LP21000", "PCIe",
2540 "Obsolete, Unsupported FCoE Adapter"};
2541 GE = 1;
2542 break;
2543 case PCI_DEVICE_ID_PROTEUS_VF:
2544 m = (typeof(m)){"LPev12000", "PCIe IOV",
2545 "Obsolete, Unsupported Fibre Channel Adapter"};
2546 break;
2547 case PCI_DEVICE_ID_PROTEUS_PF:
2548 m = (typeof(m)){"LPev12000", "PCIe IOV",
2549 "Obsolete, Unsupported Fibre Channel Adapter"};
2550 break;
2551 case PCI_DEVICE_ID_PROTEUS_S:
2552 m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
2553 "Obsolete, Unsupported Fibre Channel Adapter"};
2554 break;
2555 case PCI_DEVICE_ID_TIGERSHARK:
2556 oneConnect = 1;
2557 m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
2558 break;
2559 case PCI_DEVICE_ID_TOMCAT:
2560 oneConnect = 1;
2561 m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
2562 break;
2563 case PCI_DEVICE_ID_FALCON:
2564 m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
2565 "EmulexSecure Fibre"};
2566 break;
2567 case PCI_DEVICE_ID_BALIUS:
2568 m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
2569 "Obsolete, Unsupported Fibre Channel Adapter"};
2570 break;
2571 case PCI_DEVICE_ID_LANCER_FC:
2572 m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
2573 break;
2574 case PCI_DEVICE_ID_LANCER_FC_VF:
2575 m = (typeof(m)){"LPe16000", "PCIe",
2576 "Obsolete, Unsupported Fibre Channel Adapter"};
2577 break;
2578 case PCI_DEVICE_ID_LANCER_FCOE:
2579 oneConnect = 1;
2580 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
2581 break;
2582 case PCI_DEVICE_ID_LANCER_FCOE_VF:
2583 oneConnect = 1;
2584 m = (typeof(m)){"OCe15100", "PCIe",
2585 "Obsolete, Unsupported FCoE"};
2586 break;
2587 case PCI_DEVICE_ID_LANCER_G6_FC:
2588 m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
2589 break;
2590 case PCI_DEVICE_ID_LANCER_G7_FC:
2591 m = (typeof(m)){"LPe36000", "PCIe", "Fibre Channel Adapter"};
2592 break;
2593 case PCI_DEVICE_ID_SKYHAWK:
2594 case PCI_DEVICE_ID_SKYHAWK_VF:
2595 oneConnect = 1;
2596 m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
2597 break;
2598 default:
2599 m = (typeof(m)){"Unknown", "", ""};
2600 break;
2601 }
2602
2603 if (mdp && mdp[0] == '\0')
2604 snprintf(mdp, 79,"%s", m.name);
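
    /*
     * OneConnect adapters are all initiators; their description carries
     * the port name instead of a bus/speed string.
     */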
    if (descp && descp[0] == '\0') {
        if (oneConnect)
            snprintf(descp, 255,
                "Emulex OneConnect %s, %s Initiator %s",
                m.name, m.function,
                phba->Port);
        else if (max_speed == 0)
            snprintf(descp, 255,
                "Emulex %s %s %s",
                m.name, m.bus, m.function);
        else
            snprintf(descp, 255,
                "Emulex %s %d%s %s %s",
                m.name, max_speed, (GE) ? "GE" : "Gb",
                m.bus, m.function);
    }
}
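
/**
 * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to an IOCB ring.
 * @cnt: the number of buffers to post.
 *
 * Posts the requested number of receive buffers, up to two per
 * QUE_RING_BUF64 IOCB. Buffers that could not be posted are accounted in
 * pring->missbufcnt.
 *
 * Return codes
 *   0 - success; otherwise the number of buffers NOT posted.
 */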
int
lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
{
    IOCB_t *icmd;
    struct lpfc_iocbq *iocb;
    struct lpfc_dmabuf *mp1, *mp2;

    cnt += pring->missbufcnt;

    while (cnt > 0) {
        iocb = lpfc_sli_get_iocbq(phba);
        if (iocb == NULL) {
            pring->missbufcnt = cnt;
            return cnt;
        }
        icmd = &iocb->iocb;

        mp1 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (mp1)
            mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
        if (!mp1 || !mp1->virt) {
            kfree(mp1);
            lpfc_sli_release_iocbq(phba, iocb);
            pring->missbufcnt = cnt;
            return cnt;
        }

        INIT_LIST_HEAD(&mp1->list);

        if (cnt > 1) {
            mp2 = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
            if (mp2)
                mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
                                            &mp2->phys);
            if (!mp2 || !mp2->virt) {
                kfree(mp2);
                lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
                kfree(mp1);
                lpfc_sli_release_iocbq(phba, iocb);
                pring->missbufcnt = cnt;
                return cnt;
            }

            INIT_LIST_HEAD(&mp2->list);
        } else {
            mp2 = NULL;
        }

        icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
        icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
        icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
        icmd->ulpBdeCount = 1;
        cnt--;
        if (mp2) {
            icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
            icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
            icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
            cnt--;
            icmd->ulpBdeCount = 2;
        }

        icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
        icmd->ulpLe = 1;

        if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
            IOCB_ERROR) {
            lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
            kfree(mp1);
            cnt++;
            if (mp2) {
                lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
                kfree(mp2);
                cnt++;
            }
            lpfc_sli_release_iocbq(phba, iocb);
            pring->missbufcnt = cnt;
            return cnt;
        }
        lpfc_sli_ringpostbuf_put(phba, pring, mp1);
        if (mp2)
            lpfc_sli_ringpostbuf_put(phba, pring, mp2);
    }
    pring->missbufcnt = 0;
    return 0;
}
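
/**
 * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to the ELS ring
 * @phba: pointer to lpfc hba data structure.
 *
 * Posts the initial LPFC_BUF_RING0 receive buffers to the SLI3 ELS ring.
 *
 * Return codes
 *   0 - success (currently always success)
 */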
static int
lpfc_post_rcv_buf(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;

    lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);

    return 0;
}

#define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
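
/**
 * lpfc_sha_init - Set up initial array of hash table entries
 * @HashResultPointer: pointer to an array as the hash table.
 *
 * Loads the standard SHA-1 initialization constants.
 */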
static void
lpfc_sha_init(uint32_t *HashResultPointer)
{
    HashResultPointer[0] = 0x67452301;
    HashResultPointer[1] = 0xEFCDAB89;
    HashResultPointer[2] = 0x98BADCFE;
    HashResultPointer[3] = 0x10325476;
    HashResultPointer[4] = 0xC3D2E1F0;
}
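
/**
 * lpfc_sha_iterate - Iterate initial hash table with the working hash table
 * @HashResultPointer: pointer to the running hash result.
 * @HashWorkingPointer: pointer to the 80-entry working schedule.
 *
 * Expands the message schedule and runs the 80-round SHA-1 compression,
 * accumulating into the running hash result.
 */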
static void
lpfc_sha_iterate(uint32_t *HashResultPointer, uint32_t *HashWorkingPointer)
{
    int t;
    uint32_t TEMP;
    uint32_t A, B, C, D, E;
    t = 16;
    do {
        HashWorkingPointer[t] =
            S(1, HashWorkingPointer[t - 3] ^
                 HashWorkingPointer[t - 8] ^
                 HashWorkingPointer[t - 14] ^
                 HashWorkingPointer[t - 16]);
    } while (++t <= 79);
    t = 0;
    A = HashResultPointer[0];
    B = HashResultPointer[1];
    C = HashResultPointer[2];
    D = HashResultPointer[3];
    E = HashResultPointer[4];

    do {
        if (t < 20) {
            TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
        } else if (t < 40) {
            TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
        } else if (t < 60) {
            TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
        } else {
            TEMP = (B ^ C ^ D) + 0xCA62C1D6;
        }
        TEMP += S(5, A) + E + HashWorkingPointer[t];
        E = D;
        D = C;
        C = S(30, B);
        B = A;
        A = TEMP;
    } while (++t <= 79);

    HashResultPointer[0] += A;
    HashResultPointer[1] += B;
    HashResultPointer[2] += C;
    HashResultPointer[3] += D;
    HashResultPointer[4] += E;
}
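
/**
 * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
 * @RandomChallenge: pointer to the entry of host challenge random number array.
 * @HashWorking: pointer to the entry of the working hash array.
 *
 * XORs the challenge random number into the working hash entry.
 */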
static void
lpfc_challenge_key(uint32_t *RandomChallenge, uint32_t *HashWorking)
{
    *HashWorking = (*RandomChallenge ^ *HashWorking);
}
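
/**
 * lpfc_hba_init - Perform special handling for LC HBA initialization
 * @phba: pointer to lpfc hba data structure.
 * @hbainit: pointer to the 5-word hash result array.
 *
 * Seeds a working array with the HBA's WWNN, mixes in the random challenge
 * data and runs the SHA-1 iteration over it to fill @hbainit.
 */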
void
lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
{
    int t;
    uint32_t *HashWorking;
    uint32_t *pwwnn = (uint32_t *) phba->wwnn;

    HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
    if (!HashWorking)
        return;

    HashWorking[0] = HashWorking[78] = *pwwnn++;
    HashWorking[1] = HashWorking[79] = *pwwnn;

    for (t = 0; t < 7; t++)
        lpfc_challenge_key(phba->RandomData + t, HashWorking + t);

    lpfc_sha_init(hbainit);
    lpfc_sha_iterate(hbainit, HashWorking);
    kfree(HashWorking);
}
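
/**
 * lpfc_cleanup - Perform vport cleanup before deleting a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Tears down the vport's node list: fabric and unused nodes are released
 * directly, remaining nodes are driven through the discovery state machine
 * with DEVICE_RECOVERY and DEVICE_RM events, and the routine then waits for
 * the list to drain.
 */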
void
lpfc_cleanup(struct lpfc_vport *vport)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_nodelist *ndlp, *next_ndlp;
    int i = 0;

    if (phba->link_state > LPFC_LINK_DOWN)
        lpfc_port_link_failure(vport);

    if (lpfc_is_vmid_enabled(phba))
        lpfc_vmid_vport_cleanup(vport);

    list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
        if (vport->port_type != LPFC_PHYSICAL_PORT &&
            ndlp->nlp_DID == Fabric_DID) {
            lpfc_nlp_put(ndlp);
            continue;
        }

        if (ndlp->nlp_DID == Fabric_Cntl_DID &&
            ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
            lpfc_nlp_put(ndlp);
            continue;
        }

        if (ndlp->nlp_type & NLP_FABRIC &&
            ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
            lpfc_disc_state_machine(vport, ndlp, NULL,
                                    NLP_EVT_DEVICE_RECOVERY);

        if (!(ndlp->fc4_xpt_flags & (NVME_XPT_REGD|SCSI_XPT_REGD)))
            lpfc_disc_state_machine(vport, ndlp, NULL,
                                    NLP_EVT_DEVICE_RM);
    }
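
    /* At this point, ALL ndlp's should be gone because of the previous
     * NLP_EVT_DEVICE_RM. Wait here for that to happen, logging the
     * stragglers if the list has not drained after roughly 30 seconds.
     */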
    while (!list_empty(&vport->fc_nodes)) {
        if (i++ > 3000) {
            lpfc_printf_vlog(vport, KERN_ERR,
                             LOG_TRACE_EVENT,
                             "0233 Nodelist not empty\n");
            list_for_each_entry_safe(ndlp, next_ndlp,
                                     &vport->fc_nodes, nlp_listp) {
                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
                                 LOG_TRACE_EVENT,
                                 "0282 did:x%x ndlp:x%px "
                                 "refcnt:%d xflags x%x nflag x%x\n",
                                 ndlp->nlp_DID, (void *)ndlp,
                                 kref_read(&ndlp->kref),
                                 ndlp->fc4_xpt_flags,
                                 ndlp->nlp_flag);
            }
            break;
        }

        msleep(10);
    }
    lpfc_cleanup_vports_rrqs(vport, NULL);
}
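
/**
 * lpfc_stop_vport_timers - Stop all the timers associated with a vport
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * Stops all the timers associated with a @vport. Before a vport is deleted,
 * all of its timers shall be stopped.
 */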
void
lpfc_stop_vport_timers(struct lpfc_vport *vport)
{
    del_timer_sync(&vport->els_tmofunc);
    del_timer_sync(&vport->delayed_disc_tmo);
    lpfc_can_disctmo(vport);
    return;
}
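
/**
 * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Clears the pending FCF rediscovery flag and stops the wait timer. The
 * double-underscore prefix follows the driver's convention for lock-free
 * helpers; the caller is expected to hold the host lock.
 */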
void
__lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
    phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;

    del_timer(&phba->fcf.redisc_wait);
}
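
/**
 * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
 * @phba: pointer to lpfc hba data structure.
 *
 * Under the host lock, stops any pending FCF rediscovery wait timer and
 * clears the failover-in-progress flags.
 */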
void
lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
{
    spin_lock_irq(&phba->hbalock);
    if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
        spin_unlock_irq(&phba->hbalock);
        return;
    }
    __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);

    phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
    spin_unlock_irq(&phba->hbalock);
}
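
/**
 * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops all the timers associated with an HBA, including the device-group
 * specific ones (FCP poll timer on SLI3, FCF rediscovery wait timer on
 * SLI4).
 */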
void
lpfc_stop_hba_timers(struct lpfc_hba *phba)
{
    if (phba->pport)
        lpfc_stop_vport_timers(phba->pport);
    cancel_delayed_work_sync(&phba->eq_delay_work);
    cancel_delayed_work_sync(&phba->idle_stat_delay_work);
    del_timer_sync(&phba->sli.mbox_tmo);
    del_timer_sync(&phba->fabric_block_timer);
    del_timer_sync(&phba->eratt_poll);
    del_timer_sync(&phba->hb_tmofunc);
    if (phba->sli_rev == LPFC_SLI_REV4) {
        del_timer_sync(&phba->rrq_tmr);
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
    }
    phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);

    switch (phba->pci_dev_grp) {
    case LPFC_PCI_DEV_LP:
        del_timer_sync(&phba->fcp_poll_timer);
        break;
    case LPFC_PCI_DEV_OC:
        lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
        break;
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                "0297 Invalid device group (x%x)\n",
                phba->pci_dev_grp);
        break;
    }
    return;
}
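
/**
 * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * Marks the HBA's management interface as blocked, preventing further user
 * space access. With LPFC_MBX_WAIT, also waits up to the mailbox timeout
 * for any active mailbox command to complete.
 */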
static void
lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
    unsigned long iflag;
    uint8_t actcmd = MBX_HEARTBEAT;
    unsigned long timeout;

    spin_lock_irqsave(&phba->hbalock, iflag);
    phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
    spin_unlock_irqrestore(&phba->hbalock, iflag);
    if (mbx_action == LPFC_MBX_NO_WAIT)
        return;
    timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
    spin_lock_irqsave(&phba->hbalock, iflag);
    if (phba->sli.mbox_active) {
        actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
                phba->sli.mbox_active) * 1000) + jiffies;
    }
    spin_unlock_irqrestore(&phba->hbalock, iflag);

    while (phba->sli.mbox_active) {
        msleep(2);
        if (time_after(jiffies, timeout)) {
            lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                    "2813 Mgmt IO is Blocked %x "
                    "- mbox cmd %x still active\n",
                    phba->sli.sli_flag, actcmd);
            break;
        }
    }
}
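
/**
 * lpfc_sli4_node_prep - Assign RPIs for active nodes
 * @phba: pointer to lpfc hba data structure.
 *
 * On an SLI4 HBA, allocates a fresh RPI for every node of every vport that
 * is not unloading.
 */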
void
lpfc_sli4_node_prep(struct lpfc_hba *phba)
{
    struct lpfc_nodelist *ndlp, *next_ndlp;
    struct lpfc_vport **vports;
    int i, rpi;

    if (phba->sli_rev != LPFC_SLI_REV4)
        return;

    vports = lpfc_create_vport_work_array(phba);
    if (vports == NULL)
        return;

    for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
        if (vports[i]->load_flag & FC_UNLOADING)
            continue;

        list_for_each_entry_safe(ndlp, next_ndlp,
                                 &vports[i]->fc_nodes,
                                 nlp_listp) {
            rpi = lpfc_sli4_alloc_rpi(phba);
            if (rpi == LPFC_RPI_ALLOC_ERROR)
                continue;
            ndlp->nlp_rpi = rpi;
            lpfc_printf_vlog(ndlp->vport, KERN_INFO,
                             LOG_NODE | LOG_DISCOVERY,
                             "0009 Assign RPI x%x to ndlp x%px "
                             "DID:x%06x flg:x%x\n",
                             ndlp->nlp_rpi, ndlp, ndlp->nlp_DID,
                             ndlp->nlp_flag);
        }
    }
    lpfc_destroy_vport_work_array(phba, vports);
}
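
/**
 * lpfc_create_expedite_pool - create expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Moves a batch (XRI_BATCH) of IO buffers from the put list of hardware
 * queue 0 to the expedite pool and marks them as expedite.
 */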
static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
{
    struct lpfc_sli4_hdw_queue *qp;
    struct lpfc_io_buf *lpfc_ncmd;
    struct lpfc_io_buf *lpfc_ncmd_next;
    struct lpfc_epd_pool *epd_pool;
    unsigned long iflag;

    epd_pool = &phba->epd_pool;
    qp = &phba->sli4_hba.hdwq[0];

    spin_lock_init(&epd_pool->lock);
    spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
    spin_lock(&epd_pool->lock);
    INIT_LIST_HEAD(&epd_pool->list);
    list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                             &qp->lpfc_io_buf_list_put, list) {
        list_move_tail(&lpfc_ncmd->list, &epd_pool->list);
        lpfc_ncmd->expedite = true;
        qp->put_io_bufs--;
        epd_pool->count++;
        if (epd_pool->count >= XRI_BATCH)
            break;
    }
    spin_unlock(&epd_pool->lock);
    spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
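
/**
 * lpfc_destroy_expedite_pool - destroy expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the IO buffers held in the expedite pool to the put list of
 * hardware queue 0.
 */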
static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
{
    struct lpfc_sli4_hdw_queue *qp;
    struct lpfc_io_buf *lpfc_ncmd;
    struct lpfc_io_buf *lpfc_ncmd_next;
    struct lpfc_epd_pool *epd_pool;
    unsigned long iflag;

    epd_pool = &phba->epd_pool;
    qp = &phba->sli4_hba.hdwq[0];

    spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
    spin_lock(&epd_pool->lock);
    list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                             &epd_pool->list, list) {
        list_move_tail(&lpfc_ncmd->list,
                       &qp->lpfc_io_buf_list_put);
        lpfc_ncmd->expedite = false;
        qp->put_io_bufs++;
        epd_pool->count--;
    }
    spin_unlock(&epd_pool->lock);
    spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);
}
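
/**
 * lpfc_create_multixri_pools - create multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Creates a public (pbl) and a private (pvt) XRI pool for each hardware
 * queue and moves the queue's put-list buffers into its public pool. On
 * allocation failure, any pools already created are freed and XRI
 * rebalancing is disabled.
 */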
void lpfc_create_multixri_pools(struct lpfc_hba *phba)
{
    u32 i, j;
    u32 hwq_count;
    u32 count_per_hwq;
    struct lpfc_io_buf *lpfc_ncmd;
    struct lpfc_io_buf *lpfc_ncmd_next;
    unsigned long iflag;
    struct lpfc_sli4_hdw_queue *qp;
    struct lpfc_multixri_pool *multixri_pool;
    struct lpfc_pbl_pool *pbl_pool;
    struct lpfc_pvt_pool *pvt_pool;

    lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
            "1234 num_hdw_queue=%d num_present_cpu=%d common_xri_cnt=%d\n",
            phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
            phba->sli4_hba.io_xri_cnt);

    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
        lpfc_create_expedite_pool(phba);

    hwq_count = phba->cfg_hdw_queue;
    count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;

    for (i = 0; i < hwq_count; i++) {
        multixri_pool = kzalloc(sizeof(*multixri_pool), GFP_KERNEL);

        if (!multixri_pool) {
            lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                    "1238 Failed to allocate memory for "
                    "multixri_pool\n");

            if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
                lpfc_destroy_expedite_pool(phba);

            j = 0;
            while (j < i) {
                qp = &phba->sli4_hba.hdwq[j];
                kfree(qp->p_multixri_pool);
                j++;
            }
            phba->cfg_xri_rebalancing = 0;
            return;
        }

        qp = &phba->sli4_hba.hdwq[i];
        qp->p_multixri_pool = multixri_pool;

        multixri_pool->xri_limit = count_per_hwq;
        multixri_pool->rrb_next_hwqid = i;

        pbl_pool = &multixri_pool->pbl_pool;
        spin_lock_init(&pbl_pool->lock);
        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);
        spin_lock(&pbl_pool->lock);
        INIT_LIST_HEAD(&pbl_pool->list);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &qp->lpfc_io_buf_list_put, list) {
            list_move_tail(&lpfc_ncmd->list, &pbl_pool->list);
            qp->put_io_bufs--;
            pbl_pool->count++;
        }
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                "1235 Moved %d buffers from PUT list over to pbl_pool[%d]\n",
                pbl_pool->count, i);
        spin_unlock(&pbl_pool->lock);
        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

        pvt_pool = &multixri_pool->pvt_pool;
        pvt_pool->high_watermark = multixri_pool->xri_limit / 2;
        pvt_pool->low_watermark = XRI_BATCH;
        spin_lock_init(&pvt_pool->lock);
        spin_lock_irqsave(&pvt_pool->lock, iflag);
        INIT_LIST_HEAD(&pvt_pool->list);
        pvt_pool->count = 0;
        spin_unlock_irqrestore(&pvt_pool->lock, iflag);
    }
}
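
/**
 * lpfc_destroy_multixri_pools - destroy multi-XRI pools
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the buffers held in each hardware queue's public and private
 * pools to that queue's put list and frees the pool structures.
 */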
static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
{
    u32 i;
    u32 hwq_count;
    struct lpfc_io_buf *lpfc_ncmd;
    struct lpfc_io_buf *lpfc_ncmd_next;
    unsigned long iflag;
    struct lpfc_sli4_hdw_queue *qp;
    struct lpfc_multixri_pool *multixri_pool;
    struct lpfc_pbl_pool *pbl_pool;
    struct lpfc_pvt_pool *pvt_pool;

    if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
        lpfc_destroy_expedite_pool(phba);

    if (!(phba->pport->load_flag & FC_UNLOADING))
        lpfc_sli_flush_io_rings(phba);

    hwq_count = phba->cfg_hdw_queue;

    for (i = 0; i < hwq_count; i++) {
        qp = &phba->sli4_hba.hdwq[i];
        multixri_pool = qp->p_multixri_pool;
        if (!multixri_pool)
            continue;

        qp->p_multixri_pool = NULL;

        spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag);

        pbl_pool = &multixri_pool->pbl_pool;
        spin_lock(&pbl_pool->lock);

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                "1236 Moving %d buffers from pbl_pool[%d] TO PUT list\n",
                pbl_pool->count, i);

        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &pbl_pool->list, list) {
            list_move_tail(&lpfc_ncmd->list,
                           &qp->lpfc_io_buf_list_put);
            qp->put_io_bufs++;
            pbl_pool->count--;
        }

        INIT_LIST_HEAD(&pbl_pool->list);
        pbl_pool->count = 0;

        spin_unlock(&pbl_pool->lock);

        pvt_pool = &multixri_pool->pvt_pool;
        spin_lock(&pvt_pool->lock);

        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                "1237 Moving %d buffers from pvt_pool[%d] TO PUT list\n",
                pvt_pool->count, i);

        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &pvt_pool->list, list) {
            list_move_tail(&lpfc_ncmd->list,
                           &qp->lpfc_io_buf_list_put);
            qp->put_io_bufs++;
            pvt_pool->count--;
        }

        INIT_LIST_HEAD(&pvt_pool->list);
        pvt_pool->count = 0;

        spin_unlock(&pvt_pool->lock);
        spin_unlock_irqrestore(&qp->io_buf_list_put_lock, iflag);

        kfree(multixri_pool);
    }
}
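
/**
 * lpfc_online - Initialize and bring an HBA online
 * @phba: pointer to lpfc hba data structure.
 *
 * Initializes the HBA and brings it online. Management I/O is blocked for
 * the duration so user space cannot interfere with the initialization.
 *
 * Return codes
 *   0 - successful
 *   1 - failed
 */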
int
lpfc_online(struct lpfc_hba *phba)
{
    struct lpfc_vport *vport;
    struct lpfc_vport **vports;
    int i, error = 0;
    bool vpis_cleared = false;

    if (!phba)
        return 0;
    vport = phba->pport;

    if (!(vport->fc_flag & FC_OFFLINE_MODE))
        return 0;

    lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
            "0458 Bring Adapter online\n");

    lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

    if (phba->sli_rev == LPFC_SLI_REV4) {
        if (lpfc_sli4_hba_setup(phba)) {
            lpfc_unblock_mgmt_io(phba);
            return 1;
        }
        spin_lock_irq(&phba->hbalock);
        if (!phba->sli4_hba.max_cfg_param.vpi_used)
            vpis_cleared = true;
        spin_unlock_irq(&phba->hbalock);
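
        /* Reestablish the local initiator port; the offline path
         * destroyed the previous localport.
         */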
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
            !phba->nvmet_support) {
            error = lpfc_nvme_create_localport(phba->pport);
            if (error)
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                        "6132 NVME restore reg failed "
                        "on nvmei error x%x\n", error);
        }
    } else {
        lpfc_sli_queue_init(phba);
        if (lpfc_sli_hba_setup(phba)) {
            lpfc_unblock_mgmt_io(phba);
            return 1;
        }
    }

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL) {
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            struct Scsi_Host *shost;
            shost = lpfc_shost_from_vport(vports[i]);
            spin_lock_irq(shost->host_lock);
            vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
            if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
                vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
            if (phba->sli_rev == LPFC_SLI_REV4) {
                vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
                if ((vpis_cleared) &&
                    (vports[i]->port_type !=
                        LPFC_PHYSICAL_PORT))
                    vports[i]->vpi = 0;
            }
            spin_unlock_irq(shost->host_lock);
        }
    }
    lpfc_destroy_vport_work_array(phba, vports);

    if (phba->cfg_xri_rebalancing)
        lpfc_create_multixri_pools(phba);

    lpfc_cpuhp_add(phba);

    lpfc_unblock_mgmt_io(phba);
    return 0;
}
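
/**
 * lpfc_unblock_mgmt_io - Mark an HBA's management interface to be not blocked
 * @phba: pointer to lpfc hba data structure.
 *
 * Marks the HBA's management interface as not blocked, re-allowing user
 * space access to the HBA, including the mailbox.
 */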
void
lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
{
    unsigned long iflag;

    spin_lock_irqsave(&phba->hbalock, iflag);
    phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
    spin_unlock_irqrestore(&phba->hbalock, iflag);
}
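
/**
 * lpfc_offline_prep - Prepare an HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 *
 * Blocks management I/O, brings the link down, unregisters the RPIs of all
 * remote nodes and shuts down the mailbox subsystem.
 */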
void
lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
    struct lpfc_vport *vport = phba->pport;
    struct lpfc_nodelist *ndlp, *next_ndlp;
    struct lpfc_vport **vports;
    struct Scsi_Host *shost;
    int i;

    if (vport->fc_flag & FC_OFFLINE_MODE)
        return;

    lpfc_block_mgmt_io(phba, mbx_action);

    lpfc_linkdown(phba);

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL) {
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            if (vports[i]->load_flag & FC_UNLOADING)
                continue;
            shost = lpfc_shost_from_vport(vports[i]);
            spin_lock_irq(shost->host_lock);
            vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
            vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
            vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
            spin_unlock_irq(shost->host_lock);

            shost = lpfc_shost_from_vport(vports[i]);
            list_for_each_entry_safe(ndlp, next_ndlp,
                                     &vports[i]->fc_nodes,
                                     nlp_listp) {
                spin_lock_irq(&ndlp->lock);
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                spin_unlock_irq(&ndlp->lock);

                if (phba->sli_rev == LPFC_SLI_REV4) {
                    lpfc_printf_vlog(vports[i], KERN_INFO,
                             LOG_NODE | LOG_DISCOVERY,
                             "0011 Free RPI x%x on "
                             "ndlp: x%px did x%x\n",
                             ndlp->nlp_rpi, ndlp,
                             ndlp->nlp_DID);
                    lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
                    ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
                }
                lpfc_unreg_rpi(vports[i], ndlp);

                if (ndlp->nlp_type & NLP_FABRIC) {
                    lpfc_disc_state_machine(vports[i], ndlp,
                            NULL, NLP_EVT_DEVICE_RECOVERY);

                    if (!(ndlp->fc4_xpt_flags &
                          (NVME_XPT_REGD | SCSI_XPT_REGD)))
                        lpfc_disc_state_machine
                            (vports[i], ndlp,
                             NULL,
                             NLP_EVT_DEVICE_RM);
                }
            }
        }
    }
    lpfc_destroy_vport_work_array(phba, vports);

    lpfc_sli_mbox_sys_shutdown(phba, mbx_action);

    if (phba->wq)
        flush_workqueue(phba->wq);
}
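
/**
 * lpfc_offline - Bring an HBA offline
 * @phba: pointer to lpfc hba data structure.
 *
 * Stops the port and its timers, destroys the NVMe local/target port
 * registrations, brings down the SLI layer and marks every vport offline.
 */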
void
lpfc_offline(struct lpfc_hba *phba)
{
    struct Scsi_Host *shost;
    struct lpfc_vport **vports;
    int i;

    if (phba->pport->fc_flag & FC_OFFLINE_MODE)
        return;

    lpfc_stop_port(phba);

    lpfc_nvmet_destroy_targetport(phba);
    lpfc_nvme_destroy_localport(phba->pport);

    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
            lpfc_stop_vport_timers(vports[i]);
    lpfc_destroy_vport_work_array(phba, vports);
    lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
            "0460 Bring Adapter offline\n");

    lpfc_sli_hba_down(phba);
    spin_lock_irq(&phba->hbalock);
    phba->work_ha = 0;
    spin_unlock_irq(&phba->hbalock);
    vports = lpfc_create_vport_work_array(phba);
    if (vports != NULL)
        for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
            shost = lpfc_shost_from_vport(vports[i]);
            spin_lock_irq(shost->host_lock);
            vports[i]->work_port_events = 0;
            vports[i]->fc_flag |= FC_OFFLINE_MODE;
            spin_unlock_irq(shost->host_lock);
        }
    lpfc_destroy_vport_work_array(phba, vports);

    if (phba->pport->fc_flag & FC_OFFLINE_MODE)
        __lpfc_cpuhp_remove(phba);

    if (phba->cfg_xri_rebalancing)
        lpfc_destroy_multixri_pools(phba);
}
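
/**
 * lpfc_scsi_free - Free all the SCSI buffers allocated to an HBA
 * @phba: pointer to lpfc hba data structure.
 *
 * Releases every SCSI buffer on the driver's get and put lists and returns
 * the DMA memory to the pool.
 */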
static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
    struct lpfc_io_buf *sb, *sb_next;

    if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
        return;

    spin_lock_irq(&phba->hbalock);

    spin_lock(&phba->scsi_buf_list_put_lock);
    list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
                             list) {
        list_del(&sb->list);
        dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
                      sb->dma_handle);
        kfree(sb);
        phba->total_scsi_bufs--;
    }
    spin_unlock(&phba->scsi_buf_list_put_lock);

    spin_lock(&phba->scsi_buf_list_get_lock);
    list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
                             list) {
        list_del(&sb->list);
        dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
                      sb->dma_handle);
        kfree(sb);
        phba->total_scsi_bufs--;
    }
    spin_unlock(&phba->scsi_buf_list_get_lock);
    spin_unlock_irq(&phba->hbalock);
}
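
/**
 * lpfc_io_free - Free all the IO buffers and IOCBs from the driver lists
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks every hardware queue and frees all IO buffers on the get and put
 * lists, including their DMA mappings and per-HWQ SGL/cmd-rsp buffers.
 */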
void
lpfc_io_free(struct lpfc_hba *phba)
{
    struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
    struct lpfc_sli4_hdw_queue *qp;
    int idx;

    for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
        qp = &phba->sli4_hba.hdwq[idx];

        spin_lock(&qp->io_buf_list_put_lock);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &qp->lpfc_io_buf_list_put,
                                 list) {
            list_del(&lpfc_ncmd->list);
            qp->put_io_bufs--;
            dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                          lpfc_ncmd->data, lpfc_ncmd->dma_handle);
            if (phba->cfg_xpsgl && !phba->nvmet_support)
                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
            lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
            kfree(lpfc_ncmd);
            qp->total_io_bufs--;
        }
        spin_unlock(&qp->io_buf_list_put_lock);

        spin_lock(&qp->io_buf_list_get_lock);
        list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                                 &qp->lpfc_io_buf_list_get,
                                 list) {
            list_del(&lpfc_ncmd->list);
            qp->get_io_bufs--;
            dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                          lpfc_ncmd->data, lpfc_ncmd->dma_handle);
            if (phba->cfg_xpsgl && !phba->nvmet_support)
                lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
            lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
            kfree(lpfc_ncmd);
            qp->total_io_bufs--;
        }
        spin_unlock(&qp->io_buf_list_get_lock);
    }
}
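
/**
 * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Grows or shrinks the ELS sgl list to match the current ELS IOCB count,
 * then reassigns physical XRIs to all ELS sgls, as needed after a port
 * function reset.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - No available memory
 */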
int
lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
    struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
    uint16_t i, lxri, xri_cnt, els_xri_cnt;
    LIST_HEAD(els_sgl_list);
    int rc;

    els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

    if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
        xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                "3157 ELS xri-sgl count increased from "
                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
                els_xri_cnt);
        for (i = 0; i < xri_cnt; i++) {
            sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
                                 GFP_KERNEL);
            if (sglq_entry == NULL) {
                lpfc_printf_log(phba, KERN_ERR,
                        LOG_TRACE_EVENT,
                        "2562 Failure to allocate an "
                        "ELS sgl entry:%d\n", i);
                rc = -ENOMEM;
                goto out_free_mem;
            }
            sglq_entry->buff_type = GEN_BUFF_TYPE;
            sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
                                               &sglq_entry->phys);
            if (sglq_entry->virt == NULL) {
                kfree(sglq_entry);
                lpfc_printf_log(phba, KERN_ERR,
                        LOG_TRACE_EVENT,
                        "2563 Failure to allocate an "
                        "ELS mbuf:%d\n", i);
                rc = -ENOMEM;
                goto out_free_mem;
            }
            sglq_entry->sgl = sglq_entry->virt;
            memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
            sglq_entry->state = SGL_FREED;
            list_add_tail(&sglq_entry->list, &els_sgl_list);
        }
        spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
        list_splice_init(&els_sgl_list,
                         &phba->sli4_hba.lpfc_els_sgl_list);
        spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
    } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
        xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                "3158 ELS xri-sgl count decreased from "
                "%d to %d\n", phba->sli4_hba.els_xri_cnt,
                els_xri_cnt);
        spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
        list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
                         &els_sgl_list);
        for (i = 0; i < xri_cnt; i++) {
            list_remove_head(&els_sgl_list,
                             sglq_entry, struct lpfc_sglq, list);
            if (sglq_entry) {
                __lpfc_mbuf_free(phba, sglq_entry->virt,
                                 sglq_entry->phys);
                kfree(sglq_entry);
            }
        }
        list_splice_init(&els_sgl_list,
                         &phba->sli4_hba.lpfc_els_sgl_list);
        spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
    } else
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                "3163 ELS xri-sgl count unchanged: %d\n",
                els_xri_cnt);
    phba->sli4_hba.els_xri_cnt = els_xri_cnt;

    sglq_entry = NULL;
    sglq_entry_next = NULL;
    list_for_each_entry_safe(sglq_entry, sglq_entry_next,
                             &phba->sli4_hba.lpfc_els_sgl_list, list) {
        lxri = lpfc_sli4_next_xritag(phba);
        if (lxri == NO_XRI) {
            lpfc_printf_log(phba, KERN_ERR,
                    LOG_TRACE_EVENT,
                    "2400 Failed to allocate xri for "
                    "ELS sgl\n");
            rc = -ENOMEM;
            goto out_free_mem;
        }
        sglq_entry->sli4_lxritag = lxri;
        sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
    }
    return 0;

out_free_mem:
    lpfc_free_els_sgl_list(phba);
    return rc;
}
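
/**
 * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Grows or shrinks the NVMET sgl list to the XRIs left after the ELS
 * allocation, then reassigns physical XRIs to all NVMET sgls.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - No available memory
 */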
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
    struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
    uint16_t i, lxri, xri_cnt, els_xri_cnt;
    uint16_t nvmet_xri_cnt;
    LIST_HEAD(nvmet_sgl_list);
    int rc;

    els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

    nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
    if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
        xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                "6302 NVMET xri-sgl cnt grew from %d to %d\n",
                phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
        for (i = 0; i < xri_cnt; i++) {
            sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
                                 GFP_KERNEL);
            if (sglq_entry == NULL) {
                lpfc_printf_log(phba, KERN_ERR,
                        LOG_TRACE_EVENT,
                        "6303 Failure to allocate an "
                        "NVMET sgl entry:%d\n", i);
                rc = -ENOMEM;
                goto out_free_mem;
            }
            sglq_entry->buff_type = NVMET_BUFF_TYPE;
            sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
                                                    &sglq_entry->phys);
            if (sglq_entry->virt == NULL) {
                kfree(sglq_entry);
                lpfc_printf_log(phba, KERN_ERR,
                        LOG_TRACE_EVENT,
                        "6304 Failure to allocate an "
                        "NVMET buf:%d\n", i);
                rc = -ENOMEM;
                goto out_free_mem;
            }
            sglq_entry->sgl = sglq_entry->virt;
            memset(sglq_entry->sgl, 0,
                   phba->cfg_sg_dma_buf_size);
            sglq_entry->state = SGL_FREED;
            list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
        }
        spin_lock_irq(&phba->hbalock);
        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_splice_init(&nvmet_sgl_list,
                         &phba->sli4_hba.lpfc_nvmet_sgl_list);
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        spin_unlock_irq(&phba->hbalock);
    } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
        xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                "6305 NVMET xri-sgl count decreased from "
                "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
                nvmet_xri_cnt);
        spin_lock_irq(&phba->hbalock);
        spin_lock(&phba->sli4_hba.sgl_list_lock);
        list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
                         &nvmet_sgl_list);
        for (i = 0; i < xri_cnt; i++) {
            list_remove_head(&nvmet_sgl_list,
                             sglq_entry, struct lpfc_sglq, list);
            if (sglq_entry) {
                lpfc_nvmet_buf_free(phba, sglq_entry->virt,
                                    sglq_entry->phys);
                kfree(sglq_entry);
            }
        }
        list_splice_init(&nvmet_sgl_list,
                         &phba->sli4_hba.lpfc_nvmet_sgl_list);
        spin_unlock(&phba->sli4_hba.sgl_list_lock);
        spin_unlock_irq(&phba->hbalock);
    } else
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                "6306 NVMET xri-sgl count unchanged: %d\n",
                nvmet_xri_cnt);
    phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

    sglq_entry = NULL;
    sglq_entry_next = NULL;
    list_for_each_entry_safe(sglq_entry, sglq_entry_next,
                             &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
        lxri = lpfc_sli4_next_xritag(phba);
        if (lxri == NO_XRI) {
            lpfc_printf_log(phba, KERN_ERR,
                    LOG_TRACE_EVENT,
                    "6307 Failed to allocate xri for "
                    "NVMET sgl\n");
            rc = -ENOMEM;
            goto out_free_mem;
        }
        sglq_entry->sli4_lxritag = lxri;
        sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
    }
    return 0;

out_free_mem:
    lpfc_free_nvmet_sgl_list(phba);
    return rc;
}
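
/**
 * lpfc_io_buf_flush - collect all IO buffers onto a single list
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list to receive the buffers.
 *
 * Empties every hardware queue's get and put lists onto @cbuf in ascending
 * XRI order and returns the number of buffers collected.
 */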
int
lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
{
    LIST_HEAD(blist);
    struct lpfc_sli4_hdw_queue *qp;
    struct lpfc_io_buf *lpfc_cmd;
    struct lpfc_io_buf *iobufp, *prev_iobufp;
    int idx, cnt, xri, inserted;

    cnt = 0;
    for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
        qp = &phba->sli4_hba.hdwq[idx];
        spin_lock_irq(&qp->io_buf_list_get_lock);
        spin_lock(&qp->io_buf_list_put_lock);

        list_splice_init(&qp->lpfc_io_buf_list_get, &blist);
        list_splice(&qp->lpfc_io_buf_list_put, &blist);
        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_get);
        INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
        cnt += qp->get_io_bufs + qp->put_io_bufs;
        qp->get_io_bufs = 0;
        qp->put_io_bufs = 0;
        qp->total_io_bufs = 0;
        spin_unlock(&qp->io_buf_list_put_lock);
        spin_unlock_irq(&qp->io_buf_list_get_lock);
    }
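
    /*
     * Insert each buffer into cbuf in ascending sli4_xritag order so the
     * XRIs can be reassigned sequentially.
     */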
    for (idx = 0; idx < cnt; idx++) {
        list_remove_head(&blist, lpfc_cmd, struct lpfc_io_buf, list);
        if (!lpfc_cmd)
            return cnt;
        if (idx == 0) {
            list_add_tail(&lpfc_cmd->list, cbuf);
            continue;
        }
        xri = lpfc_cmd->cur_iocbq.sli4_xritag;
        inserted = 0;
        prev_iobufp = NULL;
        list_for_each_entry(iobufp, cbuf, list) {
            if (xri < iobufp->cur_iocbq.sli4_xritag) {
                if (prev_iobufp)
                    list_add(&lpfc_cmd->list,
                             &prev_iobufp->list);
                else
                    list_add(&lpfc_cmd->list, cbuf);
                inserted = 1;
                break;
            }
            prev_iobufp = iobufp;
        }
        if (!inserted)
            list_add_tail(&lpfc_cmd->list, cbuf);
    }
    return cnt;
}
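
/**
 * lpfc_io_buf_replenish - redistribute IO buffers across hardware queues
 * @phba: pointer to lpfc hba data structure.
 * @cbuf: list of IO buffers to distribute.
 *
 * Hands the buffers on @cbuf back to the hardware queues round-robin,
 * rebinding each buffer to its new queue, and returns the number moved.
 */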
int
lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
{
    struct lpfc_sli4_hdw_queue *qp;
    struct lpfc_io_buf *lpfc_cmd;
    int idx, cnt;

    qp = phba->sli4_hba.hdwq;
    cnt = 0;
    while (!list_empty(cbuf)) {
        for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
            list_remove_head(cbuf, lpfc_cmd,
                             struct lpfc_io_buf, list);
            if (!lpfc_cmd)
                return cnt;
            cnt++;
            qp = &phba->sli4_hba.hdwq[idx];
            lpfc_cmd->hdwq_no = idx;
            lpfc_cmd->hdwq = qp;
            lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
            lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
            spin_lock(&qp->io_buf_list_put_lock);
            list_add_tail(&lpfc_cmd->list,
                          &qp->lpfc_io_buf_list_put);
            qp->put_io_bufs++;
            qp->total_io_bufs++;
            spin_unlock(&qp->io_buf_list_put_lock);
        }
    }
    return cnt;
}
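
/**
 * lpfc_sli4_io_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * Recomputes the IO XRI budget left after the ELS allocation, frees any
 * excess IO buffers and reassigns physical XRIs to the remainder, as
 * needed after a port function reset.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - No available memory
 */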
int
lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
{
    struct lpfc_io_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
    uint16_t i, lxri, els_xri_cnt;
    uint16_t io_xri_cnt, io_xri_max;
    LIST_HEAD(io_sgl_list);
    int rc, cnt;
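
    /*
     * The IO XRI ceiling is whatever the port supports beyond the XRIs
     * reserved for ELS.
     */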
    els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
    io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
    phba->sli4_hba.io_xri_max = io_xri_max;

    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
            "6074 Current allocated XRI sgl count:%d, "
            "maximum XRI count:%d\n",
            phba->sli4_hba.io_xri_cnt,
            phba->sli4_hba.io_xri_max);

    cnt = lpfc_io_buf_flush(phba, &io_sgl_list);

    if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
        io_xri_cnt = phba->sli4_hba.io_xri_cnt -
            phba->sli4_hba.io_xri_max;

        for (i = 0; i < io_xri_cnt; i++) {
            list_remove_head(&io_sgl_list, lpfc_ncmd,
                             struct lpfc_io_buf, list);
            if (lpfc_ncmd) {
                dma_pool_free(phba->lpfc_sg_dma_buf_pool,
                              lpfc_ncmd->data,
                              lpfc_ncmd->dma_handle);
                kfree(lpfc_ncmd);
            }
        }
        phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
    }

    lpfc_ncmd = NULL;
    lpfc_ncmd_next = NULL;
    phba->sli4_hba.io_xri_cnt = cnt;
    list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
                             &io_sgl_list, list) {
        lxri = lpfc_sli4_next_xritag(phba);
        if (lxri == NO_XRI) {
            lpfc_printf_log(phba, KERN_ERR,
                    LOG_TRACE_EVENT,
                    "6075 Failed to allocate xri for "
                    "nvme buffer\n");
            rc = -ENOMEM;
            goto out_free_mem;
        }
        lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
        lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
    }
    cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
    return 0;

out_free_mem:
    lpfc_io_free(phba);
    return rc;
}