/* linux/drivers/scsi/lpfc/lpfc_sli.c */
   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2021 Broadcom. All Rights Reserved. The term *
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
   6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
  10 *                                                                 *
  11 * This program is free software; you can redistribute it and/or   *
  12 * modify it under the terms of version 2 of the GNU General       *
  13 * Public License as published by the Free Software Foundation.    *
  14 * This program is distributed in the hope that it will be useful. *
  15 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  16 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  17 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  18 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  19 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  20 * more details, a copy of which can be found in the file COPYING  *
  21 * included with this package.                                     *
  22 *******************************************************************/
  23
  24#include <linux/blkdev.h>
  25#include <linux/pci.h>
  26#include <linux/interrupt.h>
  27#include <linux/delay.h>
  28#include <linux/slab.h>
  29#include <linux/lockdep.h>
  30
  31#include <scsi/scsi.h>
  32#include <scsi/scsi_cmnd.h>
  33#include <scsi/scsi_device.h>
  34#include <scsi/scsi_host.h>
  35#include <scsi/scsi_transport_fc.h>
  36#include <scsi/fc/fc_fs.h>
  37#include <linux/aer.h>
  38#include <linux/crash_dump.h>
  39#ifdef CONFIG_X86
  40#include <asm/set_memory.h>
  41#endif
  42
  43#include "lpfc_hw4.h"
  44#include "lpfc_hw.h"
  45#include "lpfc_sli.h"
  46#include "lpfc_sli4.h"
  47#include "lpfc_nl.h"
  48#include "lpfc_disc.h"
  49#include "lpfc.h"
  50#include "lpfc_scsi.h"
  51#include "lpfc_nvme.h"
  52#include "lpfc_crtn.h"
  53#include "lpfc_logmsg.h"
  54#include "lpfc_compat.h"
  55#include "lpfc_debugfs.h"
  56#include "lpfc_vport.h"
  57#include "lpfc_version.h"
  58
  59/* There are only four IOCB completion types. */
  60typedef enum _lpfc_iocb_type {
  61        LPFC_UNKNOWN_IOCB,
  62        LPFC_UNSOL_IOCB,
  63        LPFC_SOL_IOCB,
  64        LPFC_ABORT_IOCB
  65} lpfc_iocb_type;
  66
  67
  68/* Provide function prototypes local to this module. */
  69static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
  70                                  uint32_t);
  71static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
  72                              uint8_t *, uint32_t *);
  73static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
  74                                                         struct lpfc_iocbq *);
  75static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
  76                                      struct hbq_dmabuf *);
  77static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
  78                                          struct hbq_dmabuf *dmabuf);
  79static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
  80                                   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
  81static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
  82                                       int);
  83static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
  84                                     struct lpfc_queue *eq,
  85                                     struct lpfc_eqe *eqe);
  86static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
  87static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
  88static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
  89static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
  90                                    struct lpfc_queue *cq,
  91                                    struct lpfc_cqe *cqe);
  92
/*
 * Prebuilt FCP WQE templates (IREAD, IWRITE, ICMND).  The invariant
 * words are filled in once by lpfc_wqe_cmd_template(); non-static, so
 * presumably referenced from other lpfc source files as well.
 */
union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;
  96
  97static IOCB_t *
  98lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
  99{
 100        return &iocbq->iocb;
 101}
 102
/**
 * lpfc_wqe_cmd_template - Initialize the shared FCP WQE templates
 *
 * Pre-builds the invariant words of the three FCP work queue entry
 * templates (IREAD, IWRITE, ICMND).  Each template is zeroed and then
 * only the fields that are constant for every I/O are set; the words
 * noted as "variable" below are expected to be filled in per-I/O by
 * the issuing path.
 **/
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - command, parameter check, class, context tag type */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - command, parameter check, class, context tag type */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - command, class, context tag type; pu is zero (no data) */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9  - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
 224
 225#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
 226/**
 227 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 228 * @srcp: Source memory pointer.
 229 * @destp: Destination memory pointer.
 230 * @cnt: Number of words required to be copied.
 231 *       Must be a multiple of sizeof(uint64_t)
 232 *
 233 * This function is used for copying data between driver memory
 234 * and the SLI WQ. This function also changes the endianness
 235 * of each word if native endianness is different from SLI
 236 * endianness. This function can be called with or without
 237 * lock.
 238 **/
 239static void
 240lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
 241{
 242        uint64_t *src = srcp;
 243        uint64_t *dest = destp;
 244        int i;
 245
 246        for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
 247                *dest++ = *src++;
 248}
 249#else
 250#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
 251#endif
 252
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If the queue is full this function returns -EBUSY; if @q is
 * invalid it returns -ENOMEM; an unrecognized doorbell format yields -EINVAL.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			    q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			    q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
 349
 350/**
 351 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 352 * @q: The Work Queue to operate on.
 353 * @index: The index to advance the hba index to.
 354 *
 355 * This routine will update the HBA index of a queue to reflect consumption of
 356 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 357 * an entry the host calls this function to update the queue's internal
 358 * pointers.
 359 **/
 360static void
 361lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
 362{
 363        /* sanity check on queue memory */
 364        if (unlikely(!q))
 365                return;
 366
 367        q->hba_index = index;
 368}
 369
 370/**
 371 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
 372 * @q: The Mailbox Queue to operate on.
 373 * @mqe: The Mailbox Queue Entry to put on the Work queue.
 374 *
 375 * This routine will copy the contents of @mqe to the next available entry on
 376 * the @q. This function will then ring the Work Queue Doorbell to signal the
 377 * HBA to start processing the Work Queue Entry. This function returns 0 if
 378 * successful. If no entries are available on @q then this function will return
 379 * -ENOMEM.
 380 * The caller is expected to hold the hbalock when calling this routine.
 381 **/
 382static uint32_t
 383lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
 384{
 385        struct lpfc_mqe *temp_mqe;
 386        struct lpfc_register doorbell;
 387
 388        /* sanity check on queue memory */
 389        if (unlikely(!q))
 390                return -ENOMEM;
 391        temp_mqe = lpfc_sli4_qe(q, q->host_index);
 392
 393        /* If the host has not yet processed the next entry then we are done */
 394        if (((q->host_index + 1) % q->entry_count) == q->hba_index)
 395                return -ENOMEM;
 396        lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
 397        /* Save off the mailbox pointer for completion */
 398        q->phba->mbox = (MAILBOX_t *)temp_mqe;
 399
 400        /* Update the host index before invoking device */
 401        q->host_index = ((q->host_index + 1) % q->entry_count);
 402
 403        /* Ring Doorbell */
 404        doorbell.word0 = 0;
 405        bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
 406        bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
 407        writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
 408        return 0;
 409}
 410
 411/**
 412 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 413 * @q: The Mailbox Queue to operate on.
 414 *
 415 * This routine will update the HBA index of a queue to reflect consumption of
 416 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 417 * an entry the host calls this function to update the queue's internal
 418 * pointers. This routine returns the number of entries that were consumed by
 419 * the HBA.
 420 **/
 421static uint32_t
 422lpfc_sli4_mq_release(struct lpfc_queue *q)
 423{
 424        /* sanity check on queue memory */
 425        if (unlikely(!q))
 426                return 0;
 427
 428        /* Clear the mailbox pointer for completion */
 429        q->phba->mbox = NULL;
 430        q->hba_index = ((q->hba_index + 1) % q->entry_count);
 431        return 1;
 432}
 433
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 *
 * Return: pointer to the EQE at the current host index, or NULL when the
 * entry's valid bit does not match the queue's expected phase (q->qe_valid).
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
 469
 470/**
 471 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 472 * @q: The Event Queue to disable interrupts
 473 *
 474 **/
 475void
 476lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
 477{
 478        struct lpfc_register doorbell;
 479
 480        doorbell.word0 = 0;
 481        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
 482        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
 483        bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
 484                (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
 485        bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
 486        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
 487}
 488
 489/**
 490 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 491 * @q: The Event Queue to disable interrupts
 492 *
 493 **/
 494void
 495lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
 496{
 497        struct lpfc_register doorbell;
 498
 499        doorbell.word0 = 0;
 500        bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
 501        writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
 502}
 503
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory; nothing to do if no work and no arm */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
 541
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell. if_type 6 variant.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory; nothing to do if no work and no arm */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
 574
 575static void
 576__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
 577                        struct lpfc_eqe *eqe)
 578{
 579        if (!phba->sli4_hba.pc_sli4_params.eqav)
 580                bf_set_le32(lpfc_eqe_valid, eqe, 0);
 581
 582        eq->host_index = ((eq->host_index + 1) % eq->entry_count);
 583
 584        /* if the index wrapped around, toggle the valid bit */
 585        if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
 586                eq->qe_valid = (eq->qe_valid) ? 0 : 1;
 587}
 588
/**
 * lpfc_sli4_eqcq_flush - Discard all pending entries on an EQ and its CQs
 * @phba: adapter with the EQ
 * @eq: the Event Queue to flush
 *
 * Walks every valid EQE on @eq and drops it on the floor.  For each EQE,
 * the child CQ it refers to (matched by queue id in @eq's child list) is
 * also drained of all valid CQEs.  Each drained CQ, and finally the EQ
 * itself, is acknowledged to the HBA and re-armed via its doorbell.
 **/
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		/* find the child CQ with a matching queue id */
		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
 632
/**
 * lpfc_sli4_process_eq - Process entries on an event queue
 * @phba: adapter with the EQ
 * @eq: the Event Queue to process
 * @rearm: doorbell arm state to apply on exit (LPFC_QUEUE_REARM/NOARM)
 *
 * Claims exclusive use of @eq via cmpxchg on eq->queue_claimed, then
 * dispatches each valid EQE to lpfc_sli4_hba_handle_eqe() and consumes
 * it.  Consumed entries are acknowledged to the HBA in batches of
 * eq->notify_interval; processing stops after eq->max_proc_limit
 * entries.  On every exit path the EQ doorbell is written with the
 * outstanding consumed count and the requested @rearm state.
 *
 * Return: number of EQEs processed, or 0 if the EQ was already claimed
 * by another context.
 **/
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	/* only one context may drain the EQ at a time */
	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		/* stop when the per-invocation budget is exhausted */
		if (!(++count % eq->max_proc_limit))
			break;

		/* periodically release consumed entries back to the HBA */
		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	/* release the claim on the EQ */
	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
 674
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
 709
 710static void
 711__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
 712                        struct lpfc_cqe *cqe)
 713{
 714        if (!phba->sli4_hba.pc_sli4_params.cqav)
 715                bf_set_le32(lpfc_cqe_valid, cqe, 0);
 716
 717        cq->host_index = ((cq->host_index + 1) % cq->entry_count);
 718
 719        /* if the index wrapped around, toggle the valid bit */
 720        if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
 721                cq->qe_valid = (cq->qe_valid) ? 0 : 1;
 722}
 723
 724/**
 725 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 726 * @phba: the adapter with the CQ
 727 * @q: The Completion Queue that the host has completed processing for.
 728 * @count: the number of elements that were consumed
 729 * @arm: Indicates whether the host wants to arms this CQ.
 730 *
 731 * This routine will notify the HBA, by ringing the doorbell, that the
 732 * CQEs have been processed. The @arm parameter specifies whether the
 733 * queue should be rearmed when ringing the doorbell.
 734 **/
 735void
 736lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
 737                     uint32_t count, bool arm)
 738{
 739        struct lpfc_register doorbell;
 740
 741        /* sanity check on queue memory */
 742        if (unlikely(!q || (count == 0 && !arm)))
 743                return;
 744
 745        /* ring doorbell for number popped */
 746        doorbell.word0 = 0;
 747        if (arm)
 748                bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
 749        bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
 750        bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
 751        bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
 752                        (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
 753        bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
 754        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
 755}
 756
 757/**
 758 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 759 * @phba: the adapter with the CQ
 760 * @q: The Completion Queue that the host has completed processing for.
 761 * @count: the number of elements that were consumed
 762 * @arm: Indicates whether the host wants to arms this CQ.
 763 *
 764 * This routine will notify the HBA, by ringing the doorbell, that the
 765 * CQEs have been processed. The @arm parameter specifies whether the
 766 * queue should be rearmed when ringing the doorbell.
 767 **/
 768void
 769lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
 770                         uint32_t count, bool arm)
 771{
 772        struct lpfc_register doorbell;
 773
 774        /* sanity check on queue memory */
 775        if (unlikely(!q || (count == 0 && !arm)))
 776                return;
 777
 778        /* ring doorbell for number popped */
 779        doorbell.word0 = 0;
 780        if (arm)
 781                bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
 782        bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
 783        bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
 784        writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
 785}
 786
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue the entry pair is destined for.
 * @dq: The Data Receive Queue paired with @hq.
 * @hrqe: The Header Receive Queue Entry to copy onto @hq.
 * @drqe: The Data Receive Queue Entry to copy onto @dq.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the
 * Receive Queue Doorbell to signal the HBA to start processing the
 * Receive Queue Entry. This function returns the index that the rqe was
 * copied to if successful. If no entries are available on @hq then this
 * function will return -EBUSY; missing queues return -ENOMEM and
 * mismatched or malformed queues return -EINVAL.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	/* The header and data queues must be a synchronized HRQ/DRQ pair */
	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell; batched every
	 * notify_interval postings rather than on every entry.
	 */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			/* Unknown doorbell format - cannot notify the HBA */
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
 850
 851/*
 852 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 853 *
 854 * This routine will update the HBA index of a queue to reflect consumption of
 855 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 856 * consumed an entry the host calls this function to update the queue's
 857 * internal pointers. This routine returns the number of entries that were
 858 * consumed by the HBA.
 859 **/
 860static uint32_t
 861lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
 862{
 863        /* sanity check on queue memory */
 864        if (unlikely(!hq) || unlikely(!dq))
 865                return 0;
 866
 867        if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
 868                return 0;
 869        hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
 870        dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
 871        return 1;
 872}
 873
 874/**
 875 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 876 * @phba: Pointer to HBA context object.
 877 * @pring: Pointer to driver SLI ring object.
 878 *
 879 * This function returns pointer to next command iocb entry
 880 * in the command ring. The caller must hold hbalock to prevent
 881 * other threads consume the next command iocb.
 882 * SLI-2/SLI-3 provide different sized iocbs.
 883 **/
 884static inline IOCB_t *
 885lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 886{
 887        return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
 888                           pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
 889}
 890
 891/**
 892 * lpfc_resp_iocb - Get next response iocb entry in the ring
 893 * @phba: Pointer to HBA context object.
 894 * @pring: Pointer to driver SLI ring object.
 895 *
 896 * This function returns pointer to next response iocb entry
 897 * in the response ring. The caller must hold hbalock to make sure
 898 * that no other thread consume the next response iocb.
 899 * SLI-2/SLI-3 provide different sized iocbs.
 900 **/
 901static inline IOCB_t *
 902lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 903{
 904        return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
 905                           pring->sli.sli3.rspidx * phba->iocb_rsp_size);
 906}
 907
 908/**
 909 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 910 * @phba: Pointer to HBA context object.
 911 *
 912 * This function is called with hbalock held. This function
 913 * allocates a new driver iocb object from the iocb pool. If the
 914 * allocation is successful, it returns pointer to the newly
 915 * allocated iocb object else it returns NULL.
 916 **/
 917struct lpfc_iocbq *
 918__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 919{
 920        struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
 921        struct lpfc_iocbq * iocbq = NULL;
 922
 923        lockdep_assert_held(&phba->hbalock);
 924
 925        list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
 926        if (iocbq)
 927                phba->iocb_cnt++;
 928        if (phba->iocb_cnt > phba->iocb_max)
 929                phba->iocb_max = phba->iocb_cnt;
 930        return iocbq;
 931}
 932
 933/**
 934 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 935 * @phba: Pointer to HBA context object.
 936 * @xritag: XRI value.
 937 *
 938 * This function clears the sglq pointer from the array of active
 939 * sglq's. The xritag that is passed in is used to index into the
 940 * array. Before the xritag can be used it needs to be adjusted
 941 * by subtracting the xribase.
 942 *
 943 * Returns sglq ponter = success, NULL = Failure.
 944 **/
 945struct lpfc_sglq *
 946__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 947{
 948        struct lpfc_sglq *sglq;
 949
 950        sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
 951        phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
 952        return sglq;
 953}
 954
 955/**
 956 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 957 * @phba: Pointer to HBA context object.
 958 * @xritag: XRI value.
 959 *
 960 * This function returns the sglq pointer from the array of active
 961 * sglq's. The xritag that is passed in is used to index into the
 962 * array. Before the xritag can be used it needs to be adjusted
 963 * by subtracting the xribase.
 964 *
 965 * Returns sglq ponter = success, NULL = Failure.
 966 **/
 967struct lpfc_sglq *
 968__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 969{
 970        struct lpfc_sglq *sglq;
 971
 972        sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
 973        return sglq;
 974}
 975
 976/**
 977 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 978 * @phba: Pointer to HBA context object.
 979 * @xritag: xri used in this exchange.
 980 * @rrq: The RRQ to be cleared.
 981 *
 982 **/
 983void
 984lpfc_clr_rrq_active(struct lpfc_hba *phba,
 985                    uint16_t xritag,
 986                    struct lpfc_node_rrq *rrq)
 987{
 988        struct lpfc_nodelist *ndlp = NULL;
 989
 990        /* Lookup did to verify if did is still active on this vport */
 991        if (rrq->vport)
 992                ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
 993
 994        if (!ndlp)
 995                goto out;
 996
 997        if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
 998                rrq->send_rrq = 0;
 999                rrq->xritag = 0;
1000                rrq->rrq_stop_time = 0;
1001        }
1002out:
1003        mempool_free(rrq, phba->rrq_pool);
1004}
1005
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function acquires the hbalock itself. It checks whether
 * stop_time (ratov from setting rrq active) has been reached for each
 * active rrq; if it has and the send_rrq flag is set then it will call
 * lpfc_send_rrq. If the send_rrq flag is not set then it will just
 * call the routine to clear the rrq and free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	/* Default next wakeup: one full RATOV (+1s margin) from now */
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	/* Move expired rrqs to a private list; remember the earliest
	 * expiry among those still pending for the timer reschedule.
	 */
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	/* Process the expired rrqs outside the hbalock */
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			*  will clear the bit in the xribitmap.
			*/
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
1057
1058/**
1059 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
1060 * @vport: Pointer to vport context object.
1061 * @xri: The xri used in the exchange.
1062 * @did: The targets DID for this exchange.
1063 *
1064 * returns NULL = rrq not found in the phba->active_rrq_list.
1065 *         rrq = rrq for this xri and target.
1066 **/
1067struct lpfc_node_rrq *
1068lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
1069{
1070        struct lpfc_hba *phba = vport->phba;
1071        struct lpfc_node_rrq *rrq;
1072        struct lpfc_node_rrq *nextrrq;
1073        unsigned long iflags;
1074
1075        if (phba->sli_rev != LPFC_SLI_REV4)
1076                return NULL;
1077        spin_lock_irqsave(&phba->hbalock, iflags);
1078        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1079                if (rrq->vport == vport && rrq->xritag == xri &&
1080                                rrq->nlp_DID == did){
1081                        list_del(&rrq->list);
1082                        spin_unlock_irqrestore(&phba->hbalock, iflags);
1083                        return rrq;
1084                }
1085        }
1086        spin_unlock_irqrestore(&phba->hbalock, iflags);
1087        return NULL;
1088}
1089
1090/**
1091 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
1092 * @vport: Pointer to vport context object.
1093 * @ndlp: Pointer to the lpfc_node_list structure.
1094 * If ndlp is NULL Remove all active RRQs for this vport from the
1095 * phba->active_rrq_list and clear the rrq.
1096 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
1097 **/
1098void
1099lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
1100
1101{
1102        struct lpfc_hba *phba = vport->phba;
1103        struct lpfc_node_rrq *rrq;
1104        struct lpfc_node_rrq *nextrrq;
1105        unsigned long iflags;
1106        LIST_HEAD(rrq_list);
1107
1108        if (phba->sli_rev != LPFC_SLI_REV4)
1109                return;
1110        if (!ndlp) {
1111                lpfc_sli4_vport_delete_els_xri_aborted(vport);
1112                lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
1113        }
1114        spin_lock_irqsave(&phba->hbalock, iflags);
1115        list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
1116                if (rrq->vport != vport)
1117                        continue;
1118
1119                if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
1120                        list_move(&rrq->list, &rrq_list);
1121
1122        }
1123        spin_unlock_irqrestore(&phba->hbalock, iflags);
1124
1125        list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
1126                list_del(&rrq->list);
1127                lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
1128        }
1129}
1130
1131/**
1132 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
1133 * @phba: Pointer to HBA context object.
1134 * @ndlp: Targets nodelist pointer for this exchange.
1135 * @xritag: the xri in the bitmap to test.
1136 *
1137 * This function returns:
1138 * 0 = rrq not active for this xri
1139 * 1 = rrq is valid for this xri.
1140 **/
1141int
1142lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1143                        uint16_t  xritag)
1144{
1145        if (!ndlp)
1146                return 0;
1147        if (!ndlp->active_rrqs_xri_bitmap)
1148                return 0;
1149        if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
1150                return 1;
1151        else
1152                return 0;
1153}
1154
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	/* Already marked active: nothing more to do for this xri */
	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	/* Drop the lock for the allocation; the bit set above holds our
	 * claim on this xri while we are unlocked.
	 */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	/* The rrq expires one RATOV (+1s margin) from now */
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	/* First entry on the list: wake the worker to service the rrqs */
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
1234
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage.  This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring =  phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	/* Resolve the node this IO targets; where the ndlp pointer is
	 * stashed in the iocbq depends on the type of IO.
	 */
	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
						struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				/* Wrapped around the whole free list: every
				 * sglq has an rrq pending for this node.
				 */
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		/* Record the sglq as active so it can be found by xri */
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
1310
1311/**
1312 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
1313 * @phba: Pointer to HBA context object.
1314 * @piocbq: Pointer to the iocbq.
1315 *
1316 * This function is called with the sgl_list lock held. This function
1317 * gets a new driver sglq object from the sglq list. If the
1318 * list is not empty then it is successful, it returns pointer to the newly
1319 * allocated sglq object else it returns NULL.
1320 **/
1321struct lpfc_sglq *
1322__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
1323{
1324        struct list_head *lpfc_nvmet_sgl_list;
1325        struct lpfc_sglq *sglq = NULL;
1326
1327        lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1328
1329        lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1330
1331        list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1332        if (!sglq)
1333                return NULL;
1334        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1335        sglq->state = SGL_ALLOCATED;
1336        return sglq;
1337}
1338
1339/**
1340 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1341 * @phba: Pointer to HBA context object.
1342 *
1343 * This function is called with no lock held. This function
1344 * allocates a new driver iocb object from the iocb pool. If the
1345 * allocation is successful, it returns pointer to the newly
1346 * allocated iocb object else it returns NULL.
1347 **/
1348struct lpfc_iocbq *
1349lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1350{
1351        struct lpfc_iocbq * iocbq = NULL;
1352        unsigned long iflags;
1353
1354        spin_lock_irqsave(&phba->hbalock, iflags);
1355        iocbq = __lpfc_sli_get_iocbq(phba);
1356        spin_unlock_irqrestore(&phba->hbalock, iflags);
1357        return iocbq;
1358}
1359
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sqlq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	/* Fetch-and-clear the active sglq for this xri, if one was used */
	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);


	if (sglq)  {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			/* NVMET sglqs go straight back to their own pool */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
			(sglq->state != SGL_XRI_ABORTED)) {
			/* Exchange still busy: park the sglq on the aborted
			 * list until the CQ_ABORTED_XRI event arrives.
			 */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			/* Normal completion: return the sglq to the free
			 * els sgl list.
			 */
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
1446
1447
1448/**
1449 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1450 * @phba: Pointer to HBA context object.
1451 * @iocbq: Pointer to driver iocb object.
1452 *
1453 * This function is called to release the driver iocb object to the
1454 * iocb pool. The iotag in the iocb object does not change for each
1455 * use of the iocb object. This function clears all other fields of
1456 * the iocb object when it is freed. The hbalock is asserted held in
1457 * the code path calling this routine.
1458 **/
1459static void
1460__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1461{
1462        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1463
1464        /*
1465         * Clean all volatile data fields, preserve iotag and node struct.
1466         */
1467        memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1468        iocbq->sli4_xritag = NO_XRI;
1469        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1470}
1471
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	/* Dispatch to the SLI-3 or SLI-4 release routine installed at
	 * driver setup, then account for the freed iocb.
	 */
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
1490
1491/**
1492 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1493 * @phba: Pointer to HBA context object.
1494 * @iocbq: Pointer to driver iocb object.
1495 *
1496 * This function is called with no lock held to release the iocb to
1497 * iocb pool.
1498 **/
1499void
1500lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1501{
1502        unsigned long iflags;
1503
1504        /*
1505         * Clean all volatile data fields, preserve iotag and node struct.
1506         */
1507        spin_lock_irqsave(&phba->hbalock, iflags);
1508        __lpfc_sli_release_iocbq(phba, iocbq);
1509        spin_unlock_irqrestore(&phba->hbalock, iflags);
1510}
1511
1512/**
1513 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1514 * @phba: Pointer to HBA context object.
1515 * @iocblist: List of IOCBs.
1516 * @ulpstatus: ULP status in IOCB command field.
1517 * @ulpWord4: ULP word-4 in IOCB command field.
1518 *
1519 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1520 * on the list by invoking the complete callback function associated with the
1521 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1522 * fields.
1523 **/
1524void
1525lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1526                      uint32_t ulpstatus, uint32_t ulpWord4)
1527{
1528        struct lpfc_iocbq *piocb;
1529
1530        while (!list_empty(iocblist)) {
1531                list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1532                if (piocb->wqe_cmpl) {
1533                        if (piocb->iocb_flag & LPFC_IO_NVME)
1534                                lpfc_nvme_cancel_iocb(phba, piocb,
1535                                                      ulpstatus, ulpWord4);
1536                        else
1537                                lpfc_sli_release_iocbq(phba, piocb);
1538
1539                } else if (piocb->iocb_cmpl) {
1540                        piocb->iocb.ulpStatus = ulpstatus;
1541                        piocb->iocb.un.ulpWord[4] = ulpWord4;
1542                        (piocb->iocb_cmpl) (phba, piocb, piocb);
1543                } else {
1544                        lpfc_sli_release_iocbq(phba, piocb);
1545                }
1546        }
1547        return;
1548}
1549
1550/**
1551 * lpfc_sli_iocb_cmd_type - Get the iocb type
1552 * @iocb_cmnd: iocb command code.
1553 *
1554 * This function is called by ring event handler function to get the iocb type.
1555 * This function translates the iocb command to an iocb command type used to
1556 * decide the final disposition of each completed IOCB.
1557 * The function returns
1558 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1559 * LPFC_SOL_IOCB     if it is a solicited iocb completion
1560 * LPFC_ABORT_IOCB   if it is an abort iocb
1561 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1562 *
1563 * The caller is not required to hold any lock.
1564 **/
1565static lpfc_iocb_type
1566lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1567{
1568        lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1569
1570        if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1571                return 0;
1572
1573        switch (iocb_cmnd) {
1574        case CMD_XMIT_SEQUENCE_CR:
1575        case CMD_XMIT_SEQUENCE_CX:
1576        case CMD_XMIT_BCAST_CN:
1577        case CMD_XMIT_BCAST_CX:
1578        case CMD_ELS_REQUEST_CR:
1579        case CMD_ELS_REQUEST_CX:
1580        case CMD_CREATE_XRI_CR:
1581        case CMD_CREATE_XRI_CX:
1582        case CMD_GET_RPI_CN:
1583        case CMD_XMIT_ELS_RSP_CX:
1584        case CMD_GET_RPI_CR:
1585        case CMD_FCP_IWRITE_CR:
1586        case CMD_FCP_IWRITE_CX:
1587        case CMD_FCP_IREAD_CR:
1588        case CMD_FCP_IREAD_CX:
1589        case CMD_FCP_ICMND_CR:
1590        case CMD_FCP_ICMND_CX:
1591        case CMD_FCP_TSEND_CX:
1592        case CMD_FCP_TRSP_CX:
1593        case CMD_FCP_TRECEIVE_CX:
1594        case CMD_FCP_AUTO_TRSP_CX:
1595        case CMD_ADAPTER_MSG:
1596        case CMD_ADAPTER_DUMP:
1597        case CMD_XMIT_SEQUENCE64_CR:
1598        case CMD_XMIT_SEQUENCE64_CX:
1599        case CMD_XMIT_BCAST64_CN:
1600        case CMD_XMIT_BCAST64_CX:
1601        case CMD_ELS_REQUEST64_CR:
1602        case CMD_ELS_REQUEST64_CX:
1603        case CMD_FCP_IWRITE64_CR:
1604        case CMD_FCP_IWRITE64_CX:
1605        case CMD_FCP_IREAD64_CR:
1606        case CMD_FCP_IREAD64_CX:
1607        case CMD_FCP_ICMND64_CR:
1608        case CMD_FCP_ICMND64_CX:
1609        case CMD_FCP_TSEND64_CX:
1610        case CMD_FCP_TRSP64_CX:
1611        case CMD_FCP_TRECEIVE64_CX:
1612        case CMD_GEN_REQUEST64_CR:
1613        case CMD_GEN_REQUEST64_CX:
1614        case CMD_XMIT_ELS_RSP64_CX:
1615        case DSSCMD_IWRITE64_CR:
1616        case DSSCMD_IWRITE64_CX:
1617        case DSSCMD_IREAD64_CR:
1618        case DSSCMD_IREAD64_CX:
1619        case CMD_SEND_FRAME:
1620                type = LPFC_SOL_IOCB;
1621                break;
1622        case CMD_ABORT_XRI_CN:
1623        case CMD_ABORT_XRI_CX:
1624        case CMD_CLOSE_XRI_CN:
1625        case CMD_CLOSE_XRI_CX:
1626        case CMD_XRI_ABORTED_CX:
1627        case CMD_ABORT_MXRI64_CN:
1628        case CMD_XMIT_BLS_RSP64_CX:
1629                type = LPFC_ABORT_IOCB;
1630                break;
1631        case CMD_RCV_SEQUENCE_CX:
1632        case CMD_RCV_ELS_REQ_CX:
1633        case CMD_RCV_SEQUENCE64_CX:
1634        case CMD_RCV_ELS_REQ64_CX:
1635        case CMD_ASYNC_STATUS:
1636        case CMD_IOCB_RCV_SEQ64_CX:
1637        case CMD_IOCB_RCV_ELS64_CX:
1638        case CMD_IOCB_RCV_CONT64_CX:
1639        case CMD_IOCB_RET_XRI64_CX:
1640                type = LPFC_UNSOL_IOCB;
1641                break;
1642        case CMD_IOCB_XMIT_MSEQ64_CR:
1643        case CMD_IOCB_XMIT_MSEQ64_CX:
1644        case CMD_IOCB_RCV_SEQ_LIST64_CX:
1645        case CMD_IOCB_RCV_ELS_LIST64_CX:
1646        case CMD_IOCB_CLOSE_EXTENDED_CN:
1647        case CMD_IOCB_ABORT_EXTENDED_CN:
1648        case CMD_IOCB_RET_HBQE64_CN:
1649        case CMD_IOCB_FCP_IBIDIR64_CR:
1650        case CMD_IOCB_FCP_IBIDIR64_CX:
1651        case CMD_IOCB_FCP_ITASKMGT64_CX:
1652        case CMD_IOCB_LOGENTRY_CN:
1653        case CMD_IOCB_LOGENTRY_ASYNC_CN:
1654                printk("%s - Unhandled SLI-3 Command x%x\n",
1655                                __func__, iocb_cmnd);
1656                type = LPFC_UNKNOWN_IOCB;
1657                break;
1658        default:
1659                type = LPFC_UNKNOWN_IOCB;
1660                break;
1661        }
1662
1663        return type;
1664}
1665
1666/**
1667 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1668 * @phba: Pointer to HBA context object.
1669 *
1670 * This function is called from SLI initialization code
1671 * to configure every ring of the HBA's SLI interface. The
1672 * caller is not required to hold any lock. This function issues
1673 * a config_ring mailbox command for each ring.
1674 * This function returns zero if successful else returns a negative
1675 * error code.
1676 **/
1677static int
1678lpfc_sli_ring_map(struct lpfc_hba *phba)
1679{
1680        struct lpfc_sli *psli = &phba->sli;
1681        LPFC_MBOXQ_t *pmb;
1682        MAILBOX_t *pmbox;
1683        int i, rc, ret = 0;
1684
1685        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1686        if (!pmb)
1687                return -ENOMEM;
1688        pmbox = &pmb->u.mb;
1689        phba->link_state = LPFC_INIT_MBX_CMDS;
1690        for (i = 0; i < psli->num_rings; i++) {
1691                lpfc_config_ring(phba, i, pmb);
1692                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1693                if (rc != MBX_SUCCESS) {
1694                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1695                                        "0446 Adapter failed to init (%d), "
1696                                        "mbxCmd x%x CFG_RING, mbxStatus x%x, "
1697                                        "ring %d\n",
1698                                        rc, pmbox->mbxCommand,
1699                                        pmbox->mbxStatus, i);
1700                        phba->link_state = LPFC_HBA_ERROR;
1701                        ret = -ENXIO;
1702                        break;
1703                }
1704        }
1705        mempool_free(pmb, phba->mbox_mem_pool);
1706        return ret;
1707}
1708
1709/**
1710 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1711 * @phba: Pointer to HBA context object.
1712 * @pring: Pointer to driver SLI ring object.
1713 * @piocb: Pointer to the driver iocb object.
1714 *
1715 * The driver calls this function with the hbalock held for SLI3 ports or
1716 * the ring lock held for SLI4 ports. The function adds the
1717 * new iocb to txcmplq of the given ring. This function always returns
1718 * 0. If this function is called for ELS ring, this function checks if
1719 * there is a vport associated with the ELS command. This function also
1720 * starts els_tmofunc timer if this is an ELS command.
1721 **/
1722static int
1723lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1724                        struct lpfc_iocbq *piocb)
1725{
1726        if (phba->sli_rev == LPFC_SLI_REV4)
1727                lockdep_assert_held(&pring->ring_lock);
1728        else
1729                lockdep_assert_held(&phba->hbalock);
1730
1731        BUG_ON(!piocb);
1732
1733        list_add_tail(&piocb->list, &pring->txcmplq);
1734        piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1735        pring->txcmplq_cnt++;
1736
1737        if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1738           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1739           (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1740                BUG_ON(!piocb->vport);
1741                if (!(piocb->vport->load_flag & FC_UNLOADING))
1742                        mod_timer(&piocb->vport->els_tmofunc,
1743                                  jiffies +
1744                                  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1745        }
1746
1747        return 0;
1748}
1749
1750/**
1751 * lpfc_sli_ringtx_get - Get first element of the txq
1752 * @phba: Pointer to HBA context object.
1753 * @pring: Pointer to driver SLI ring object.
1754 *
1755 * This function is called with hbalock held to get next
1756 * iocb in txq of the given ring. If there is any iocb in
1757 * the txq, the function returns first iocb in the list after
1758 * removing the iocb from the list, else it returns NULL.
1759 **/
1760struct lpfc_iocbq *
1761lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1762{
1763        struct lpfc_iocbq *cmd_iocb;
1764
1765        lockdep_assert_held(&phba->hbalock);
1766
1767        list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1768        return cmd_iocb;
1769}
1770
1771/**
1772 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1773 * @phba: Pointer to HBA context object.
1774 * @pring: Pointer to driver SLI ring object.
1775 *
1776 * This function is called with hbalock held and the caller must post the
1777 * iocb without releasing the lock. If the caller releases the lock,
1778 * iocb slot returned by the function is not guaranteed to be available.
1779 * The function returns pointer to the next available iocb slot if there
1780 * is available slot in the ring, else it returns NULL.
1781 * If the get index of the ring is ahead of the put index, the function
1782 * will post an error attention event to the worker thread to take the
1783 * HBA to offline state.
1784 **/
1785static IOCB_t *
1786lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1787{
1788        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1789        uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1790
1791        lockdep_assert_held(&phba->hbalock);
1792
1793        if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1794           (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1795                pring->sli.sli3.next_cmdidx = 0;
1796
1797        if (unlikely(pring->sli.sli3.local_getidx ==
1798                pring->sli.sli3.next_cmdidx)) {
1799
1800                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1801
1802                if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1803                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1804                                        "0315 Ring %d issue: portCmdGet %d "
1805                                        "is bigger than cmd ring %d\n",
1806                                        pring->ringno,
1807                                        pring->sli.sli3.local_getidx,
1808                                        max_cmd_idx);
1809
1810                        phba->link_state = LPFC_HBA_ERROR;
1811                        /*
1812                         * All error attention handlers are posted to
1813                         * worker thread
1814                         */
1815                        phba->work_ha |= HA_ERATT;
1816                        phba->work_hs = HS_FFER3;
1817
1818                        lpfc_worker_wake_up(phba);
1819
1820                        return NULL;
1821                }
1822
1823                if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1824                        return NULL;
1825        }
1826
1827        return lpfc_cmd_iocb(phba, pring);
1828}
1829
1830/**
1831 * lpfc_sli_next_iotag - Get an iotag for the iocb
1832 * @phba: Pointer to HBA context object.
1833 * @iocbq: Pointer to driver iocb object.
1834 *
1835 * This function gets an iotag for the iocb. If there is no unused iotag and
1836 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1837 * array and assigns a new iotag.
1838 * The function returns the allocated iotag if successful, else returns zero.
1839 * Zero is not a valid iotag.
1840 * The caller is not required to hold any lock.
1841 **/
1842uint16_t
1843lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1844{
1845        struct lpfc_iocbq **new_arr;
1846        struct lpfc_iocbq **old_arr;
1847        size_t new_len;
1848        struct lpfc_sli *psli = &phba->sli;
1849        uint16_t iotag;
1850
1851        spin_lock_irq(&phba->hbalock);
1852        iotag = psli->last_iotag;
1853        if(++iotag < psli->iocbq_lookup_len) {
1854                psli->last_iotag = iotag;
1855                psli->iocbq_lookup[iotag] = iocbq;
1856                spin_unlock_irq(&phba->hbalock);
1857                iocbq->iotag = iotag;
1858                return iotag;
1859        } else if (psli->iocbq_lookup_len < (0xffff
1860                                           - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1861                new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1862                spin_unlock_irq(&phba->hbalock);
1863                new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
1864                                  GFP_KERNEL);
1865                if (new_arr) {
1866                        spin_lock_irq(&phba->hbalock);
1867                        old_arr = psli->iocbq_lookup;
1868                        if (new_len <= psli->iocbq_lookup_len) {
1869                                /* highly unprobable case */
1870                                kfree(new_arr);
1871                                iotag = psli->last_iotag;
1872                                if(++iotag < psli->iocbq_lookup_len) {
1873                                        psli->last_iotag = iotag;
1874                                        psli->iocbq_lookup[iotag] = iocbq;
1875                                        spin_unlock_irq(&phba->hbalock);
1876                                        iocbq->iotag = iotag;
1877                                        return iotag;
1878                                }
1879                                spin_unlock_irq(&phba->hbalock);
1880                                return 0;
1881                        }
1882                        if (psli->iocbq_lookup)
1883                                memcpy(new_arr, old_arr,
1884                                       ((psli->last_iotag  + 1) *
1885                                        sizeof (struct lpfc_iocbq *)));
1886                        psli->iocbq_lookup = new_arr;
1887                        psli->iocbq_lookup_len = new_len;
1888                        psli->last_iotag = iotag;
1889                        psli->iocbq_lookup[iotag] = iocbq;
1890                        spin_unlock_irq(&phba->hbalock);
1891                        iocbq->iotag = iotag;
1892                        kfree(old_arr);
1893                        return iotag;
1894                }
1895        } else
1896                spin_unlock_irq(&phba->hbalock);
1897
1898        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1899                        "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1900                        psli->last_iotag);
1901
1902        return 0;
1903}
1904
1905/**
1906 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1907 * @phba: Pointer to HBA context object.
1908 * @pring: Pointer to driver SLI ring object.
1909 * @iocb: Pointer to iocb slot in the ring.
1910 * @nextiocb: Pointer to driver iocb object which need to be
1911 *            posted to firmware.
1912 *
1913 * This function is called to post a new iocb to the firmware. This
1914 * function copies the new iocb to ring iocb slot and updates the
1915 * ring pointers. It adds the new iocb to txcmplq if there is
1916 * a completion call back for this iocb else the function will free the
1917 * iocb object.  The hbalock is asserted held in the code path calling
1918 * this routine.
1919 **/
1920static void
1921lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1922                IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1923{
1924        /*
1925         * Set up an iotag
1926         */
1927        nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1928
1929
1930        if (pring->ringno == LPFC_ELS_RING) {
1931                lpfc_debugfs_slow_ring_trc(phba,
1932                        "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1933                        *(((uint32_t *) &nextiocb->iocb) + 4),
1934                        *(((uint32_t *) &nextiocb->iocb) + 6),
1935                        *(((uint32_t *) &nextiocb->iocb) + 7));
1936        }
1937
1938        /*
1939         * Issue iocb command to adapter
1940         */
1941        lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1942        wmb();
1943        pring->stats.iocb_cmd++;
1944
1945        /*
1946         * If there is no completion routine to call, we can release the
1947         * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1948         * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1949         */
1950        if (nextiocb->iocb_cmpl)
1951                lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1952        else
1953                __lpfc_sli_release_iocbq(phba, nextiocb);
1954
1955        /*
1956         * Let the HBA know what IOCB slot will be the next one the
1957         * driver will put a command into.
1958         */
1959        pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1960        writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1961}
1962
1963/**
1964 * lpfc_sli_update_full_ring - Update the chip attention register
1965 * @phba: Pointer to HBA context object.
1966 * @pring: Pointer to driver SLI ring object.
1967 *
1968 * The caller is not required to hold any lock for calling this function.
1969 * This function updates the chip attention bits for the ring to inform firmware
1970 * that there are pending work to be done for this ring and requests an
1971 * interrupt when there is space available in the ring. This function is
1972 * called when the driver is unable to post more iocbs to the ring due
1973 * to unavailability of space in the ring.
1974 **/
1975static void
1976lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1977{
1978        int ringno = pring->ringno;
1979
1980        pring->flag |= LPFC_CALL_RING_AVAILABLE;
1981
1982        wmb();
1983
1984        /*
1985         * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1986         * The HBA will tell us when an IOCB entry is available.
1987         */
1988        writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1989        readl(phba->CAregaddr); /* flush */
1990
1991        pring->stats.iocb_cmd_full++;
1992}
1993
1994/**
1995 * lpfc_sli_update_ring - Update chip attention register
1996 * @phba: Pointer to HBA context object.
1997 * @pring: Pointer to driver SLI ring object.
1998 *
1999 * This function updates the chip attention register bit for the
2000 * given ring to inform HBA that there is more work to be done
2001 * in this ring. The caller is not required to hold any lock.
2002 **/
2003static void
2004lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2005{
2006        int ringno = pring->ringno;
2007
2008        /*
2009         * Tell the HBA that there is work to do in this ring.
2010         */
2011        if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2012                wmb();
2013                writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2014                readl(phba->CAregaddr); /* flush */
2015        }
2016}
2017
2018/**
2019 * lpfc_sli_resume_iocb - Process iocbs in the txq
2020 * @phba: Pointer to HBA context object.
2021 * @pring: Pointer to driver SLI ring object.
2022 *
2023 * This function is called with hbalock held to post pending iocbs
2024 * in the txq to the firmware. This function is called when driver
2025 * detects space available in the ring.
2026 **/
2027static void
2028lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2029{
2030        IOCB_t *iocb;
2031        struct lpfc_iocbq *nextiocb;
2032
2033        lockdep_assert_held(&phba->hbalock);
2034
2035        /*
2036         * Check to see if:
2037         *  (a) there is anything on the txq to send
2038         *  (b) link is up
2039         *  (c) link attention events can be processed (fcp ring only)
2040         *  (d) IOCB processing is not blocked by the outstanding mbox command.
2041         */
2042
2043        if (lpfc_is_link_up(phba) &&
2044            (!list_empty(&pring->txq)) &&
2045            (pring->ringno != LPFC_FCP_RING ||
2046             phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2047
2048                while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2049                       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2050                        lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2051
2052                if (iocb)
2053                        lpfc_sli_update_ring(phba, pring);
2054                else
2055                        lpfc_sli_update_full_ring(phba, pring);
2056        }
2057
2058        return;
2059}
2060
2061/**
2062 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2063 * @phba: Pointer to HBA context object.
2064 * @hbqno: HBQ number.
2065 *
2066 * This function is called with hbalock held to get the next
2067 * available slot for the given HBQ. If there is free slot
2068 * available for the HBQ it will return pointer to the next available
2069 * HBQ entry else it will return NULL.
2070 **/
2071static struct lpfc_hbq_entry *
2072lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2073{
2074        struct hbq_s *hbqp = &phba->hbqs[hbqno];
2075
2076        lockdep_assert_held(&phba->hbalock);
2077
2078        if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2079            ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2080                hbqp->next_hbqPutIdx = 0;
2081
2082        if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2083                uint32_t raw_index = phba->hbq_get[hbqno];
2084                uint32_t getidx = le32_to_cpu(raw_index);
2085
2086                hbqp->local_hbqGetIdx = getidx;
2087
2088                if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2089                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2090                                        "1802 HBQ %d: local_hbqGetIdx "
2091                                        "%u is > than hbqp->entry_count %u\n",
2092                                        hbqno, hbqp->local_hbqGetIdx,
2093                                        hbqp->entry_count);
2094
2095                        phba->link_state = LPFC_HBA_ERROR;
2096                        return NULL;
2097                }
2098
2099                if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2100                        return NULL;
2101        }
2102
2103        return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2104                        hbqp->hbqPutIdx;
2105}
2106
2107/**
2108 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2109 * @phba: Pointer to HBA context object.
2110 *
2111 * This function is called with no lock held to free all the
2112 * hbq buffers while uninitializing the SLI interface. It also
2113 * frees the HBQ buffers returned by the firmware but not yet
2114 * processed by the upper layers.
2115 **/
2116void
2117lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2118{
2119        struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2120        struct hbq_dmabuf *hbq_buf;
2121        unsigned long flags;
2122        int i, hbq_count;
2123
2124        hbq_count = lpfc_sli_hbq_count();
2125        /* Return all memory used by all HBQs */
2126        spin_lock_irqsave(&phba->hbalock, flags);
2127        for (i = 0; i < hbq_count; ++i) {
2128                list_for_each_entry_safe(dmabuf, next_dmabuf,
2129                                &phba->hbqs[i].hbq_buffer_list, list) {
2130                        hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2131                        list_del(&hbq_buf->dbuf.list);
2132                        (phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2133                }
2134                phba->hbqs[i].buffer_count = 0;
2135        }
2136
2137        /* Mark the HBQs not in use */
2138        phba->hbq_in_use = 0;
2139        spin_unlock_irqrestore(&phba->hbalock, flags);
2140}
2141
2142/**
2143 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2144 * @phba: Pointer to HBA context object.
2145 * @hbqno: HBQ number.
2146 * @hbq_buf: Pointer to HBQ buffer.
2147 *
2148 * This function is called with the hbalock held to post a
2149 * hbq buffer to the firmware. If the function finds an empty
2150 * slot in the HBQ, it will post the buffer. The function will return
2151 * pointer to the hbq entry if it successfully post the buffer
2152 * else it will return NULL.
2153 **/
2154static int
2155lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2156                         struct hbq_dmabuf *hbq_buf)
2157{
2158        lockdep_assert_held(&phba->hbalock);
2159        return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2160}
2161
2162/**
2163 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2164 * @phba: Pointer to HBA context object.
2165 * @hbqno: HBQ number.
2166 * @hbq_buf: Pointer to HBQ buffer.
2167 *
2168 * This function is called with the hbalock held to post a hbq buffer to the
2169 * firmware. If the function finds an empty slot in the HBQ, it will post the
2170 * buffer and place it on the hbq_buffer_list. The function will return zero if
2171 * it successfully post the buffer else it will return an error.
2172 **/
2173static int
2174lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2175                            struct hbq_dmabuf *hbq_buf)
2176{
2177        struct lpfc_hbq_entry *hbqe;
2178        dma_addr_t physaddr = hbq_buf->dbuf.phys;
2179
2180        lockdep_assert_held(&phba->hbalock);
2181        /* Get next HBQ entry slot to use */
2182        hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2183        if (hbqe) {
2184                struct hbq_s *hbqp = &phba->hbqs[hbqno];
2185
2186                hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2187                hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2188                hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2189                hbqe->bde.tus.f.bdeFlags = 0;
2190                hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2191                hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2192                                /* Sync SLIM */
2193                hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2194                writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2195                                /* flush */
2196                readl(phba->hbq_put + hbqno);
2197                list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2198                return 0;
2199        } else
2200                return -ENOMEM;
2201}
2202
2203/**
2204 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2205 * @phba: Pointer to HBA context object.
2206 * @hbqno: HBQ number.
2207 * @hbq_buf: Pointer to HBQ buffer.
2208 *
2209 * This function is called with the hbalock held to post an RQE to the SLI4
2210 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2211 * the hbq_buffer_list and return zero, otherwise it will return an error.
2212 **/
2213static int
2214lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2215                            struct hbq_dmabuf *hbq_buf)
2216{
2217        int rc;
2218        struct lpfc_rqe hrqe;
2219        struct lpfc_rqe drqe;
2220        struct lpfc_queue *hrq;
2221        struct lpfc_queue *drq;
2222
2223        if (hbqno != LPFC_ELS_HBQ)
2224                return 1;
2225        hrq = phba->sli4_hba.hdr_rq;
2226        drq = phba->sli4_hba.dat_rq;
2227
2228        lockdep_assert_held(&phba->hbalock);
2229        hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2230        hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2231        drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2232        drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2233        rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2234        if (rc < 0)
2235                return rc;
2236        hbq_buf->tag = (rc | (hbqno << 16));
2237        list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2238        return 0;
2239}
2240
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,	/* upper bound on posted buffers (see
				 * lpfc_sli_hbqbuf_fill_hbqs) */
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,	/* buffers posted at SLI3 init time */
	.add_count = 40,	/* buffers posted per refill request */
};

/* Array of HBQs, indexed by HBQ number */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
2257
2258/**
2259 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2260 * @phba: Pointer to HBA context object.
2261 * @hbqno: HBQ number.
2262 * @count: Number of HBQ buffers to be posted.
2263 *
2264 * This function is called with no lock held to post more hbq buffers to the
2265 * given HBQ. The function returns the number of HBQ buffers successfully
2266 * posted.
2267 **/
2268static int
2269lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2270{
2271        uint32_t i, posted = 0;
2272        unsigned long flags;
2273        struct hbq_dmabuf *hbq_buffer;
2274        LIST_HEAD(hbq_buf_list);
2275        if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2276                return 0;
2277
2278        if ((phba->hbqs[hbqno].buffer_count + count) >
2279            lpfc_hbq_defs[hbqno]->entry_count)
2280                count = lpfc_hbq_defs[hbqno]->entry_count -
2281                                        phba->hbqs[hbqno].buffer_count;
2282        if (!count)
2283                return 0;
2284        /* Allocate HBQ entries */
2285        for (i = 0; i < count; i++) {
2286                hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2287                if (!hbq_buffer)
2288                        break;
2289                list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2290        }
2291        /* Check whether HBQ is still in use */
2292        spin_lock_irqsave(&phba->hbalock, flags);
2293        if (!phba->hbq_in_use)
2294                goto err;
2295        while (!list_empty(&hbq_buf_list)) {
2296                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2297                                 dbuf.list);
2298                hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2299                                      (hbqno << 16));
2300                if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2301                        phba->hbqs[hbqno].buffer_count++;
2302                        posted++;
2303                } else
2304                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2305        }
2306        spin_unlock_irqrestore(&phba->hbalock, flags);
2307        return posted;
2308err:
2309        spin_unlock_irqrestore(&phba->hbalock, flags);
2310        while (!list_empty(&hbq_buf_list)) {
2311                list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2312                                 dbuf.list);
2313                (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2314        }
2315        return 0;
2316}
2317
2318/**
2319 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2320 * @phba: Pointer to HBA context object.
2321 * @qno: HBQ number.
2322 *
2323 * This function posts more buffers to the HBQ. This function
2324 * is called with no lock held. The function returns the number of HBQ entries
2325 * successfully allocated.
2326 **/
2327int
2328lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2329{
2330        if (phba->sli_rev == LPFC_SLI_REV4)
2331                return 0;
2332        else
2333                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2334                                         lpfc_hbq_defs[qno]->add_count);
2335}
2336
2337/**
2338 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2339 * @phba: Pointer to HBA context object.
2340 * @qno:  HBQ queue number.
2341 *
2342 * This function is called from SLI initialization code path with
2343 * no lock held to post initial HBQ buffers to firmware. The
2344 * function returns the number of HBQ entries successfully allocated.
2345 **/
2346static int
2347lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2348{
2349        if (phba->sli_rev == LPFC_SLI_REV4)
2350                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2351                                        lpfc_hbq_defs[qno]->entry_count);
2352        else
2353                return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2354                                         lpfc_hbq_defs[qno]->init_count);
2355}
2356
2357/*
2358 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2359 *
2360 * This function removes the first hbq buffer on an hbq list and returns a
2361 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2362 **/
2363static struct hbq_dmabuf *
2364lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2365{
2366        struct lpfc_dmabuf *d_buf;
2367
2368        list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2369        if (!d_buf)
2370                return NULL;
2371        return container_of(d_buf, struct hbq_dmabuf, dbuf);
2372}
2373
2374/**
2375 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2376 * @phba: Pointer to HBA context object.
2377 * @hrq: HBQ number.
2378 *
2379 * This function removes the first RQ buffer on an RQ buffer list and returns a
2380 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2381 **/
2382static struct rqb_dmabuf *
2383lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2384{
2385        struct lpfc_dmabuf *h_buf;
2386        struct lpfc_rqb *rqbp;
2387
2388        rqbp = hrq->rqbp;
2389        list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2390                         struct lpfc_dmabuf, list);
2391        if (!h_buf)
2392                return NULL;
2393        rqbp->buffer_count--;
2394        return container_of(h_buf, struct rqb_dmabuf, hbuf);
2395}
2396
2397/**
2398 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2399 * @phba: Pointer to HBA context object.
2400 * @tag: Tag of the hbq buffer.
2401 *
2402 * This function searches for the hbq buffer associated with the given tag in
2403 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2404 * otherwise it returns NULL.
2405 **/
2406static struct hbq_dmabuf *
2407lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2408{
2409        struct lpfc_dmabuf *d_buf;
2410        struct hbq_dmabuf *hbq_buf;
2411        uint32_t hbqno;
2412
2413        hbqno = tag >> 16;
2414        if (hbqno >= LPFC_MAX_HBQS)
2415                return NULL;
2416
2417        spin_lock_irq(&phba->hbalock);
2418        list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2419                hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2420                if (hbq_buf->tag == tag) {
2421                        spin_unlock_irq(&phba->hbalock);
2422                        return hbq_buf;
2423                }
2424        }
2425        spin_unlock_irq(&phba->hbalock);
2426        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2427                        "1803 Bad hbq tag. Data: x%x x%x\n",
2428                        tag, phba->hbqs[tag >> 16].buffer_count);
2429        return NULL;
2430}
2431
2432/**
2433 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2434 * @phba: Pointer to HBA context object.
2435 * @hbq_buffer: Pointer to HBQ buffer.
2436 *
2437 * This function is called with hbalock. This function gives back
2438 * the hbq buffer to firmware. If the HBQ does not have space to
2439 * post the buffer, it will free the buffer.
2440 **/
2441void
2442lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2443{
2444        uint32_t hbqno;
2445
2446        if (hbq_buffer) {
2447                hbqno = hbq_buffer->tag >> 16;
2448                if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2449                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2450        }
2451}
2452
2453/**
2454 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2455 * @mbxCommand: mailbox command code.
2456 *
2457 * This function is called by the mailbox event handler function to verify
2458 * that the completed mailbox command is a legitimate mailbox command. If the
2459 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2460 * and the mailbox event handler will take the HBA offline.
2461 **/
2462static int
2463lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2464{
2465        uint8_t ret;
2466
2467        switch (mbxCommand) {
2468        case MBX_LOAD_SM:
2469        case MBX_READ_NV:
2470        case MBX_WRITE_NV:
2471        case MBX_WRITE_VPARMS:
2472        case MBX_RUN_BIU_DIAG:
2473        case MBX_INIT_LINK:
2474        case MBX_DOWN_LINK:
2475        case MBX_CONFIG_LINK:
2476        case MBX_CONFIG_RING:
2477        case MBX_RESET_RING:
2478        case MBX_READ_CONFIG:
2479        case MBX_READ_RCONFIG:
2480        case MBX_READ_SPARM:
2481        case MBX_READ_STATUS:
2482        case MBX_READ_RPI:
2483        case MBX_READ_XRI:
2484        case MBX_READ_REV:
2485        case MBX_READ_LNK_STAT:
2486        case MBX_REG_LOGIN:
2487        case MBX_UNREG_LOGIN:
2488        case MBX_CLEAR_LA:
2489        case MBX_DUMP_MEMORY:
2490        case MBX_DUMP_CONTEXT:
2491        case MBX_RUN_DIAGS:
2492        case MBX_RESTART:
2493        case MBX_UPDATE_CFG:
2494        case MBX_DOWN_LOAD:
2495        case MBX_DEL_LD_ENTRY:
2496        case MBX_RUN_PROGRAM:
2497        case MBX_SET_MASK:
2498        case MBX_SET_VARIABLE:
2499        case MBX_UNREG_D_ID:
2500        case MBX_KILL_BOARD:
2501        case MBX_CONFIG_FARP:
2502        case MBX_BEACON:
2503        case MBX_LOAD_AREA:
2504        case MBX_RUN_BIU_DIAG64:
2505        case MBX_CONFIG_PORT:
2506        case MBX_READ_SPARM64:
2507        case MBX_READ_RPI64:
2508        case MBX_REG_LOGIN64:
2509        case MBX_READ_TOPOLOGY:
2510        case MBX_WRITE_WWN:
2511        case MBX_SET_DEBUG:
2512        case MBX_LOAD_EXP_ROM:
2513        case MBX_ASYNCEVT_ENABLE:
2514        case MBX_REG_VPI:
2515        case MBX_UNREG_VPI:
2516        case MBX_HEARTBEAT:
2517        case MBX_PORT_CAPABILITIES:
2518        case MBX_PORT_IOV_CONTROL:
2519        case MBX_SLI4_CONFIG:
2520        case MBX_SLI4_REQ_FTRS:
2521        case MBX_REG_FCFI:
2522        case MBX_UNREG_FCFI:
2523        case MBX_REG_VFI:
2524        case MBX_UNREG_VFI:
2525        case MBX_INIT_VPI:
2526        case MBX_INIT_VFI:
2527        case MBX_RESUME_RPI:
2528        case MBX_READ_EVENT_LOG_STATUS:
2529        case MBX_READ_EVENT_LOG:
2530        case MBX_SECURITY_MGMT:
2531        case MBX_AUTH_PORT:
2532        case MBX_ACCESS_VDATA:
2533                ret = mbxCommand;
2534                break;
2535        default:
2536                ret = MBX_SHUTDOWN;
2537                break;
2538        }
2539        return ret;
2540}
2541
2542/**
2543 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2544 * @phba: Pointer to HBA context object.
2545 * @pmboxq: Pointer to mailbox command.
2546 *
2547 * This is completion handler function for mailbox commands issued from
2548 * lpfc_sli_issue_mbox_wait function. This function is called by the
2549 * mailbox event handler function with no lock held. This function
2550 * will wake up thread waiting on the wait queue pointed by context1
2551 * of the mailbox.
2552 **/
2553void
2554lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2555{
2556        unsigned long drvr_flag;
2557        struct completion *pmbox_done;
2558
2559        /*
2560         * If pmbox_done is empty, the driver thread gave up waiting and
2561         * continued running.
2562         */
2563        pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2564        spin_lock_irqsave(&phba->hbalock, drvr_flag);
2565        pmbox_done = (struct completion *)pmboxq->context3;
2566        if (pmbox_done)
2567                complete(pmbox_done);
2568        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2569        return;
2570}
2571
/*
 * __lpfc_sli_rpi_release - Release an RPI held for a node, if flagged
 * @vport: Pointer to virtual port object.
 * @ndlp: Pointer to the node whose RPI may be released.
 *
 * If NLP_RELEASE_RPI is set, frees the node's RPI back to the SLI4
 * RPI pool, then clears the flag and poisons nlp_rpi under the node
 * lock. Always clears NLP_UNREG_INP to mark that no unreg is in
 * progress. NOTE(review): the final flag clear is done outside
 * ndlp->lock — presumably safe in callers' contexts; confirm.
 */
static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		/* Return the RPI to the free pool before clearing state */
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		/* Poison the rpi so stale users are detectable */
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
2586
2587/**
2588 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2589 * @phba: Pointer to HBA context object.
2590 * @pmb: Pointer to mailbox object.
2591 *
2592 * This function is the default mailbox completion handler. It
2593 * frees the memory resources associated with the completed mailbox
2594 * command. If the completed command is a REG_LOGIN mailbox command,
2595 * this function will issue a UREG_LOGIN to re-claim the RPI.
2596 **/
2597void
2598lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2599{
2600        struct lpfc_vport  *vport = pmb->vport;
2601        struct lpfc_dmabuf *mp;
2602        struct lpfc_nodelist *ndlp;
2603        struct Scsi_Host *shost;
2604        uint16_t rpi, vpi;
2605        int rc;
2606
2607        mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
2608
2609        if (mp) {
2610                lpfc_mbuf_free(phba, mp->virt, mp->phys);
2611                kfree(mp);
2612        }
2613
2614        /*
2615         * If a REG_LOGIN succeeded  after node is destroyed or node
2616         * is in re-discovery driver need to cleanup the RPI.
2617         */
2618        if (!(phba->pport->load_flag & FC_UNLOADING) &&
2619            pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2620            !pmb->u.mb.mbxStatus) {
2621                rpi = pmb->u.mb.un.varWords[0];
2622                vpi = pmb->u.mb.un.varRegLogin.vpi;
2623                if (phba->sli_rev == LPFC_SLI_REV4)
2624                        vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2625                lpfc_unreg_login(phba, vpi, rpi, pmb);
2626                pmb->vport = vport;
2627                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2628                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2629                if (rc != MBX_NOT_FINISHED)
2630                        return;
2631        }
2632
2633        if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2634                !(phba->pport->load_flag & FC_UNLOADING) &&
2635                !pmb->u.mb.mbxStatus) {
2636                shost = lpfc_shost_from_vport(vport);
2637                spin_lock_irq(shost->host_lock);
2638                vport->vpi_state |= LPFC_VPI_REGISTERED;
2639                vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2640                spin_unlock_irq(shost->host_lock);
2641        }
2642
2643        if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2644                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2645                lpfc_nlp_put(ndlp);
2646                pmb->ctx_buf = NULL;
2647                pmb->ctx_ndlp = NULL;
2648        }
2649
2650        if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2651                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2652
2653                /* Check to see if there are any deferred events to process */
2654                if (ndlp) {
2655                        lpfc_printf_vlog(
2656                                vport,
2657                                KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2658                                "1438 UNREG cmpl deferred mbox x%x "
2659                                "on NPort x%x Data: x%x x%x x%px x%x x%x\n",
2660                                ndlp->nlp_rpi, ndlp->nlp_DID,
2661                                ndlp->nlp_flag, ndlp->nlp_defer_did,
2662                                ndlp, vport->load_flag, kref_read(&ndlp->kref));
2663
2664                        if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2665                            (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2666                                ndlp->nlp_flag &= ~NLP_UNREG_INP;
2667                                ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2668                                lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2669                        } else {
2670                                __lpfc_sli_rpi_release(vport, ndlp);
2671                        }
2672
2673                        /* The unreg_login mailbox is complete and had a
2674                         * reference that has to be released.  The PLOGI
2675                         * got its own ref.
2676                         */
2677                        lpfc_nlp_put(ndlp);
2678                        pmb->ctx_ndlp = NULL;
2679                }
2680        }
2681
2682        /* This nlp_put pairs with lpfc_sli4_resume_rpi */
2683        if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2684                ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
2685                lpfc_nlp_put(ndlp);
2686        }
2687
2688        /* Check security permission status on INIT_LINK mailbox command */
2689        if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2690            (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2691                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2692                                "2860 SLI authentication is required "
2693                                "for INIT_LINK but has not done yet\n");
2694
2695        if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2696                lpfc_sli4_mbox_cmd_free(phba, pmb);
2697        else
2698                mempool_free(pmb, phba->mbox_mem_pool);
2699}
2700 /**
2701 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2702 * @phba: Pointer to HBA context object.
2703 * @pmb: Pointer to mailbox object.
2704 *
2705 * This function is the unreg rpi mailbox completion handler. It
2706 * frees the memory resources associated with the completed mailbox
2707 * command. An additional reference is put on the ndlp to prevent
2708 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2709 * the unreg mailbox command completes, this routine puts the
2710 * reference back.
2711 *
2712 **/
2713void
2714lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2715{
2716        struct lpfc_vport  *vport = pmb->vport;
2717        struct lpfc_nodelist *ndlp;
2718
2719        ndlp = pmb->ctx_ndlp;
2720        if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2721                if (phba->sli_rev == LPFC_SLI_REV4 &&
2722                    (bf_get(lpfc_sli_intf_if_type,
2723                     &phba->sli4_hba.sli_intf) >=
2724                     LPFC_SLI_INTF_IF_TYPE_2)) {
2725                        if (ndlp) {
2726                                lpfc_printf_vlog(
2727                                         vport, KERN_INFO, LOG_MBOX | LOG_SLI,
2728                                         "0010 UNREG_LOGIN vpi:%x "
2729                                         "rpi:%x DID:%x defer x%x flg x%x "
2730                                         "x%px\n",
2731                                         vport->vpi, ndlp->nlp_rpi,
2732                                         ndlp->nlp_DID, ndlp->nlp_defer_did,
2733                                         ndlp->nlp_flag,
2734                                         ndlp);
2735                                ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2736
2737                                /* Check to see if there are any deferred
2738                                 * events to process
2739                                 */
2740                                if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2741                                    (ndlp->nlp_defer_did !=
2742                                    NLP_EVT_NOTHING_PENDING)) {
2743                                        lpfc_printf_vlog(
2744                                                vport, KERN_INFO, LOG_DISCOVERY,
2745                                                "4111 UNREG cmpl deferred "
2746                                                "clr x%x on "
2747                                                "NPort x%x Data: x%x x%px\n",
2748                                                ndlp->nlp_rpi, ndlp->nlp_DID,
2749                                                ndlp->nlp_defer_did, ndlp);
2750                                        ndlp->nlp_flag &= ~NLP_UNREG_INP;
2751                                        ndlp->nlp_defer_did =
2752                                                NLP_EVT_NOTHING_PENDING;
2753                                        lpfc_issue_els_plogi(
2754                                                vport, ndlp->nlp_DID, 0);
2755                                } else {
2756                                        __lpfc_sli_rpi_release(vport, ndlp);
2757                                }
2758                                lpfc_nlp_put(ndlp);
2759                        }
2760                }
2761        }
2762
2763        mempool_free(pmb, phba->mbox_mem_pool);
2764}
2765
2766/**
2767 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2768 * @phba: Pointer to HBA context object.
2769 *
2770 * This function is called with no lock held. This function processes all
2771 * the completed mailbox commands and gives it to upper layers. The interrupt
2772 * service routine processes mailbox completion interrupt and adds completed
2773 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2774 * Worker thread call lpfc_sli_handle_mb_event, which will return the
2775 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2776 * function returns the mailbox commands to the upper layer by calling the
2777 * completion handler function of each mailbox.
2778 **/
2779int
2780lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2781{
2782        MAILBOX_t *pmbox;
2783        LPFC_MBOXQ_t *pmb;
2784        int rc;
2785        LIST_HEAD(cmplq);
2786
2787        phba->sli.slistat.mbox_event++;
2788
2789        /* Get all completed mailboxe buffers into the cmplq */
2790        spin_lock_irq(&phba->hbalock);
2791        list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2792        spin_unlock_irq(&phba->hbalock);
2793
2794        /* Get a Mailbox buffer to setup mailbox commands for callback */
2795        do {
2796                list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2797                if (pmb == NULL)
2798                        break;
2799
2800                pmbox = &pmb->u.mb;
2801
2802                if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2803                        if (pmb->vport) {
2804                                lpfc_debugfs_disc_trc(pmb->vport,
2805                                        LPFC_DISC_TRC_MBOX_VPORT,
2806                                        "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2807                                        (uint32_t)pmbox->mbxCommand,
2808                                        pmbox->un.varWords[0],
2809                                        pmbox->un.varWords[1]);
2810                        }
2811                        else {
2812                                lpfc_debugfs_disc_trc(phba->pport,
2813                                        LPFC_DISC_TRC_MBOX,
2814                                        "MBOX cmpl:       cmd:x%x mb:x%x x%x",
2815                                        (uint32_t)pmbox->mbxCommand,
2816                                        pmbox->un.varWords[0],
2817                                        pmbox->un.varWords[1]);
2818                        }
2819                }
2820
2821                /*
2822                 * It is a fatal error if unknown mbox command completion.
2823                 */
2824                if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2825                    MBX_SHUTDOWN) {
2826                        /* Unknown mailbox command compl */
2827                        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2828                                        "(%d):0323 Unknown Mailbox command "
2829                                        "x%x (x%x/x%x) Cmpl\n",
2830                                        pmb->vport ? pmb->vport->vpi :
2831                                        LPFC_VPORT_UNKNOWN,
2832                                        pmbox->mbxCommand,
2833                                        lpfc_sli_config_mbox_subsys_get(phba,
2834                                                                        pmb),
2835                                        lpfc_sli_config_mbox_opcode_get(phba,
2836                                                                        pmb));
2837                        phba->link_state = LPFC_HBA_ERROR;
2838                        phba->work_hs = HS_FFER3;
2839                        lpfc_handle_eratt(phba);
2840                        continue;
2841                }
2842
2843                if (pmbox->mbxStatus) {
2844                        phba->sli.slistat.mbox_stat_err++;
2845                        if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2846                                /* Mbox cmd cmpl error - RETRYing */
2847                                lpfc_printf_log(phba, KERN_INFO,
2848                                        LOG_MBOX | LOG_SLI,
2849                                        "(%d):0305 Mbox cmd cmpl "
2850                                        "error - RETRYing Data: x%x "
2851                                        "(x%x/x%x) x%x x%x x%x\n",
2852                                        pmb->vport ? pmb->vport->vpi :
2853                                        LPFC_VPORT_UNKNOWN,
2854                                        pmbox->mbxCommand,
2855                                        lpfc_sli_config_mbox_subsys_get(phba,
2856                                                                        pmb),
2857                                        lpfc_sli_config_mbox_opcode_get(phba,
2858                                                                        pmb),
2859                                        pmbox->mbxStatus,
2860                                        pmbox->un.varWords[0],
2861                                        pmb->vport ? pmb->vport->port_state :
2862                                        LPFC_VPORT_UNKNOWN);
2863                                pmbox->mbxStatus = 0;
2864                                pmbox->mbxOwner = OWN_HOST;
2865                                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2866                                if (rc != MBX_NOT_FINISHED)
2867                                        continue;
2868                        }
2869                }
2870
2871                /* Mailbox cmd <cmd> Cmpl <cmpl> */
2872                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2873                                "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2874                                "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2875                                "x%x x%x x%x\n",
2876                                pmb->vport ? pmb->vport->vpi : 0,
2877                                pmbox->mbxCommand,
2878                                lpfc_sli_config_mbox_subsys_get(phba, pmb),
2879                                lpfc_sli_config_mbox_opcode_get(phba, pmb),
2880                                pmb->mbox_cmpl,
2881                                *((uint32_t *) pmbox),
2882                                pmbox->un.varWords[0],
2883                                pmbox->un.varWords[1],
2884                                pmbox->un.varWords[2],
2885                                pmbox->un.varWords[3],
2886                                pmbox->un.varWords[4],
2887                                pmbox->un.varWords[5],
2888                                pmbox->un.varWords[6],
2889                                pmbox->un.varWords[7],
2890                                pmbox->un.varWords[8],
2891                                pmbox->un.varWords[9],
2892                                pmbox->un.varWords[10]);
2893
2894                if (pmb->mbox_cmpl)
2895                        pmb->mbox_cmpl(phba,pmb);
2896        } while (1);
2897        return 0;
2898}
2899
2900/**
2901 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2902 * @phba: Pointer to HBA context object.
2903 * @pring: Pointer to driver SLI ring object.
2904 * @tag: buffer tag.
2905 *
2906 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2907 * is set in the tag the buffer is posted for a particular exchange,
2908 * the function will return the buffer without replacing the buffer.
2909 * If the buffer is for unsolicited ELS or CT traffic, this function
2910 * returns the buffer and also posts another buffer to the firmware.
2911 **/
2912static struct lpfc_dmabuf *
2913lpfc_sli_get_buff(struct lpfc_hba *phba,
2914                  struct lpfc_sli_ring *pring,
2915                  uint32_t tag)
2916{
2917        struct hbq_dmabuf *hbq_entry;
2918
2919        if (tag & QUE_BUFTAG_BIT)
2920                return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2921        hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2922        if (!hbq_entry)
2923                return NULL;
2924        return &hbq_entry->dbuf;
2925}
2926
2927/**
2928 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2929 *                              containing a NVME LS request.
2930 * @phba: pointer to lpfc hba data structure.
2931 * @piocb: pointer to the iocbq struct representing the sequence starting
2932 *        frame.
2933 *
2934 * This routine initially validates the NVME LS, validates there is a login
2935 * with the port that sent the LS, and then calls the appropriate nvme host
2936 * or target LS request handler.
2937 **/
2938static void
2939lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
2940{
2941        struct lpfc_nodelist *ndlp;
2942        struct lpfc_dmabuf *d_buf;
2943        struct hbq_dmabuf *nvmebuf;
2944        struct fc_frame_header *fc_hdr;
2945        struct lpfc_async_xchg_ctx *axchg = NULL;
2946        char *failwhy = NULL;
2947        uint32_t oxid, sid, did, fctl, size;
2948        int ret = 1;
2949
2950        d_buf = piocb->context2;
2951
2952        nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2953        fc_hdr = nvmebuf->hbuf.virt;
2954        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2955        sid = sli4_sid_from_fc_hdr(fc_hdr);
2956        did = sli4_did_from_fc_hdr(fc_hdr);
2957        fctl = (fc_hdr->fh_f_ctl[0] << 16 |
2958                fc_hdr->fh_f_ctl[1] << 8 |
2959                fc_hdr->fh_f_ctl[2]);
2960        size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
2961
2962        lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
2963                         oxid, size, sid);
2964
2965        if (phba->pport->load_flag & FC_UNLOADING) {
2966                failwhy = "Driver Unloading";
2967        } else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
2968                failwhy = "NVME FC4 Disabled";
2969        } else if (!phba->nvmet_support && !phba->pport->localport) {
2970                failwhy = "No Localport";
2971        } else if (phba->nvmet_support && !phba->targetport) {
2972                failwhy = "No Targetport";
2973        } else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
2974                failwhy = "Bad NVME LS R_CTL";
2975        } else if (unlikely((fctl & 0x00FF0000) !=
2976                        (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
2977                failwhy = "Bad NVME LS F_CTL";
2978        } else {
2979                axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
2980                if (!axchg)
2981                        failwhy = "No CTX memory";
2982        }
2983
2984        if (unlikely(failwhy)) {
2985                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2986                                "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2987                                sid, oxid, failwhy);
2988                goto out_fail;
2989        }
2990
2991        /* validate the source of the LS is logged in */
2992        ndlp = lpfc_findnode_did(phba->pport, sid);
2993        if (!ndlp ||
2994            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2995             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2996                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2997                                "6216 NVME Unsol rcv: No ndlp: "
2998                                "NPort_ID x%x oxid x%x\n",
2999                                sid, oxid);
3000                goto out_fail;
3001        }
3002
3003        axchg->phba = phba;
3004        axchg->ndlp = ndlp;
3005        axchg->size = size;
3006        axchg->oxid = oxid;
3007        axchg->sid = sid;
3008        axchg->wqeq = NULL;
3009        axchg->state = LPFC_NVME_STE_LS_RCV;
3010        axchg->entry_cnt = 1;
3011        axchg->rqb_buffer = (void *)nvmebuf;
3012        axchg->hdwq = &phba->sli4_hba.hdwq[0];
3013        axchg->payload = nvmebuf->dbuf.virt;
3014        INIT_LIST_HEAD(&axchg->list);
3015
3016        if (phba->nvmet_support) {
3017                ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3018                spin_lock_irq(&ndlp->lock);
3019                if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3020                        ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3021                        spin_unlock_irq(&ndlp->lock);
3022
3023                        /* This reference is a single occurrence to hold the
3024                         * node valid until the nvmet transport calls
3025                         * host_release.
3026                         */
3027                        if (!lpfc_nlp_get(ndlp))
3028                                goto out_fail;
3029
3030                        lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3031                                        "6206 NVMET unsol ls_req ndlp x%px "
3032                                        "DID x%x xflags x%x refcnt %d\n",
3033                                        ndlp, ndlp->nlp_DID,
3034                                        ndlp->fc4_xpt_flags,
3035                                        kref_read(&ndlp->kref));
3036                } else {
3037                        spin_unlock_irq(&ndlp->lock);
3038                }
3039        } else {
3040                ret = lpfc_nvme_handle_lsreq(phba, axchg);
3041        }
3042
3043        /* if zero, LS was successfully handled. If non-zero, LS not handled */
3044        if (!ret)
3045                return;
3046
3047out_fail:
3048        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3049                        "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3050                        "NVMe%s handler failed %d\n",
3051                        did, sid, oxid,
3052                        (phba->nvmet_support) ? "T" : "I", ret);
3053
3054        /* recycle receive buffer */
3055        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3056
3057        /* If start of new exchange, abort it */
3058        if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3059                ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3060
3061        if (ret)
3062                kfree(axchg);
3063}
3064
3065/**
3066 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3067 * @phba: Pointer to HBA context object.
3068 * @pring: Pointer to driver SLI ring object.
3069 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3070 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3071 * @fch_type: the type for the first frame of the sequence.
3072 *
3073 * This function is called with no lock held. This function uses the r_ctl and
3074 * type of the received sequence to find the correct callback function to call
3075 * to process the sequence.
3076 **/
3077static int
3078lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3079                         struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3080                         uint32_t fch_type)
3081{
3082        int i;
3083
3084        switch (fch_type) {
3085        case FC_TYPE_NVME:
3086                lpfc_nvme_unsol_ls_handler(phba, saveq);
3087                return 1;
3088        default:
3089                break;
3090        }
3091
3092        /* unSolicited Responses */
3093        if (pring->prt[0].profile) {
3094                if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3095                        (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3096                                                                        saveq);
3097                return 1;
3098        }
3099        /* We must search, based on rctl / type
3100           for the right routine */
3101        for (i = 0; i < pring->num_mask; i++) {
3102                if ((pring->prt[i].rctl == fch_r_ctl) &&
3103                    (pring->prt[i].type == fch_type)) {
3104                        if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3105                                (pring->prt[i].lpfc_sli_rcv_unsol_event)
3106                                                (phba, pring, saveq);
3107                        return 1;
3108                }
3109        }
3110        return 0;
3111}
3112
3113/**
3114 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3115 * @phba: Pointer to HBA context object.
3116 * @pring: Pointer to driver SLI ring object.
3117 * @saveq: Pointer to the unsolicited iocb.
3118 *
3119 * This function is called with no lock held by the ring event handler
3120 * when there is an unsolicited iocb posted to the response ring by the
3121 * firmware. This function gets the buffer associated with the iocbs
3122 * and calls the event handler for the ring. This function handles both
3123 * qring buffers and hbq buffers.
3124 * When the function returns 1 the caller can free the iocb object otherwise
3125 * upper layer functions will free the iocb objects.
3126 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t           * irsp;
	WORD5            * w5p;
	uint32_t           Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	/* Async status iocbs carry no receive buffers; hand them to the
	 * ring's async handler (if registered) and finish here.
	 */
	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	/* RET_XRI64 with HBQs enabled: the port is returning up to three
	 * unconsumed receive buffers (tags in word 3 and sli3 words 3/7);
	 * resolve each tag and free the buffer back to its pool.
	 */
	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
					irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
				irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	/* HBQ mode: translate the buffer tags of the lead iocb, and of every
	 * chained iocb on saveq->list, into context2/context3 buffer pointers
	 * so the upper-layer handler can reach the payload.
	 */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		/* Same translation for each continuation iocb of the chain. */
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	/* Multi-frame sequence: intermediate frames are parked on
	 * pring->iocb_continue_saveq keyed by ox_id. When the terminal frame
	 * (status != IOSTAT_INTERMED_RSP) arrives, the whole chain is pulled
	 * off and processed below as a single sequence.
	 */
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
				saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		/* NOTE(review): when !found, iocbq is the loop cursor left at
		 * the list head rather than a valid entry; the list_del_init()
		 * below assumes the first frame of the sequence was located.
		 * Confirm that a non-intermediate CONT64 frame always has a
		 * matching first frame queued.
		 */
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	/* Determine the dispatch key: ELS receive commands are fixed to
	 * ELS rctl/type; otherwise read Rctl/Type out of word 5 of the iocb.
	 */
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround: a zero Rctl on the ELS ring for a
		 * receive-sequence command is treated as an ELS request and
		 * word 5 is patched up to match.
		 */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	/* Hand the assembled sequence to the matching unsolicited handler. */
	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
3284
3285/**
3286 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3287 * @phba: Pointer to HBA context object.
3288 * @pring: Pointer to driver SLI ring object.
3289 * @prspiocb: Pointer to response iocb object.
3290 *
3291 * This function looks up the iocb_lookup table to get the command iocb
3292 * corresponding to the given response iocb using the iotag of the
3293 * response iocb. The driver calls this function with the hbalock held
3294 * for SLI3 ports or the ring lock held for SLI4 ports.
3295 * This function returns the command iocb object if it finds the command
3296 * iocb else returns NULL.
3297 **/
3298static struct lpfc_iocbq *
3299lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3300                      struct lpfc_sli_ring *pring,
3301                      struct lpfc_iocbq *prspiocb)
3302{
3303        struct lpfc_iocbq *cmd_iocb = NULL;
3304        uint16_t iotag;
3305        spinlock_t *temp_lock = NULL;
3306        unsigned long iflag = 0;
3307
3308        if (phba->sli_rev == LPFC_SLI_REV4)
3309                temp_lock = &pring->ring_lock;
3310        else
3311                temp_lock = &phba->hbalock;
3312
3313        spin_lock_irqsave(temp_lock, iflag);
3314        iotag = prspiocb->iocb.ulpIoTag;
3315
3316        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3317                cmd_iocb = phba->sli.iocbq_lookup[iotag];
3318                if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3319                        /* remove from txcmpl queue list */
3320                        list_del_init(&cmd_iocb->list);
3321                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3322                        pring->txcmplq_cnt--;
3323                        spin_unlock_irqrestore(temp_lock, iflag);
3324                        return cmd_iocb;
3325                }
3326        }
3327
3328        spin_unlock_irqrestore(temp_lock, iflag);
3329        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3330                        "0317 iotag x%x is out of "
3331                        "range: max iotag x%x wd0 x%x\n",
3332                        iotag, phba->sli.last_iotag,
3333                        *(((uint32_t *) &prspiocb->iocb) + 7));
3334        return NULL;
3335}
3336
3337/**
3338 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3339 * @phba: Pointer to HBA context object.
3340 * @pring: Pointer to driver SLI ring object.
3341 * @iotag: IOCB tag.
3342 *
3343 * This function looks up the iocb_lookup table to get the command iocb
3344 * corresponding to the given iotag. The driver calls this function with
3345 * the ring lock held because this function is an SLI4 port only helper.
3346 * This function returns the command iocb object if it finds the command
3347 * iocb else returns NULL.
3348 **/
3349static struct lpfc_iocbq *
3350lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3351                             struct lpfc_sli_ring *pring, uint16_t iotag)
3352{
3353        struct lpfc_iocbq *cmd_iocb = NULL;
3354        spinlock_t *temp_lock = NULL;
3355        unsigned long iflag = 0;
3356
3357        if (phba->sli_rev == LPFC_SLI_REV4)
3358                temp_lock = &pring->ring_lock;
3359        else
3360                temp_lock = &phba->hbalock;
3361
3362        spin_lock_irqsave(temp_lock, iflag);
3363        if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3364                cmd_iocb = phba->sli.iocbq_lookup[iotag];
3365                if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
3366                        /* remove from txcmpl queue list */
3367                        list_del_init(&cmd_iocb->list);
3368                        cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
3369                        pring->txcmplq_cnt--;
3370                        spin_unlock_irqrestore(temp_lock, iflag);
3371                        return cmd_iocb;
3372                }
3373        }
3374
3375        spin_unlock_irqrestore(temp_lock, iflag);
3376        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3377                        "0372 iotag x%x lookup error: max iotag (x%x) "
3378                        "iocb_flag x%x\n",
3379                        iotag, phba->sli.last_iotag,
3380                        cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
3381        return NULL;
3382}
3383
3384/**
3385 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3386 * @phba: Pointer to HBA context object.
3387 * @pring: Pointer to driver SLI ring object.
3388 * @saveq: Pointer to the response iocb to be processed.
3389 *
3390 * This function is called by the ring event handler for non-fcp
3391 * rings when there is a new response iocb in the response ring.
3392 * The caller is not required to hold any locks. This function
3393 * gets the command iocb associated with the response iocb and
3394 * calls the completion handler for the command iocb. If there
3395 * is no completion handler, the function will free the resources
3396 * associated with command iocb. If the response iocb is for
3397 * an already aborted command iocb, the status of the completion
3398 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3399 * This function always returns 1.
3400 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Find (and dequeue from txcmplq) the originating command iocb. */
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				/* SLI3 driver-initiated abort: clear the
				 * abort flag and rewrite the response status
				 * so the completion handler sees an aborted
				 * exchange.
				 */
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			/* Invoke the command's completion callback. */
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			/* No completion handler: just recycle the iocb. */
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					 "0322 Ring %d handler: "
					 "unexpected completion IoTag x%x "
					 "Data: x%x x%x x%x x%x\n",
					 pring->ringno,
					 saveq->iocb.ulpIoTag,
					 saveq->iocb.ulpStatus,
					 saveq->iocb.un.ulpWord[4],
					 saveq->iocb.ulpCommand,
					 saveq->iocb.ulpContext);
		}
	}

	/* Always 1: the caller may free saveq. */
	return rc;
}
3533
3534/**
3535 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3536 * @phba: Pointer to HBA context object.
3537 * @pring: Pointer to driver SLI ring object.
3538 *
3539 * This function is called from the iocb ring event handlers when
3540 * put pointer is ahead of the get pointer for a ring. This function signal
3541 * an error attention condition to the worker thread and the worker
3542 * thread will transition the HBA to offline state.
3543 **/
3544static void
3545lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3546{
3547        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3548        /*
3549         * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3550         * rsp ring <portRspMax>
3551         */
3552        lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3553                        "0312 Ring %d handler: portRspPut %d "
3554                        "is bigger than rsp ring %d\n",
3555                        pring->ringno, le32_to_cpu(pgp->rspPutInx),
3556                        pring->sli.sli3.numRiocb);
3557
3558        phba->link_state = LPFC_HBA_ERROR;
3559
3560        /*
3561         * All error attention handlers are posted to
3562         * worker thread
3563         */
3564        phba->work_ha |= HA_ERATT;
3565        phba->work_hs = HS_FFER3;
3566
3567        lpfc_worker_wake_up(phba);
3568
3569        return;
3570}
3571
3572/**
3573 * lpfc_poll_eratt - Error attention polling timer timeout handler
3574 * @t: Context to fetch pointer to address of HBA context object from.
3575 *
3576 * This function is invoked by the Error Attention polling timer when the
3577 * timer times out. It will check the SLI Error Attention register for
3578 * possible attention events. If so, it will post an Error Attention event
3579 * and wake up worker thread to process it. Otherwise, it will set up the
3580 * Error Attention polling timer for the next poll.
3581 **/
3582void lpfc_poll_eratt(struct timer_list *t)
3583{
3584        struct lpfc_hba *phba;
3585        uint32_t eratt = 0;
3586        uint64_t sli_intr, cnt;
3587
3588        phba = from_timer(phba, t, eratt_poll);
3589
3590        /* Here we will also keep track of interrupts per sec of the hba */
3591        sli_intr = phba->sli.slistat.sli_intr;
3592
3593        if (phba->sli.slistat.sli_prev_intr > sli_intr)
3594                cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3595                        sli_intr);
3596        else
3597                cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3598
3599        /* 64-bit integer division not supported on 32-bit x86 - use do_div */
3600        do_div(cnt, phba->eratt_poll_interval);
3601        phba->sli.slistat.sli_ips = cnt;
3602
3603        phba->sli.slistat.sli_prev_intr = sli_intr;
3604
3605        /* Check chip HA register for error event */
3606        eratt = lpfc_sli_check_eratt(phba);
3607
3608        if (eratt)
3609                /* Tell the worker thread there is work to do */
3610                lpfc_worker_wake_up(phba);
3611        else
3612                /* Restart the timer for next eratt poll */
3613                mod_timer(&phba->eratt_poll,
3614                          jiffies +
3615                          msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3616        return;
3617}
3618
3619
3620/**
3621 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3622 * @phba: Pointer to HBA context object.
3623 * @pring: Pointer to driver SLI ring object.
3624 * @mask: Host attention register mask for this ring.
3625 *
3626 * This function is called from the interrupt context when there is a ring
3627 * event for the fcp ring. The caller does not hold any lock.
3628 * The function processes each response iocb in the response ring until it
3629 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3630 * LE bit set. The function will call the completion handler of the command iocb
3631 * if the response iocb indicates a completion for a command iocb or it is
3632 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3633 * function if this is an unsolicited iocb.
3634 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3635 * to check it explicitly.
3636 */
3637int
3638lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3639                                struct lpfc_sli_ring *pring, uint32_t mask)
3640{
3641        struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3642        IOCB_t *irsp = NULL;
3643        IOCB_t *entry = NULL;
3644        struct lpfc_iocbq *cmdiocbq = NULL;
3645        struct lpfc_iocbq rspiocbq;
3646        uint32_t status;
3647        uint32_t portRspPut, portRspMax;
3648        int rc = 1;
3649        lpfc_iocb_type type;
3650        unsigned long iflag;
3651        uint32_t rsp_cmpl = 0;
3652
3653        spin_lock_irqsave(&phba->hbalock, iflag);
3654        pring->stats.iocb_event++;
3655
3656        /*
3657         * The next available response entry should never exceed the maximum
3658         * entries.  If it does, treat it as an adapter hardware error.
3659         */
3660        portRspMax = pring->sli.sli3.numRiocb;
3661        portRspPut = le32_to_cpu(pgp->rspPutInx);
3662        if (unlikely(portRspPut >= portRspMax)) {
3663                lpfc_sli_rsp_pointers_error(phba, pring);
3664                spin_unlock_irqrestore(&phba->hbalock, iflag);
3665                return 1;
3666        }
3667        if (phba->fcp_ring_in_use) {
3668                spin_unlock_irqrestore(&phba->hbalock, iflag);
3669                return 1;
3670        } else
3671                phba->fcp_ring_in_use = 1;
3672
3673        rmb();
3674        while (pring->sli.sli3.rspidx != portRspPut) {
3675                /*
3676                 * Fetch an entry off the ring and copy it into a local data
3677                 * structure.  The copy involves a byte-swap since the
3678                 * network byte order and pci byte orders are different.
3679                 */
3680                entry = lpfc_resp_iocb(phba, pring);
3681                phba->last_completion_time = jiffies;
3682
3683                if (++pring->sli.sli3.rspidx >= portRspMax)
3684                        pring->sli.sli3.rspidx = 0;
3685
3686                lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3687                                      (uint32_t *) &rspiocbq.iocb,
3688                                      phba->iocb_rsp_size);
3689                INIT_LIST_HEAD(&(rspiocbq.list));
3690                irsp = &rspiocbq.iocb;
3691
3692                type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3693                pring->stats.iocb_rsp++;
3694                rsp_cmpl++;
3695
3696                if (unlikely(irsp->ulpStatus)) {
3697                        /*
3698                         * If resource errors reported from HBA, reduce
3699                         * queuedepths of the SCSI device.
3700                         */
3701                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3702                            ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3703                             IOERR_NO_RESOURCES)) {
3704                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3705                                phba->lpfc_rampdown_queue_depth(phba);
3706                                spin_lock_irqsave(&phba->hbalock, iflag);
3707                        }
3708
3709                        /* Rsp ring <ringno> error: IOCB */
3710                        lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3711                                        "0336 Rsp Ring %d error: IOCB Data: "
3712                                        "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3713                                        pring->ringno,
3714                                        irsp->un.ulpWord[0],
3715                                        irsp->un.ulpWord[1],
3716                                        irsp->un.ulpWord[2],
3717                                        irsp->un.ulpWord[3],
3718                                        irsp->un.ulpWord[4],
3719                                        irsp->un.ulpWord[5],
3720                                        *(uint32_t *)&irsp->un1,
3721                                        *((uint32_t *)&irsp->un1 + 1));
3722                }
3723
3724                switch (type) {
3725                case LPFC_ABORT_IOCB:
3726                case LPFC_SOL_IOCB:
3727                        /*
3728                         * Idle exchange closed via ABTS from port.  No iocb
3729                         * resources need to be recovered.
3730                         */
3731                        if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3732                                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3733                                                "0333 IOCB cmd 0x%x"
3734                                                " processed. Skipping"
3735                                                " completion\n",
3736                                                irsp->ulpCommand);
3737                                break;
3738                        }
3739
3740                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3741                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3742                                                         &rspiocbq);
3743                        spin_lock_irqsave(&phba->hbalock, iflag);
3744                        if (unlikely(!cmdiocbq))
3745                                break;
3746                        if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3747                                cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3748                        if (cmdiocbq->iocb_cmpl) {
3749                                spin_unlock_irqrestore(&phba->hbalock, iflag);
3750                                (cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3751                                                      &rspiocbq);
3752                                spin_lock_irqsave(&phba->hbalock, iflag);
3753                        }
3754                        break;
3755                case LPFC_UNSOL_IOCB:
3756                        spin_unlock_irqrestore(&phba->hbalock, iflag);
3757                        lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3758                        spin_lock_irqsave(&phba->hbalock, iflag);
3759                        break;
3760                default:
3761                        if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3762                                char adaptermsg[LPFC_MAX_ADPTMSG];
3763                                memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3764                                memcpy(&adaptermsg[0], (uint8_t *) irsp,
3765                                       MAX_MSG_DATA);
3766                                dev_warn(&((phba->pcidev)->dev),
3767                                         "lpfc%d: %s\n",
3768                                         phba->brd_no, adaptermsg);
3769                        } else {
3770                                /* Unknown IOCB command */
3771                                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3772                                                "0334 Unknown IOCB command "
3773                                                "Data: x%x, x%x x%x x%x x%x\n",
3774                                                type, irsp->ulpCommand,
3775                                                irsp->ulpStatus,
3776                                                irsp->ulpIoTag,
3777                                                irsp->ulpContext);
3778                        }
3779                        break;
3780                }
3781
3782                /*
3783                 * The response IOCB has been processed.  Update the ring
3784                 * pointer in SLIM.  If the port response put pointer has not
3785                 * been updated, sync the pgp->rspPutInx and fetch the new port
3786                 * response put pointer.
3787                 */
3788                writel(pring->sli.sli3.rspidx,
3789                        &phba->host_gp[pring->ringno].rspGetInx);
3790
3791                if (pring->sli.sli3.rspidx == portRspPut)
3792                        portRspPut = le32_to_cpu(pgp->rspPutInx);
3793        }
3794
3795        if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3796                pring->stats.iocb_rsp_full++;
3797                status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3798                writel(status, phba->CAregaddr);
3799                readl(phba->CAregaddr);
3800        }
3801        if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3802                pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3803                pring->stats.iocb_cmd_empty++;
3804
3805                /* Force update of the local copy of cmdGetInx */
3806                pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3807                lpfc_sli_resume_iocb(phba, pring);
3808
3809                if ((pring->lpfc_sli_cmd_available))
3810                        (pring->lpfc_sli_cmd_available) (phba, pring);
3811
3812        }
3813
3814        phba->fcp_ring_in_use = 0;
3815        spin_unlock_irqrestore(&phba->hbalock, iflag);
3816        return rc;
3817}
3818
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;	/* nonzero: release the chained iocbs below */
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * LE bit set: the chain is complete.  By default, the driver
		 * expects to free all resources associated with this iocb
		 * completion.
		 */
		free_saveq = 1;
		/* saveq is the head of the chain; irsp now refers to it */
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			/* hbalock must be dropped across the ramp-down call */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			/* drop hbalock while calling the upper-layer handler */
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			/* rc == 0: handler retained saveq; skip the free below */
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			/* CMD_XRI_ABORTED_CX needs no command-iocb lookup */
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					/* no completion handler: recycle iocb */
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			/* release every chained response iocb, then the head */
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		/* chain consumed: tell the caller nothing is pending */
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
3994
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine dispatches to the SLI-revision-specific slow-path ring event
 * handler through the phba->lpfc_sli_handle_slow_ring_event jump-table
 * pointer, so callers need not know which revision (_s3 or _s4 routine)
 * is active.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
4010
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		/* mark the HBA in error and kick off error-attention handling */
		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	/* read barrier: complete the rspPutInx read before touching entries */
	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		/* copy the ring entry out so the slot can be reused by DMA */
		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		/* advance the response index, wrapping at ring end */
		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		/* publish the new get index to the port via SLIM */
		writel(pring->sli.sli3.rspidx,
			&phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->sli.sli3.rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush the posted write */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		/* notify the upper layer that command ring space is available */
		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
4145
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring (unused by this handler).
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;
	int count = 0;		/* events processed this invocation */

	/* clear the "work pending" flag before draining the queue */
	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		/* dispatch on the completion-queue entry code */
		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			count++;
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			count++;
			break;
		default:
			break;
		}

		/* Limit the number of events to 64 to avoid soft lockups */
		/* NOTE(review): HBA_SP_QUEUE_EVT was cleared above, so events
		 * left queued after this break rely on the worker being
		 * re-signaled elsewhere — confirm with the enqueue path.
		 */
		if (count == 64)
			break;
	}
}
4206
4207/**
4208 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4209 * @phba: Pointer to HBA context object.
4210 * @pring: Pointer to driver SLI ring object.
4211 *
4212 * This function aborts all iocbs in the given ring and frees all the iocb
4213 * objects in txq. This function issues an abort iocb for all the iocb commands
4214 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4215 * the return of this function. The caller is not required to hold any locks.
4216 **/
4217void
4218lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4219{
4220        LIST_HEAD(completions);
4221        struct lpfc_iocbq *iocb, *next_iocb;
4222
4223        if (pring->ringno == LPFC_ELS_RING) {
4224                lpfc_fabric_abort_hba(phba);
4225        }
4226
4227        /* Error everything on txq and txcmplq
4228         * First do the txq.
4229         */
4230        if (phba->sli_rev >= LPFC_SLI_REV4) {
4231                spin_lock_irq(&pring->ring_lock);
4232                list_splice_init(&pring->txq, &completions);
4233                pring->txq_cnt = 0;
4234                spin_unlock_irq(&pring->ring_lock);
4235
4236                spin_lock_irq(&phba->hbalock);
4237                /* Next issue ABTS for everything on the txcmplq */
4238                list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4239                        lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4240                spin_unlock_irq(&phba->hbalock);
4241        } else {
4242                spin_lock_irq(&phba->hbalock);
4243                list_splice_init(&pring->txq, &completions);
4244                pring->txq_cnt = 0;
4245
4246                /* Next issue ABTS for everything on the txcmplq */
4247                list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4248                        lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4249                spin_unlock_irq(&phba->hbalock);
4250        }
4251        /* Make sure HBA is alive */
4252        lpfc_issue_hb_tmo(phba);
4253
4254        /* Cancel all the IOCBs from the completions list */
4255        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
4256                              IOERR_SLI_ABORTED);
4257}
4258
4259/**
4260 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4261 * @phba: Pointer to HBA context object.
4262 *
4263 * This function aborts all iocbs in FCP rings and frees all the iocb
4264 * objects in txq. This function issues an abort iocb for all the iocb commands
4265 * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4266 * the return of this function. The caller is not required to hold any locks.
4267 **/
4268void
4269lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4270{
4271        struct lpfc_sli *psli = &phba->sli;
4272        struct lpfc_sli_ring  *pring;