linux/drivers/scsi/bfa/bfa_ioc.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014- QLogic Corporation.
 * All rights reserved
 * www.qlogic.com
 *
 * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfi.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV             3000    /* msecs */
#define BFA_IOC_HWSEM_TOV       500     /* msecs */
#define BFA_IOC_HB_TOV          500     /* msecs */
#define BFA_IOC_TOV_RECOVER      BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV        BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)                                       \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,         \
                        bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)        bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)  (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

#define bfa_ioc_state_disabled(__sm)            \
        (((__sm) == BFI_IOC_UNINIT) ||          \
        ((__sm) == BFI_IOC_INITING) ||          \
        ((__sm) == BFI_IOC_HWINIT) ||           \
        ((__sm) == BFI_IOC_DISABLED) ||         \
        ((__sm) == BFI_IOC_FAIL) ||             \
        ((__sm) == BFI_IOC_CFG_DISABLED))

/*
 * ASIC-specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)                    \
                        ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)                  \
                        ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)                \
                        ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)               \
                        ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)                 \
                        ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)            \
                        ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)           \
                        ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)           \
                ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)              \
                        ((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)         \
                        (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
                        readl((__ioc)->ioc_regs.hfn_mbox_cmd))

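/*
 * Illustrative sketch (not part of the driver): the macros above
 * dispatch through the per-ASIC ops table that ioc->ioc_hwif points at,
 * so the same state machine code drives both adapter families handled
 * in bfa_hw_cb.c and bfa_hw_ct.c.  The function and field names below
 * are hypothetical, for illustration only; the real table layout lives
 * in bfa_ioc.h.
 *
 *	static struct bfa_ioc_hwif_s hwif_xy;		// hypothetical ASIC
 *
 *	static void
 *	bfa_ioc_set_xy_hwif(struct bfa_ioc_s *ioc)
 *	{
 *		hwif_xy.ioc_reg_init = bfa_ioc_xy_reg_init;
 *		hwif_xy.ioc_map_port = bfa_ioc_xy_map_port;
 *		ioc->ioc_hwif = &hwif_xy;
 *	}
 *
 * After that, bfa_ioc_reg_init(ioc) expands to
 * ioc->ioc_hwif->ioc_reg_init(ioc) and lands in the ASIC-specific code.
 */
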
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
                                enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
                                struct bfi_ioc_image_hdr_s *base_fwhdr,
                                struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
                                struct bfa_ioc_s *ioc,
                                struct bfi_ioc_image_hdr_s *base_fwhdr);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
        IOC_E_RESET             = 1,    /*  IOC reset request           */
        IOC_E_ENABLE            = 2,    /*  IOC enable request          */
        IOC_E_DISABLE           = 3,    /*  IOC disable request */
        IOC_E_DETACH            = 4,    /*  driver detach cleanup       */
        IOC_E_ENABLED           = 5,    /*  f/w enabled         */
        IOC_E_FWRSP_GETATTR     = 6,    /*  IOC get attribute response  */
        IOC_E_DISABLED          = 7,    /*  f/w disabled                */
        IOC_E_PFFAILED          = 8,    /*  failure notice by iocpf sm  */
        IOC_E_HBFAIL            = 9,    /*  heartbeat failure           */
        IOC_E_HWERROR           = 10,   /*  hardware error interrupt    */
        IOC_E_TIMEOUT           = 11,   /*  timeout                     */
        IOC_E_HWFAILED          = 12,   /*  PCI mapping failure notice  */
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
        {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
        {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
        {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
        {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
        {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
        {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
        {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
        {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
        {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
        {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

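/*
 * Illustrative note (not part of the driver): bfa_fsm_state_decl()
 * generates the bfa_ioc_sm_<state>() handler prototypes and their
 * _entry() hooks, and ioc_sm_table[] maps each handler back to the
 * externally visible enum so the current state can be reported.  A
 * hedged sketch of the lookup, assuming the bfa_sm_to_state() helper
 * from bfa_cs.h:
 *
 *	enum bfa_ioc_state
 *	bfa_ioc_get_state(struct bfa_ioc_s *ioc)	// sketch only
 *	{
 *		return bfa_sm_to_state(ioc_sm_table, (bfa_sm_t)ioc->fsm);
 *	}
 *
 * The normal enable path walks reset -> enabling -> getattr -> op,
 * driven by IOC_E_ENABLE, IOC_E_ENABLED and IOC_E_FWRSP_GETATTR below.
 */
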
/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)                                    \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)     bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)                               \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,        \
                        bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)                                      \
        bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,        \
                        bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)       bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for the IOCPF state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
        IOCPF_E_ENABLE          = 1,    /*  IOCPF enable request        */
        IOCPF_E_DISABLE         = 2,    /*  IOCPF disable request       */
        IOCPF_E_STOP            = 3,    /*  stop on driver detach       */
        IOCPF_E_FWREADY         = 4,    /*  f/w initialization done     */
        IOCPF_E_FWRSP_ENABLE    = 5,    /*  enable f/w response */
        IOCPF_E_FWRSP_DISABLE   = 6,    /*  disable f/w response        */
        IOCPF_E_FAIL            = 7,    /*  failure notice by ioc sm    */
        IOCPF_E_INITFAIL        = 8,    /*  init fail notice by ioc sm  */
        IOCPF_E_GETATTRFAIL     = 9,    /*  init fail notice by ioc sm  */
        IOCPF_E_SEMLOCKED       = 10,   /*  h/w semaphore is locked     */
        IOCPF_E_TIMEOUT         = 11,   /*  f/w response timeout        */
        IOCPF_E_SEM_ERROR       = 12,   /*  h/w sem mapping error       */
};

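/*
 * Illustrative note (not part of the driver): on the normal enable
 * path these events drive the IOCPF machine through
 *
 *	reset --IOCPF_E_ENABLE--> fwcheck --IOCPF_E_SEMLOCKED--> hwinit
 *	      --IOCPF_E_FWREADY--> enabling --IOCPF_E_FWRSP_ENABLE--> ready
 *
 * at which point bfa_iocpf_sm_ready_entry() reports IOC_E_ENABLED back
 * to the parent IOC state machine above.
 */
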
/*
 * IOCPF states
 */
enum bfa_iocpf_state {
        BFA_IOCPF_RESET         = 1,    /*  IOC is in reset state */
        BFA_IOCPF_SEMWAIT       = 2,    /*  Waiting for IOC h/w semaphore */
        BFA_IOCPF_HWINIT        = 3,    /*  IOC h/w is being initialized */
        BFA_IOCPF_READY         = 4,    /*  IOCPF is initialized */
        BFA_IOCPF_INITFAIL      = 5,    /*  IOCPF failed */
        BFA_IOCPF_FAIL          = 6,    /*  IOCPF failed */
        BFA_IOCPF_DISABLING     = 7,    /*  IOCPF is being disabled */
        BFA_IOCPF_DISABLED      = 8,    /*  IOCPF is disabled */
        BFA_IOCPF_FWMISMATCH    = 9,    /*  IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
                                                enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
        {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
        {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
        {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
        {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
        {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
        {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
        {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
        {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
        {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_RESET:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_disable_comp(ioc);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
                /* !!! fall through !!! */
        case IOC_E_HWERROR:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_timer_start(ioc);
        bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_FWRSP_GETATTR:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_ioc_timer_stop(ioc);
                fallthrough;
        case IOC_E_TIMEOUT:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
                break;

        case IOC_E_DISABLE:
                bfa_ioc_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
        bfa_ioc_hb_monitor(ioc);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_hb_timer_stop(ioc);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                bfa_hb_timer_stop(ioc);
                fallthrough;
        case IOC_E_HBFAIL:
                if (ioc->iocpf.auto_recover)
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
                else
                        bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

                bfa_ioc_fail_notify(ioc);

                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
        BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_DISABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
                break;

        case IOC_E_HWERROR:
                /*
                 * No state change.  Will move to disabled state
                 * after iocpf sm completes failure processing and
                 * moves to disabled state.
                 */
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
                break;

        case IOC_E_HWFAILED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                bfa_ioc_disable_comp(ioc);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
        bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLED:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
                break;

        case IOC_E_PFFAILED:
        case IOC_E_HWERROR:
                /*
                 * Initialization retry failed.
                 */
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
                if (event != IOC_E_PFFAILED)
                        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
                break;

        case IOC_E_HWFAILED:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
                break;

        case IOC_E_ENABLE:
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {

        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
                break;

        case IOC_E_HWERROR:
        case IOC_E_HWFAILED:
                /*
                 * HB failure / HW error notification, ignore.
                 */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
        bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
        bfa_trc(ioc, event);

        switch (event) {
        case IOC_E_ENABLE:
                ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
                break;

        case IOC_E_DISABLE:
                ioc->cbfn->disable_cbfn(ioc->bfa);
                break;

        case IOC_E_DETACH:
                bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
                break;

        case IOC_E_HWERROR:
                /* Ignore - already in hwfail state */
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->fw_mismatch_notified = BFA_FALSE;
        iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_STOP:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
        struct bfi_ioc_image_hdr_s      fwhdr;
        u32     r32, fwstate, pgnum, loff = 0;
        int     i;

        /*
         * Spin on init semaphore to serialize.
         */
        r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        while (r32 & 0x1) {
                udelay(20);
                r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
        }

        /* h/w sem init */
        fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
        if (fwstate == BFI_IOC_UNINIT) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

        if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
                writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
                goto sem_get;
        }

        /*
         * Clear fwver hdr
         */
        pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
        writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
                bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
                loff += sizeof(u32);
        }

        bfa_trc(iocpf->ioc, fwstate);
        bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
        bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
        bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);

        /*
         * Unlock the hw semaphore. Should be here only once per boot.
         */
        bfa_ioc_ownership_reset(iocpf->ioc);

        /*
         * unlock init semaphore.
         */
        writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_firmware_lock(ioc)) {
                        if (bfa_ioc_sync_start(ioc)) {
                                bfa_ioc_sync_join(ioc);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        } else {
                                bfa_ioc_firmware_unlock(ioc);
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_sem_timer_start(ioc);
                        }
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Call only the first time sm enters fwmismatch state.
         */
        if (iocpf->fw_mismatch_notified == BFA_FALSE)
                bfa_ioc_pf_fwmismatch(iocpf->ioc);

        iocpf->fw_mismatch_notified = BFA_TRUE;
        bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_TIMEOUT:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                bfa_fsm_send_event(ioc, IOC_E_DISABLED);
                break;

        case IOCPF_E_STOP:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                if (bfa_ioc_sync_complete(ioc)) {
                        bfa_ioc_sync_join(ioc);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                } else {
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_sem_timer_start(ioc);
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
        iocpf->poll_time = 0;
        bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWREADY:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
                break;

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        /*
         * Enable Interrupts before sending fw IOC ENABLE cmd.
         */
        iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
        bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_ENABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
                break;

        case IOCPF_E_INITFAIL:
                bfa_iocpf_timer_stop(ioc);
                fallthrough;

        case IOCPF_E_TIMEOUT:
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                if (event == IOCPF_E_TIMEOUT)
                        bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
                break;

        case IOCPF_E_GETATTRFAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_iocpf_timer_start(iocpf->ioc);
        bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_FWRSP_DISABLE:
                bfa_iocpf_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                bfa_iocpf_timer_stop(ioc);
                fallthrough;

        case IOCPF_E_TIMEOUT:
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FWRSP_ENABLE:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC heartbeat ack is being removed; awaiting the h/w semaphore to
 * clear the sync state before moving to disabled.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_leave(ioc);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_mbox_flush(iocpf->ioc);
        bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_ENABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_ioc_debug_save_ftrc(iocpf->ioc);
        bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_notify_fail(ioc);
                bfa_ioc_sync_leave(ioc);
                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                writel(1, ioc->ioc_regs.ioc_sem_reg);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_STOP:
                bfa_sem_timer_stop(ioc);
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        case IOCPF_E_STOP:
                bfa_ioc_firmware_unlock(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
        /*
         * Mark IOC as failed in hardware and stop firmware.
         */
        bfa_ioc_lpu_stop(iocpf->ioc);

        /*
         * Flush any queued up mailbox requests.
         */
        bfa_ioc_mbox_flush(iocpf->ioc);

        bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_SEMLOCKED:
                bfa_ioc_sync_ack(ioc);
                bfa_ioc_notify_fail(ioc);
                if (!iocpf->auto_recover) {
                        bfa_ioc_sync_leave(ioc);
                        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
                        writel(1, ioc->ioc_regs.ioc_sem_reg);
                        bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                } else {
                        if (bfa_ioc_sync_complete(ioc))
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
                        else {
                                writel(1, ioc->ioc_regs.ioc_sem_reg);
                                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
                        }
                }
                break;

        case IOCPF_E_SEM_ERROR:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
                bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
                break;

        case IOCPF_E_DISABLE:
                bfa_sem_timer_stop(ioc);
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
                break;

        case IOCPF_E_FAIL:
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
        bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
        struct bfa_ioc_s *ioc = iocpf->ioc;

        bfa_trc(ioc, event);

        switch (event) {
        case IOCPF_E_DISABLE:
                bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
                break;

        default:
                bfa_sm_fault(ioc, event);
        }
}

/*
 *  BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
        struct bfa_ioc_notify_s *notify;
        struct list_head        *qe;

        list_for_each(qe, &ioc->notify_q) {
                notify = (struct bfa_ioc_notify_s *)qe;
                notify->cbfn(notify->cbarg, event);
        }
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
        ioc->cbfn->disable_cbfn(ioc->bfa);
        bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
        u32 r32;
        int cnt = 0;
#define BFA_SEM_SPINCNT 3000

        r32 = readl(sem_reg);

        while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
                cnt++;
                udelay(2);
                r32 = readl(sem_reg);
        }

        if (!(r32 & 1))
                return BFA_TRUE;

        return BFA_FALSE;
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
        u32     r32;

        /*
         * First read to the semaphore register will return 0, subsequent reads
         * will return 1. Semaphore is released by writing 1 to the register
         */
        r32 = readl(ioc->ioc_regs.ioc_sem_reg);
        if (r32 == ~0) {
                WARN_ON(r32 == ~0);
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
                return;
        }
        if (!(r32 & 1)) {
                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
                return;
        }

        bfa_sem_timer_start(ioc);
}

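/*
 * Illustrative sketch (not part of the driver): the h/w semaphore is a
 * read-to-acquire, write-1-to-release register, so a synchronous user
 * of bfa_ioc_sem_get() would look roughly like this.  BFA_SEM_SPINCNT
 * (3000) spins of udelay(2) bound the wait to about 6 ms.
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		// ... touch the shared resource ...
 *		writel(1, ioc->ioc_regs.ioc_init_sem_reg);	// release
 *	}
 *
 * bfa_ioc_hw_sem_get() above is the asynchronous variant: instead of
 * spinning, it retries from bfa_sem_timer_start() every
 * BFA_IOC_HWSEM_TOV (500 ms) until the semaphore is granted.
 */
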
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;
        int             i;
#define PSS_LMEM_INIT_TIME  10000

        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LMEM_RESET;
        pss_ctl |= __PSS_LMEM_INIT_EN;

        /*
         * I2C workaround: 12.5 kHz clock
         */
        pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

        /*
         * wait for memory initialization to be complete
         */
        i = 0;
        do {
                pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
                i++;
        } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

        /*
         * If memory initialization is not successful, IOC timeout will catch
         * such failures.
         */
        WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
        bfa_trc(ioc, pss_ctl);

        pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Take processor out of reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl &= ~__PSS_LPU0_RESET;

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
        u32     pss_ctl;

        /*
         * Put processors in reset.
         */
        pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
        pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

        writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
        u32     pgnum;
        u32     loff = 0;
        int             i;
        u32     *fwsig = (u32 *) fwhdr;

        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
        writel(pgnum, ioc->ioc_regs.host_page_num_fn);

        for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
             i++) {
                fwsig[i] =
                        bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
                loff += sizeof(u32);
        }
}

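/*
 * Illustrative note (not part of the driver): shared memory (smem) is
 * reached through a sliding page window.  PSS_SMEM_PGNUM() converts a
 * linear offset into a page number that is programmed into
 * host_page_num_fn, after which bfa_mem_read()/bfa_mem_write() move one
 * 32-bit word at a time within that page.  A hedged sketch of reading
 * one word at an arbitrary linear offset `off` (PSS_SMEM_PGOFF is
 * assumed here as the companion offset macro):
 *
 *	u32 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, off);
 *	u32 loff  = PSS_SMEM_PGOFF(off);
 *
 *	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 *	r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
 */
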
/*
 * Returns TRUE if driver is willing to work with current smem f/w version.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
                struct bfi_ioc_image_hdr_s *smem_fwhdr)
{
        struct bfi_ioc_image_hdr_s *drv_fwhdr;
        enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;

        drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
                bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

        /*
         * If smem is incompatible or old, driver should not work with it.
         */
        drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
        if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
                drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
                return BFA_FALSE;
        }

        /*
         * If flash has a better f/w than smem, do not work with smem.
         * If smem f/w == flash f/w, work with it (smem f/w is neither
         * old nor incompatible at this point).
         * If flash f/w is old or incompatible, work with smem iff
         * smem f/w == drv f/w.
         */
        smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);

        if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
                return BFA_FALSE;
        } else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
                return BFA_TRUE;
        } else {
                return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
                        BFA_TRUE : BFA_FALSE;
        }
}

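/*
 * Illustrative summary (not part of the driver) of the policy above,
 * with D = driver f/w, S = smem (running) f/w, F = flash f/w:
 *
 *	S incompatible with / older than D	-> reject S
 *	F better than S				-> reject S (reload from flash)
 *	F same as S				-> accept S
 *	F old/incompatible			-> accept S only if S == D
 */
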
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
        struct bfi_ioc_image_hdr_s fwhdr;

        bfa_ioc_fwver_get(ioc, &fwhdr);

        if (swab32(fwhdr.bootenv) != boot_env) {
                bfa_trc(ioc, fwhdr.bootenv);
                bfa_trc(ioc, boot_env);
                return BFA_FALSE;
        }

        return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}

static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
                                struct bfi_ioc_image_hdr_s *fwhdr_2)
{
        int i;

        for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
                if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
                        return BFA_FALSE;

        return BFA_TRUE;
}

/*
 * Returns TRUE if the signature, major, minor, and maintenance versions
 * are the same.  If the patch, phase, and build versions also match,
 * the MD5 checksums must match as well.
 */
static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
                                struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
        if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
                return BFA_FALSE;

        if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
                drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
                drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
                return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
        }

        return BFA_TRUE;
}

static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
{
        if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
                return BFA_FALSE;

        return BFA_TRUE;
}

static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
{
        if (fwhdr->fwver.phase == 0 &&
                fwhdr->fwver.build == 0)
                return BFA_TRUE;

        return BFA_FALSE;
}

/*
 * Returns how fwhdr_to_cmp compares against base_fwhdr: incompatible,
 * older, same, or better (a higher patch/phase/build of a compatible
 * stream).
 */
static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
                                struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
{
        if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
                return BFI_IOC_IMG_VER_INCOMP;

        if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
                return BFI_IOC_IMG_VER_BETTER;

        else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
                return BFI_IOC_IMG_VER_OLD;

        /*
         * GA takes priority over internal builds of the same patch stream.
         * At this point the major, minor, maint, and patch numbers are the
         * same.
         */

        if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
                if (fwhdr_is_ga(fwhdr_to_cmp))
                        return BFI_IOC_IMG_VER_SAME;
                else
                        return BFI_IOC_IMG_VER_OLD;
        } else {
                if (fwhdr_is_ga(fwhdr_to_cmp))
                        return BFI_IOC_IMG_VER_BETTER;
        }

        if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
                return BFI_IOC_IMG_VER_OLD;

        if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
                return BFI_IOC_IMG_VER_BETTER;
        else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
                return BFI_IOC_IMG_VER_OLD;

        /*
         * All version numbers are equal: the MD5 check was already done
         * as part of the compatibility check.
         */
        return BFI_IOC_IMG_VER_SAME;
}

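/*
 * Illustrative walk-through (not part of the driver), comparing
 * hypothetical headers with version major.minor.maint.patch-phase.build:
 *
 *	base = 3.2.1.0-0.0 (GA), to_cmp = 3.2.1.1-0.0
 *		-> patch 1 > 0			-> BFI_IOC_IMG_VER_BETTER
 *	base = 3.2.1.0-0.0 (GA), to_cmp = 3.2.1.0-1.5
 *		-> base is GA, to_cmp is not	-> BFI_IOC_IMG_VER_OLD
 *	base = 3.2.1.0-1.4, to_cmp = 3.2.1.0-1.5
 *		-> build 5 > 4			-> BFI_IOC_IMG_VER_BETTER
 */
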
#define BFA_FLASH_PART_FWIMG_ADDR       0x100000 /* fw image address */

bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
                                u32 *fwimg)
{
        return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
                        BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
                        (char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
                        struct bfi_ioc_image_hdr_s *base_fwhdr)
{
        struct bfi_ioc_image_hdr_s *flash_fwhdr;
        bfa_status_t status;
        u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

        status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
        if (status != BFA_STATUS_OK)
                return BFI_IOC_IMG_VER_INCOMP;

        flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
        if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
                return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
        else
                return BFI_IOC_IMG_VER_INCOMP;
}


/*
 * Invalidate fwver signature
 */
bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
{
1663        u32     pgnum;
1664        u32     loff = 0;
1665        enum bfi_ioc_state ioc_fwstate;
1666
1667        ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1668        if (!bfa_ioc_state_disabled(ioc_fwstate))
1669                return BFA_STATUS_ADAPTER_ENABLED;
1670
1671        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1672        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1673        bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
1674
1675        return BFA_STATUS_OK;
1676}
1677
1678/*
1679 * Conditionally flush any pending message from firmware at start.
1680 */
1681static void
1682bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1683{
1684        u32     r32;
1685
1686        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1687        if (r32)
1688                writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1689}
1690
1691static void
1692bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1693{
1694        enum bfi_ioc_state ioc_fwstate;
1695        bfa_boolean_t fwvalid;
1696        u32 boot_type;
1697        u32 boot_env;
1698
1699        ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1700
1701        if (force)
1702                ioc_fwstate = BFI_IOC_UNINIT;
1703
1704        bfa_trc(ioc, ioc_fwstate);
1705
1706        boot_type = BFI_FWBOOT_TYPE_NORMAL;
1707        boot_env = BFI_FWBOOT_ENV_OS;
1708
1709        /*
1710         * check if firmware is valid
1711         */
1712        fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1713                BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1714
1715        if (!fwvalid) {
1716                if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1717                        bfa_ioc_poll_fwinit(ioc);
1718                return;
1719        }
1720
1721        /*
1722         * If hardware initialization is in progress (initialized by the
1723         * other IOC), just wait for the initialization completion interrupt.
1724         */
1725        if (ioc_fwstate == BFI_IOC_INITING) {
1726                bfa_ioc_poll_fwinit(ioc);
1727                return;
1728        }
1729
1730        /*
1731         * If the IOC function is disabled and the firmware version is the
1732         * same, just re-enable the IOC.
1733         *
1734         * For option ROM, the IOC must not be in the operational state.
1735         * With convergence, the IOC will be in the operational state when
1736         * the second driver is loaded.
1737         */
1738        if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1739
1740                /*
1741                 * When using MSI-X, any pending firmware-ready event must
1742                 * be flushed; otherwise MSI-X interrupts are not delivered.
1743                 */
1744                bfa_ioc_msgflush(ioc);
1745                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1746                return;
1747        }
1748
1749        /*
1750         * Initialize the h/w for any other states.
1751         */
1752        if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1753                bfa_ioc_poll_fwinit(ioc);
1754}
1755
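/*
 * Reading aid (not normative): summary of how bfa_ioc_hwinit() above
 * dispatches on the current fwstate:
 *
 *	UNINIT or invalid f/w         -> boot f/w, poll for fwinit
 *	INITING                       -> other IOC is initializing; poll
 *	DISABLED or OP with valid f/w -> flush mbox, send IOCPF_E_FWREADY
 *	any other state, valid f/w    -> boot f/w, poll for fwinit
 */
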
1756static void
1757bfa_ioc_timeout(void *ioc_arg)
1758{
1759        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1760
1761        bfa_trc(ioc, 0);
1762        bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1763}
1764
1765void
1766bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1767{
1768        u32 *msgp = (u32 *) ioc_msg;
1769        u32 i;
1770
1771        bfa_trc(ioc, msgp[0]);
1772        bfa_trc(ioc, len);
1773
1774        WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1775
1776        /*
1777         * first write msg to mailbox registers
1778         */
1779        for (i = 0; i < len / sizeof(u32); i++)
1780                writel(cpu_to_le32(msgp[i]),
1781                        ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1782
1783        for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1784                writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1785
1786        /*
1787         * write 1 to mailbox CMD to trigger LPU event
1788         */
1789        writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1790        (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1791}
1792
1793static void
1794bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1795{
1796        struct bfi_ioc_ctrl_req_s enable_req;
1797
1798        bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1799                    bfa_ioc_portid(ioc));
1800        enable_req.clscode = cpu_to_be16(ioc->clscode);
1801        /* unsigned 32-bit time_t overflow in y2106 */
1802        enable_req.tv_sec = cpu_to_be32(ktime_get_real_seconds());
1803        bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1804}
1805
1806static void
1807bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1808{
1809        struct bfi_ioc_ctrl_req_s disable_req;
1810
1811        bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1812                    bfa_ioc_portid(ioc));
1813        disable_req.clscode = cpu_to_be16(ioc->clscode);
1814        /* unsigned 32-bit time_t overflow in y2106 */
1815        disable_req.tv_sec = cpu_to_be32(ktime_get_real_seconds());
1816        bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1817}
1818
1819static void
1820bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1821{
1822        struct bfi_ioc_getattr_req_s    attr_req;
1823
1824        bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1825                    bfa_ioc_portid(ioc));
1826        bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1827        bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1828}
1829
1830static void
1831bfa_ioc_hb_check(void *cbarg)
1832{
1833        struct bfa_ioc_s  *ioc = cbarg;
1834        u32     hb_count;
1835
1836        hb_count = readl(ioc->ioc_regs.heartbeat);
1837        if (ioc->hb_count == hb_count) {
1838                bfa_ioc_recover(ioc);
1839                return;
1840        }
1841
1842        ioc->hb_count = hb_count;
1843
1844        bfa_ioc_mbox_poll(ioc);
1845        bfa_hb_timer_start(ioc);
1846}
1847
1848static void
1849bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1850{
1851        ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1852        bfa_hb_timer_start(ioc);
1853}
1854
1855/*
1856 *      Initiate a full firmware download.
1857 */
1858static bfa_status_t
1859bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1860                    u32 boot_env)
1861{
1862        u32 *fwimg;
1863        u32 pgnum;
1864        u32 loff = 0;
1865        u32 chunkno = 0;
1866        u32 i;
1867        u32 asicmode;
1868        u32 fwimg_size;
1869        u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
1870        bfa_status_t status;
1871
1872        if (boot_env == BFI_FWBOOT_ENV_OS &&
1873                boot_type == BFI_FWBOOT_TYPE_FLASH) {
1874                fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
1875
1876                status = bfa_ioc_flash_img_get_chnk(ioc,
1877                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
1878                if (status != BFA_STATUS_OK)
1879                        return status;
1880
1881                fwimg = fwimg_buf;
1882        } else {
1883                fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
1884                fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1885                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1886        }
1887
1888        bfa_trc(ioc, fwimg_size);
1889
1890
1891        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1892        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1893
1894        for (i = 0; i < fwimg_size; i++) {
1895
1896                if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1897                        chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1898
1899                        if (boot_env == BFI_FWBOOT_ENV_OS &&
1900                                boot_type == BFI_FWBOOT_TYPE_FLASH) {
1901                                status = bfa_ioc_flash_img_get_chnk(ioc,
1902                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
1903                                        fwimg_buf);
1904                                if (status != BFA_STATUS_OK)
1905                                        return status;
1906
1907                                fwimg = fwimg_buf;
1908                        } else {
1909                                fwimg = bfa_cb_image_get_chunk(
1910                                        bfa_ioc_asic_gen(ioc),
1911                                        BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1912                        }
1913                }
1914
1915                /*
1916                 * write smem
1917                 */
1918                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1919                              fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1920
1921                loff += sizeof(u32);
1922
1923                /*
1924                 * handle page offset wrap around
1925                 */
1926                loff = PSS_SMEM_PGOFF(loff);
1927                if (loff == 0) {
1928                        pgnum++;
1929                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1930                }
1931        }
1932
1933        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1934                        ioc->ioc_regs.host_page_num_fn);
1935
1936        /*
1937         * Set boot type, env and device mode at the end.
1938         */
1939        if (boot_env == BFI_FWBOOT_ENV_OS &&
1940                boot_type == BFI_FWBOOT_TYPE_FLASH) {
1941                boot_type = BFI_FWBOOT_TYPE_NORMAL;
1942        }
1943        asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1944                                ioc->port0_mode, ioc->port1_mode);
1945        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1946                        swab32(asicmode));
1947        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1948                        swab32(boot_type));
1949        bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1950                        swab32(boot_env));
1951        return BFA_STATUS_OK;
1952}
1953
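/*
 * Worked example of the chunk/page arithmetic used above (values are
 * assumed, for illustration only). Word index i maps to
 *
 *	chunkno = BFA_IOC_FLASH_CHUNK_NO(i)         (i / words per chunk)
 *	offset  = BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)  (i % words per chunk)
 *
 * and the host page register is rewritten whenever loff wraps to 0.
 * A minimal sketch of the same loop shape:
 *
 *	for (i = 0; i < fwimg_size; i++) {
 *		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno)
 *			(fetch the next chunk into fwimg)
 *		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
 *			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
 *		loff = PSS_SMEM_PGOFF(loff + sizeof(u32));
 *		if (loff == 0)
 *			writel(++pgnum, ioc->ioc_regs.host_page_num_fn);
 *	}
 */
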
1954
1955/*
1956 * Update BFA configuration from firmware configuration.
1957 */
1958static void
1959bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1960{
1961        struct bfi_ioc_attr_s   *attr = ioc->attr;
1962
1963        attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1964        attr->card_type     = be32_to_cpu(attr->card_type);
1965        attr->maxfrsize     = be16_to_cpu(attr->maxfrsize);
1966        ioc->fcmode     = (attr->port_mode == BFI_PORT_MODE_FC);
1967        attr->mfg_year  = be16_to_cpu(attr->mfg_year);
1968
1969        bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1970}
1971
1972/*
1973 * Attach time initialization of mbox logic.
1974 */
1975static void
1976bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1977{
1978        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1979        int     mc;
1980
1981        INIT_LIST_HEAD(&mod->cmd_q);
1982        for (mc = 0; mc < BFI_MC_MAX; mc++) {
1983                mod->mbhdlr[mc].cbfn = NULL;
1984                mod->mbhdlr[mc].cbarg = ioc->bfa;
1985        }
1986}
1987
1988/*
1989 * Mbox poll timer -- restarts any pending mailbox requests.
1990 */
1991static void
1992bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1993{
1994        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
1995        struct bfa_mbox_cmd_s           *cmd;
1996        u32                     stat;
1997
1998        /*
1999         * If no command pending, do nothing
2000         */
2001        if (list_empty(&mod->cmd_q))
2002                return;
2003
2004        /*
2005         * If previous command is not yet fetched by firmware, do nothing
2006         */
2007        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2008        if (stat)
2009                return;
2010
2011        /*
2012         * Dequeue the pending command and send it to firmware.
2013         */
2014        bfa_q_deq(&mod->cmd_q, &cmd);
2015        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2016}
2017
2018/*
2019 * Cleanup any pending requests.
2020 */
2021static void
2022bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2023{
2024        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2025        struct bfa_mbox_cmd_s           *cmd;
2026
2027        while (!list_empty(&mod->cmd_q))
2028                bfa_q_deq(&mod->cmd_q, &cmd);
2029}
2030
2031/*
2032 * Read data from SMEM to host through PCI memmap
2033 *
2034 * @param[in]   ioc     memory for IOC
2035 * @param[in]   tbuf    host buffer to store the data read from smem
2036 * @param[in]   soff    smem offset
2037 * @param[in]   sz      number of bytes to read
2038 */
2039static bfa_status_t
2040bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2041{
2042        u32 pgnum, loff;
2043        __be32 r32;
2044        int i, len;
2045        u32 *buf = tbuf;
2046
2047        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2048        loff = PSS_SMEM_PGOFF(soff);
2049        bfa_trc(ioc, pgnum);
2050        bfa_trc(ioc, loff);
2051        bfa_trc(ioc, sz);
2052
2053        /*
2054         *  Hold semaphore to serialize pll init and fwtrc.
2055         */
2056        if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2057                bfa_trc(ioc, 0);
2058                return BFA_STATUS_FAILED;
2059        }
2060
2061        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2062
2063        len = sz/sizeof(u32);
2064        bfa_trc(ioc, len);
2065        for (i = 0; i < len; i++) {
2066                r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2067                buf[i] = swab32(r32);
2068                loff += sizeof(u32);
2069
2070                /*
2071                 * handle page offset wrap around
2072                 */
2073                loff = PSS_SMEM_PGOFF(loff);
2074                if (loff == 0) {
2075                        pgnum++;
2076                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2077                }
2078        }
2079        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2080                        ioc->ioc_regs.host_page_num_fn);
2081        /*
2082         *  release semaphore.
2083         */
2084        readl(ioc->ioc_regs.ioc_init_sem_reg);
2085        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2086
2087        bfa_trc(ioc, pgnum);
2088        return BFA_STATUS_OK;
2089}
2090
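/*
 * Illustrative use of bfa_ioc_smem_read() (sketch only; see
 * bfa_ioc_debug_fwtrc() later in this file for a real caller):
 *
 *	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
 *	int tlen = BFA_DBG_FWTRC_LEN;
 *
 *	if (bfa_ioc_smem_read(ioc, trcdata, loff, tlen) == BFA_STATUS_OK)
 *		(trcdata now holds tlen bytes of byte-swapped smem)
 */
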
2091/*
2092 * Clear SMEM data from host through PCI memmap
2093 *
2094 * @param[in]   ioc     memory for IOC
2095 * @param[in]   soff    smem offset
2096 * @param[in]   sz      number of bytes to clear
2097 */
2098static bfa_status_t
2099bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2100{
2101        int i, len;
2102        u32 pgnum, loff;
2103
2104        pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2105        loff = PSS_SMEM_PGOFF(soff);
2106        bfa_trc(ioc, pgnum);
2107        bfa_trc(ioc, loff);
2108        bfa_trc(ioc, sz);
2109
2110        /*
2111         *  Hold semaphore to serialize pll init and fwtrc.
2112         */
2113        if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2114                bfa_trc(ioc, 0);
2115                return BFA_STATUS_FAILED;
2116        }
2117
2118        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2119
2120        len = sz/sizeof(u32); /* len in words */
2121        bfa_trc(ioc, len);
2122        for (i = 0; i < len; i++) {
2123                bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2124                loff += sizeof(u32);
2125
2126                /*
2127                 * handle page offset wrap around
2128                 */
2129                loff = PSS_SMEM_PGOFF(loff);
2130                if (loff == 0) {
2131                        pgnum++;
2132                        writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2133                }
2134        }
2135        writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2136                        ioc->ioc_regs.host_page_num_fn);
2137
2138        /*
2139         *  release semaphore.
2140         */
2141        readl(ioc->ioc_regs.ioc_init_sem_reg);
2142        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2143        bfa_trc(ioc, pgnum);
2144        return BFA_STATUS_OK;
2145}
2146
2147static void
2148bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2149{
2150        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2151
2152        /*
2153         * Notify driver and common modules registered for notification.
2154         */
2155        ioc->cbfn->hbfail_cbfn(ioc->bfa);
2156        bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2157
2158        bfa_ioc_debug_save_ftrc(ioc);
2159
2160        BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2161                "Heart Beat of IOC has failed\n");
2162        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2163
2164}
2165
2166static void
2167bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2168{
2169        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2170        /*
2171         * Provide enable completion callback.
2172         */
2173        ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2174        BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2175                "Running firmware version is incompatible "
2176                "with the driver version\n");
2177        bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2178}
2179
2180bfa_status_t
2181bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2182{
2183
2184        /*
2185         *  Hold semaphore so that nobody can access the chip during init.
2186         */
2187        bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2188
2189        bfa_ioc_pll_init_asic(ioc);
2190
2191        ioc->pllinit = BFA_TRUE;
2192
2193        /*
2194         * Initialize LMEM
2195         */
2196        bfa_ioc_lmem_init(ioc);
2197
2198        /*
2199         *  release semaphore.
2200         */
2201        readl(ioc->ioc_regs.ioc_init_sem_reg);
2202        writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2203
2204        return BFA_STATUS_OK;
2205}
2206
2207/*
2208 * Interface used by diag module to do firmware boot with memory test
2209 * as the entry vector.
2210 */
2211bfa_status_t
2212bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2213{
2214        struct bfi_ioc_image_hdr_s *drv_fwhdr;
2215        bfa_status_t status;
2216        bfa_ioc_stats(ioc, ioc_boots);
2217
2218        if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2219                return BFA_STATUS_FAILED;
2220
2221        if (boot_env == BFI_FWBOOT_ENV_OS &&
2222                boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2223
2224                drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2225                        bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2226
2227                /*
2228                 * Work with flash iff the flash f/w is better than the
2229                 * driver f/w. Otherwise push the driver's firmware.
2230                 */
2231                if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2232                                                BFI_IOC_IMG_VER_BETTER)
2233                        boot_type = BFI_FWBOOT_TYPE_FLASH;
2234        }
2235
2236        /*
2237         * Initialize IOC state of all functions on a chip reset.
2238         */
2239        if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2240                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2241                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2242        } else {
2243                bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2244                bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2245        }
2246
2247        bfa_ioc_msgflush(ioc);
2248        status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2249        if (status == BFA_STATUS_OK) {
2250                bfa_ioc_lpu_start(ioc);
2251        } else {
2252                WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2253                bfa_iocpf_timeout(ioc);
2254        }
2255        return status;
2256}
2257
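/*
 * Reading aid (not normative): the boot sequence performed by
 * bfa_ioc_boot() above, in order:
 *
 *	1. bfa_ioc_pll_init()      - chip PLL/LMEM init under semaphore
 *	2. flash vs. driver f/w    - prefer the flash image iff it is better
 *	3. set cur/alt fwstate     - MEMTEST or INITING for all functions
 *	4. bfa_ioc_msgflush()      - drop any stale mailbox event
 *	5. bfa_ioc_download_fw()   - push the image to smem, set boot params
 *	6. bfa_ioc_lpu_start()     - release the LPU to execute the image
 */
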
2258/*
2259 * Enable/disable IOC failure auto recovery.
2260 */
2261void
2262bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2263{
2264        bfa_auto_recover = auto_recover;
2265}
2266
2267
2268
2269bfa_boolean_t
2270bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2271{
2272        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2273}
2274
2275bfa_boolean_t
2276bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2277{
2278        u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2279
2280        return ((r32 != BFI_IOC_UNINIT) &&
2281                (r32 != BFI_IOC_INITING) &&
2282                (r32 != BFI_IOC_MEMTEST));
2283}
2284
2285bfa_boolean_t
2286bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2287{
2288        __be32  *msgp = mbmsg;
2289        u32     r32;
2290        int             i;
2291
2292        r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2293        if ((r32 & 1) == 0)
2294                return BFA_FALSE;
2295
2296        /*
2297         * read the MBOX msg
2298         */
2299        for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2300             i++) {
2301                r32 = readl(ioc->ioc_regs.lpu_mbox +
2302                                   i * sizeof(u32));
2303                msgp[i] = cpu_to_be32(r32);
2304        }
2305
2306        /*
2307         * turn off mailbox interrupt by clearing mailbox status
2308         */
2309        writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2310        readl(ioc->ioc_regs.lpu_mbox_cmd);
2311
2312        return BFA_TRUE;
2313}
2314
2315void
2316bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2317{
2318        union bfi_ioc_i2h_msg_u *msg;
2319        struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2320
2321        msg = (union bfi_ioc_i2h_msg_u *) m;
2322
2323        bfa_ioc_stats(ioc, ioc_isrs);
2324
2325        switch (msg->mh.msg_id) {
2326        case BFI_IOC_I2H_HBEAT:
2327                break;
2328
2329        case BFI_IOC_I2H_ENABLE_REPLY:
2330                ioc->port_mode = ioc->port_mode_cfg =
2331                                (enum bfa_mode_s)msg->fw_event.port_mode;
2332                ioc->ad_cap_bm = msg->fw_event.cap_bm;
2333                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2334                break;
2335
2336        case BFI_IOC_I2H_DISABLE_REPLY:
2337                bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2338                break;
2339
2340        case BFI_IOC_I2H_GETATTR_REPLY:
2341                bfa_ioc_getattr_reply(ioc);
2342                break;
2343
2344        default:
2345                bfa_trc(ioc, msg->mh.msg_id);
2346                WARN_ON(1);
2347        }
2348}
2349
2350/*
2351 * IOC attach time initialization and setup.
2352 *
2353 * @param[in]   ioc     memory for IOC
2354 * @param[in]   bfa     driver instance structure
2355 */
2356void
2357bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2358               struct bfa_timer_mod_s *timer_mod)
2359{
2360        ioc->bfa        = bfa;
2361        ioc->cbfn       = cbfn;
2362        ioc->timer_mod  = timer_mod;
2363        ioc->fcmode     = BFA_FALSE;
2364        ioc->pllinit    = BFA_FALSE;
2365        ioc->dbg_fwsave_once = BFA_TRUE;
2366        ioc->iocpf.ioc  = ioc;
2367
2368        bfa_ioc_mbox_attach(ioc);
2369        INIT_LIST_HEAD(&ioc->notify_q);
2370
2371        bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2372        bfa_fsm_send_event(ioc, IOC_E_RESET);
2373}
2374
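/*
 * Typical bring-up order for the entry points in this file, sketched
 * from the functions below (hypothetical caller, illustration only):
 *
 *	bfa_ioc_attach(ioc, bfa, &ioc_cbfn, &timer_mod);
 *	bfa_ioc_pci_init(ioc, &pcidev, clscode);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_ioc_debug_memclaim(ioc, dbg_fwsave);   (optional trace buffer)
 *	bfa_ioc_enable(ioc);
 */
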
2375/*
2376 * Driver detach time IOC cleanup.
2377 */
2378void
2379bfa_ioc_detach(struct bfa_ioc_s *ioc)
2380{
2381        bfa_fsm_send_event(ioc, IOC_E_DETACH);
2382        INIT_LIST_HEAD(&ioc->notify_q);
2383}
2384
2385/*
2386 * Setup IOC PCI properties.
2387 *
2388 * @param[in]   pcidev  PCI device information for this IOC
2389 */
2390void
2391bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2392                enum bfi_pcifn_class clscode)
2393{
2394        ioc->clscode    = clscode;
2395        ioc->pcidev     = *pcidev;
2396
2397        /*
2398         * Initialize IOC and device personality
2399         */
2400        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2401        ioc->asic_mode  = BFI_ASIC_MODE_FC;
2402
2403        switch (pcidev->device_id) {
2404        case BFA_PCI_DEVICE_ID_FC_8G1P:
2405        case BFA_PCI_DEVICE_ID_FC_8G2P:
2406                ioc->asic_gen = BFI_ASIC_GEN_CB;
2407                ioc->fcmode = BFA_TRUE;
2408                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2409                ioc->ad_cap_bm = BFA_CM_HBA;
2410                break;
2411
2412        case BFA_PCI_DEVICE_ID_CT:
2413                ioc->asic_gen = BFI_ASIC_GEN_CT;
2414                ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2415                ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2416                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2417                ioc->ad_cap_bm = BFA_CM_CNA;
2418                break;
2419
2420        case BFA_PCI_DEVICE_ID_CT_FC:
2421                ioc->asic_gen = BFI_ASIC_GEN_CT;
2422                ioc->fcmode = BFA_TRUE;
2423                ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2424                ioc->ad_cap_bm = BFA_CM_HBA;
2425                break;
2426
2427        case BFA_PCI_DEVICE_ID_CT2:
2428        case BFA_PCI_DEVICE_ID_CT2_QUAD:
2429                ioc->asic_gen = BFI_ASIC_GEN_CT2;
2430                if (clscode == BFI_PCIFN_CLASS_FC &&
2431                    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2432                        ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2433                        ioc->fcmode = BFA_TRUE;
2434                        ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2435                        ioc->ad_cap_bm = BFA_CM_HBA;
2436                } else {
2437                        ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2438                        ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2439                        if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2440                                ioc->port_mode =
2441                                ioc->port_mode_cfg = BFA_MODE_CNA;
2442                                ioc->ad_cap_bm = BFA_CM_CNA;
2443                        } else {
2444                                ioc->port_mode =
2445                                ioc->port_mode_cfg = BFA_MODE_NIC;
2446                                ioc->ad_cap_bm = BFA_CM_NIC;
2447                        }
2448                }
2449                break;
2450
2451        default:
2452                WARN_ON(1);
2453        }
2454
2455        /*
2456         * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2457         */
2458        if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2459                bfa_ioc_set_cb_hwif(ioc);
2460        else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2461                bfa_ioc_set_ct_hwif(ioc);
2462        else {
2463                WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2464                bfa_ioc_set_ct2_hwif(ioc);
2465                bfa_ioc_ct2_poweron(ioc);
2466        }
2467
2468        bfa_ioc_map_port(ioc);
2469        bfa_ioc_reg_init(ioc);
2470}
2471
2472/*
2473 * Initialize IOC dma memory
2474 *
2475 * @param[in]   dm_kva  kernel virtual address of IOC dma memory
2476 * @param[in]   dm_pa   physical address of IOC dma memory
2477 */
2478void
2479bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2480{
2481        /*
2482         * dma memory for firmware attribute
2483         */
2484        ioc->attr_dma.kva = dm_kva;
2485        ioc->attr_dma.pa = dm_pa;
2486        ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2487}
2488
2489void
2490bfa_ioc_enable(struct bfa_ioc_s *ioc)
2491{
2492        bfa_ioc_stats(ioc, ioc_enables);
2493        ioc->dbg_fwsave_once = BFA_TRUE;
2494
2495        bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2496}
2497
2498void
2499bfa_ioc_disable(struct bfa_ioc_s *ioc)
2500{
2501        bfa_ioc_stats(ioc, ioc_disables);
2502        bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2503}
2504
2505void
2506bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2507{
2508        ioc->dbg_fwsave_once = BFA_TRUE;
2509        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2510}
2511
2512/*
2513 * Initialize memory for saving firmware trace. The driver must
2514 * initialize trace memory before calling bfa_ioc_enable().
2515 */
2516void
2517bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2518{
2519        ioc->dbg_fwsave     = dbg_fwsave;
2520        ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2521}
2522
2523/*
2524 * Register mailbox message handler functions
2525 *
2526 * @param[in]   ioc             IOC instance
2527 * @param[in]   mcfuncs         message class handler functions
2528 */
2529void
2530bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2531{
2532        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2533        int                             mc;
2534
2535        for (mc = 0; mc < BFI_MC_MAX; mc++)
2536                mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2537}
2538
2539/*
2540 * Register mailbox message handler function, to be called by common modules
2541 */
2542void
2543bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2544                    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2545{
2546        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2547
2548        mod->mbhdlr[mc].cbfn    = cbfn;
2549        mod->mbhdlr[mc].cbarg   = cbarg;
2550}
2551
2552/*
2553 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2554 * It is the caller's responsibility to serialize.
2555 *
2556 * @param[in]   ioc     IOC instance
2557 * @param[in]   cmd     Mailbox command
2558 */
2559void
2560bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2561{
2562        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2563        u32                     stat;
2564
2565        /*
2566         * If a previous command is pending, queue new command
2567         */
2568        if (!list_empty(&mod->cmd_q)) {
2569                list_add_tail(&cmd->qe, &mod->cmd_q);
2570                return;
2571        }
2572
2573        /*
2574         * If mailbox is busy, queue command for poll timer
2575         */
2576        stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2577        if (stat) {
2578                list_add_tail(&cmd->qe, &mod->cmd_q);
2579                return;
2580        }
2581
2582        /*
2583         * mailbox is free -- queue command to firmware
2584         * mailbox is free -- send the command to firmware
2585        bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2586}
2587
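/*
 * Illustrative use of bfa_ioc_mbox_queue() (sketch only; see
 * bfa_ioc_send_fwsync() later in this file for a real caller):
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	req->clscode = cpu_to_be16(ioc->clscode);
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */
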
2588/*
2589 * Handle mailbox interrupts
2590 */
2591void
2592bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2593{
2594        struct bfa_ioc_mbox_mod_s       *mod = &ioc->mbox_mod;
2595        struct bfi_mbmsg_s              m;
2596        int                             mc;
2597
2598        if (bfa_ioc_msgget(ioc, &m)) {
2599                /*
2600                 * Treat IOC message class as special.
2601                 */
2602                mc = m.mh.msg_class;
2603                if (mc == BFI_MC_IOC) {
2604                        bfa_ioc_isr(ioc, &m);
2605                        return;
2606                }
2607
2608                if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2609                        return;
2610
2611                mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2612        }
2613
2614        bfa_ioc_lpu_read_stat(ioc);
2615
2616        /*
2617         * Try to send pending mailbox commands
2618         */
2619        bfa_ioc_mbox_poll(ioc);
2620}
2621
2622void
2623bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2624{
2625        bfa_ioc_stats(ioc, ioc_hbfails);
2626        ioc->stats.hb_count = ioc->hb_count;
2627        bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2628}
2629
2630/*
2631 * return true if IOC is disabled
2632 */
2633bfa_boolean_t
2634bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2635{
2636        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2637                bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2638}
2639
2640/*
2641 * return true if IOC firmware is different.
2642 */
2643bfa_boolean_t
2644bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2645{
2646        return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2647                bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2648                bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2649}
2650
2651/*
2652 * Check if adapter is disabled -- both IOCs should be in a disabled
2653 * state.
2654 */
2655bfa_boolean_t
2656bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2657{
2658        u32     ioc_state;
2659
2660        if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2661                return BFA_FALSE;
2662
2663        ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2664        if (!bfa_ioc_state_disabled(ioc_state))
2665                return BFA_FALSE;
2666
2667        if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2668                ioc_state = bfa_ioc_get_alt_ioc_fwstate(ioc);
2669                if (!bfa_ioc_state_disabled(ioc_state))
2670                        return BFA_FALSE;
2671        }
2672
2673        return BFA_TRUE;
2674}
2675
2676/*
2677 * Reset IOC fwstate registers.
2678 */
2679void
2680bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2681{
2682        bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2683        bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2684}
2685
2686#define BFA_MFG_NAME "QLogic"
2687void
2688bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2689                         struct bfa_adapter_attr_s *ad_attr)
2690{
2691        struct bfi_ioc_attr_s   *ioc_attr;
2692
2693        ioc_attr = ioc->attr;
2694
2695        bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2696        bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2697        bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2698        bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2699        memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2700                      sizeof(struct bfa_mfg_vpd_s));
2701
2702        ad_attr->nports = bfa_ioc_get_nports(ioc);
2703        ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2704
2705        bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2706        /* For now, model descr uses same model string */
2707        bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2708
2709        ad_attr->card_type = ioc_attr->card_type;
2710        ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2711
2712        if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2713                ad_attr->prototype = 1;
2714        else
2715                ad_attr->prototype = 0;
2716
2717        ad_attr->pwwn = ioc->attr->pwwn;
2718        ad_attr->mac  = bfa_ioc_get_mac(ioc);
2719
2720        ad_attr->pcie_gen = ioc_attr->pcie_gen;
2721        ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2722        ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2723        ad_attr->asic_rev = ioc_attr->asic_rev;
2724
2725        bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2726
2727        ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2728        ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2729                                  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2730        ad_attr->mfg_day = ioc_attr->mfg_day;
2731        ad_attr->mfg_month = ioc_attr->mfg_month;
2732        ad_attr->mfg_year = ioc_attr->mfg_year;
2733        memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2734}
2735
2736enum bfa_ioc_type_e
2737bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2738{
2739        if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2740                return BFA_IOC_TYPE_LL;
2741
2742        WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2743
2744        return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2745                ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2746}
2747
2748void
2749bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2750{
2751        memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2752        memcpy((void *)serial_num,
2753                        (void *)ioc->attr->brcd_serialnum,
2754                        BFA_ADAPTER_SERIAL_NUM_LEN);
2755}
2756
2757void
2758bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2759{
2760        memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2761        memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2762}
2763
2764void
2765bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2766{
2767        WARN_ON(!chip_rev);
2768
2769        memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2770
2771        chip_rev[0] = 'R';
2772        chip_rev[1] = 'e';
2773        chip_rev[2] = 'v';
2774        chip_rev[3] = '-';
2775        chip_rev[4] = ioc->attr->asic_rev;
2776        chip_rev[5] = '\0';
2777}
2778
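/*
 * Example output (asic_rev 'B' is an assumed value, for illustration
 * only): bfa_ioc_get_pci_chip_rev() above fills the buffer with "Rev-B".
 */
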
2779void
2780bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2781{
2782        memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2783        memcpy(optrom_ver, ioc->attr->optrom_version,
2784                      BFA_VERSION_LEN);
2785}
2786
2787void
2788bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2789{
2790        memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2791        strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2792}
2793
2794void
2795bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2796{
2797        struct bfi_ioc_attr_s   *ioc_attr;
2798        u8 nports = bfa_ioc_get_nports(ioc);
2799
2800        WARN_ON(!model);
2801        memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2802
2803        ioc_attr = ioc->attr;
2804
2805        if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2806                (!bfa_mfg_is_mezz(ioc_attr->card_type)))
2807                snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2808                        BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2809        else
2810                snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2811                        BFA_MFG_NAME, ioc_attr->card_type);
2812}
2813
2814enum bfa_ioc_state
2815bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2816{
2817        enum bfa_iocpf_state iocpf_st;
2818        enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2819
2820        if (ioc_st == BFA_IOC_ENABLING ||
2821                ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2822
2823                iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2824
2825                switch (iocpf_st) {
2826                case BFA_IOCPF_SEMWAIT:
2827                        ioc_st = BFA_IOC_SEMWAIT;
2828                        break;
2829
2830                case BFA_IOCPF_HWINIT:
2831                        ioc_st = BFA_IOC_HWINIT;
2832                        break;
2833
2834                case BFA_IOCPF_FWMISMATCH:
2835                        ioc_st = BFA_IOC_FWMISMATCH;
2836                        break;
2837
2838                case BFA_IOCPF_FAIL:
2839                        ioc_st = BFA_IOC_FAIL;
2840                        break;
2841
2842                case BFA_IOCPF_INITFAIL:
2843                        ioc_st = BFA_IOC_INITFAIL;
2844                        break;
2845
2846                default:
2847                        break;
2848                }
2849        }
2850
2851        return ioc_st;
2852}
2853
2854void
2855bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2856{
2857        memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2858
2859        ioc_attr->state = bfa_ioc_get_state(ioc);
2860        ioc_attr->port_id = bfa_ioc_portid(ioc);
2861        ioc_attr->port_mode = ioc->port_mode;
2862        ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2863        ioc_attr->cap_bm = ioc->ad_cap_bm;
2864
2865        ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2866
2867        bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2868
2869        ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2870        ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2871        ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2872        bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2873}
2874
2875mac_t
2876bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2877{
2878        /*
2879         * Check the IOC type and return the appropriate MAC
2880         */
2881        if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2882                return ioc->attr->fcoe_mac;
2883        else
2884                return ioc->attr->mac;
2885}
2886
2887mac_t
2888bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2889{
2890        mac_t   m;
2891
2892        m = ioc->attr->mfg_mac;
2893        if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2894                m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2895        else
2896                bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2897                        bfa_ioc_pcifn(ioc));
2898
2899        return m;
2900}
2901
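/*
 * Example of the mfg MAC derivation above (the base address is assumed,
 * for illustration only): with mfg_mac 00:05:1e:00:10:a0 and pcifn 2,
 * the old WWN/MAC model adds pcifn to the last byte:
 *
 *	00:05:1e:00:10:a0  ->  00:05:1e:00:10:a2
 *
 * while newer card types increment across the low three bytes via
 * bfa_mfg_increment_wwn_mac(), so a carry out of the last byte
 * propagates instead of wrapping.
 */
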
2902/*
2903 * Send AEN notification
2904 */
2905void
2906bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2907{
2908        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2909        struct bfa_aen_entry_s  *aen_entry;
2910        enum bfa_ioc_type_e ioc_type;
2911
2912        bfad_get_aen_entry(bfad, aen_entry);
2913        if (!aen_entry)
2914                return;
2915
2916        ioc_type = bfa_ioc_get_type(ioc);
2917        switch (ioc_type) {
2918        case BFA_IOC_TYPE_FC:
2919                aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2920                break;
2921        case BFA_IOC_TYPE_FCoE:
2922                aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2923                aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2924                break;
2925        case BFA_IOC_TYPE_LL:
2926                aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2927                break;
2928        default:
2929                WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2930                break;
2931        }
2932
2933        /* Send the AEN notification */
2934        aen_entry->aen_data.ioc.ioc_type = ioc_type;
2935        bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2936                                  BFA_AEN_CAT_IOC, event);
2937}
2938
2939/*
2940 * Retrieve saved firmware trace from a prior IOC failure.
2941 */
2942bfa_status_t
2943bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2944{
2945        int     tlen;
2946
2947        if (ioc->dbg_fwsave_len == 0)
2948                return BFA_STATUS_ENOFSAVE;
2949
2950        tlen = *trclen;
2951        if (tlen > ioc->dbg_fwsave_len)
2952                tlen = ioc->dbg_fwsave_len;
2953
2954        memcpy(trcdata, ioc->dbg_fwsave, tlen);
2955        *trclen = tlen;
2956        return BFA_STATUS_OK;
2957}
2958
2959
2960/*
2961 * Retrieve the current firmware trace from IOC smem.
2962 */
2963bfa_status_t
2964bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2965{
2966        u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2967        int tlen;
2968        bfa_status_t status;
2969
2970        bfa_trc(ioc, *trclen);
2971
2972        tlen = *trclen;
2973        if (tlen > BFA_DBG_FWTRC_LEN)
2974                tlen = BFA_DBG_FWTRC_LEN;
2975
2976        status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2977        *trclen = tlen;
2978        return status;
2979}
2980
2981static void
2982bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2983{
2984        struct bfa_mbox_cmd_s cmd;
2985        struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2986
2987        bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2988                    bfa_ioc_portid(ioc));
2989        req->clscode = cpu_to_be16(ioc->clscode);
2990        bfa_ioc_mbox_queue(ioc, &cmd);
2991}
2992
2993static void
2994bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2995{
2996        u32 fwsync_iter = 1000;
2997
2998        bfa_ioc_send_fwsync(ioc);
2999
3000        /*
3001         * After sending a fw sync mbox command, wait for it to
3002         * take effect.  We will not wait for a response because
3003         *    1. fw_sync mbox cmd doesn't have a response.
3004         *    2. Even if we implement that, interrupts might not
3005         *       be enabled when we call this function.
3006         * So, just keep checking if any mbox cmd is pending, and
3007         * after waiting for a reasonable amount of time, go ahead.
3008         * It is possible that fw has crashed and the mbox command
3009         * is never acknowledged.
3010         */
3011        while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3012                fwsync_iter--;
3013}
3014
3015/*
3016 * Dump firmware smem
3017 */
3018bfa_status_t
3019bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3020                                u32 *offset, int *buflen)
3021{
3022        u32 loff;
3023        int dlen;
3024        bfa_status_t status;
3025        u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3026
3027        if (*offset >= smem_len) {
3028                *offset = *buflen = 0;
3029                return BFA_STATUS_EINVAL;
3030        }
3031
3032        loff = *offset;
3033        dlen = *buflen;
3034
3035        /*
3036         * On the first smem read, sync smem before proceeding.
3037         * There is no need to sync before reading every chunk.
3038         */
3039        if (loff == 0)
3040                bfa_ioc_fwsync(ioc);
3041
3042        if ((loff + dlen) >= smem_len)
3043                dlen = smem_len - loff;
3044
3045        status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3046
3047        if (status != BFA_STATUS_OK) {
3048                *offset = *buflen = 0;
3049                return status;
3050        }
3051
3052        *offset += dlen;
3053
3054        if (*offset >= smem_len)
3055                *offset = 0;
3056
3057        *buflen = dlen;
3058
3059        return status;
3060}
3061
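/*
 * Illustrative caller loop for bfa_ioc_debug_fwcore() above (sketch
 * only; buf, chunk_sz and the consumption step are hypothetical):
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = chunk_sz;
 *		if (bfa_ioc_debug_fwcore(ioc, buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		(consume len bytes from buf)
 *	} while (off != 0);	(off wraps to 0 at the end of smem)
 */
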
3062/*
3063 * Firmware statistics
3064 */
3065bfa_status_t
3066bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3067{
3068        u32 loff = BFI_IOC_FWSTATS_OFF +
3069                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3070        int tlen;
3071        bfa_status_t status;
3072
3073        if (ioc->stats_busy) {
3074                bfa_trc(ioc, ioc->stats_busy);
3075                return BFA_STATUS_DEVBUSY;
3076        }
3077        ioc->stats_busy = BFA_TRUE;
3078
3079        tlen = sizeof(struct bfa_fw_stats_s);
3080        status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3081
3082        ioc->stats_busy = BFA_FALSE;
3083        return status;
3084}
3085
3086bfa_status_t
3087bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3088{
3089        u32 loff = BFI_IOC_FWSTATS_OFF +
3090                BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3091        int tlen;
3092        bfa_status_t status;
3093
3094        if (ioc->stats_busy) {
3095                bfa_trc(ioc, ioc->stats_busy);
3096                return BFA_STATUS_DEVBUSY;
3097        }
3098        ioc->stats_busy = BFA_TRUE;
3099
3100        tlen = sizeof(struct bfa_fw_stats_s);
3101        status = bfa_ioc_smem_clr(ioc, loff, tlen);
3102
3103        ioc->stats_busy = BFA_FALSE;
3104        return status;
3105}
3106
3107/*
3108 * Save firmware trace if configured.
3109 */
3110void
3111bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3112{
3113        int             tlen;
3114
3115        if (ioc->dbg_fwsave_once) {
3116                ioc->dbg_fwsave_once = BFA_FALSE;
3117                if (ioc->dbg_fwsave_len) {
3118                        tlen = ioc->dbg_fwsave_len;
3119                        bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3120                }
3121        }
3122}
3123
3124/*
3125 * Firmware failure detected. Start recovery actions.
3126 */
3127static void
3128bfa_ioc_recover(struct bfa_ioc_s *ioc)
3129{
3130        bfa_ioc_stats(ioc, ioc_hbfails);
3131        ioc->stats.hb_count = ioc->hb_count;
3132        bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3133}
3134
3135/*
3136 *  BFA IOC PF private functions
3137 */
3138static void
3139bfa_iocpf_timeout(void *ioc_arg)
3140{
3141        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3142
3143        bfa_trc(ioc, 0);
3144        bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3145}
3146
3147static void
3148bfa_iocpf_sem_timeout(void *ioc_arg)
3149{
3150        struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3151
3152        bfa_ioc_hw_sem_get(ioc);
3153}
3154
3155static void
3156bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3157{
3158        u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3159
3160        bfa_trc(ioc, fwstate);
3161
3162        if (fwstate == BFI_IOC_DISABLED) {
3163                bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3164                return;
3165        }
3166
3167        if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) {
3168                bfa_iocpf_timeout(ioc);
3169        } else {
3170                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3171                bfa_iocpf_poll_timer_start(ioc);
3172        }
3173}
3174
3175static void
3176bfa_iocpf_poll_timeout(void *ioc_arg)
3177{
3178        struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3179
3180        bfa_ioc_poll_fwinit(ioc);
3181}
3182
3183/*
3184 *  bfa timer function
3185 */
3186void
3187bfa_timer_beat(struct bfa_timer_mod_s *mod)
3188{
3189        struct list_head *qh = &mod->timer_q;
3190        struct list_head *qe, *qe_next;
3191        struct bfa_timer_s *elem;
3192        struct list_head timedout_q;
3193
3194        INIT_LIST_HEAD(&timedout_q);
3195
3196        qe = bfa_q_next(qh);
3197
3198        while (qe != qh) {
3199                qe_next = bfa_q_next(qe);
3200
3201                elem = (struct bfa_timer_s *) qe;
3202                if (elem->timeout <= BFA_TIMER_FREQ) {
3203                        elem->timeout = 0;
3204                        list_del(&elem->qe);
3205                        list_add_tail(&elem->qe, &timedout_q);
3206                } else {
3207                        elem->timeout -= BFA_TIMER_FREQ;
3208                }
3209
3210                qe = qe_next;   /* go to next elem */
3211        }
3212
3213        /*
3214         * Pop all the timeout entries
3215         */
3216        while (!list_empty(&timedout_q)) {
3217                bfa_q_deq(&timedout_q, &elem);
3218                elem->timercb(elem->arg);
3219        }
3220}
3221
3222/*
3223 * Should be called with lock protection
3224 */
3225void
3226bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3227                    void (*timercb) (void *), void *arg, unsigned int timeout)
3228{
3229
3230        WARN_ON(timercb == NULL);
3231        WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3232
3233        timer->timeout = timeout;
3234        timer->timercb = timercb;
3235        timer->arg = arg;
3236
3237        list_add_tail(&timer->qe, &mod->timer_q);
3238}
3239
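/*
 * Illustrative timer arming (sketch only; my_timeout, my_mod, my_timer
 * and my_arg are hypothetical). The callback fires from
 * bfa_timer_beat() above, which the driver ticks every BFA_TIMER_FREQ
 * msecs under the same lock:
 *
 *	static void my_timeout(void *arg)
 *	{
 *		(handle the expiry)
 *	}
 *
 *	bfa_timer_begin(my_mod, &my_timer, my_timeout, my_arg, 2000);
 */
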
3240/*
3241 * Should be called with lock protection
3242 */
3243void
3244bfa_timer_stop(struct bfa_timer_s *timer)
3245{
3246        WARN_ON(list_empty(&timer->qe));
3247
3248        list_del(&timer->qe);
3249}
3250
3251/*
3252 *      ASIC block related
3253 */
3254static void
3255bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3256{
3257        struct bfa_ablk_cfg_inst_s *cfg_inst;
3258        int i, j;
3259        u16     be16;
3260
3261        for (i = 0; i < BFA_ABLK_MAX; i++) {
3262                cfg_inst = &cfg->inst[i];
3263                for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3264                        be16 = cfg_inst->pf_cfg[j].pers;
3265                        cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3266                        be16 = cfg_inst->pf_cfg[j].num_qpairs;
3267                        cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3268                        be16 = cfg_inst->pf_cfg[j].num_vectors;
3269                        cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3270                        be16 = cfg_inst->pf_cfg[j].bw_min;
3271                        cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3272                        be16 = cfg_inst->pf_cfg[j].bw_max;
3273                        cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3274                }
3275        }
3276}
3277
3278static void
3279bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3280{
3281        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3282        struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3283        bfa_ablk_cbfn_t cbfn;
3284
3285        WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3286        bfa_trc(ablk->ioc, msg->mh.msg_id);
3287
3288        switch (msg->mh.msg_id) {
3289        case BFI_ABLK_I2H_QUERY:
3290                if (rsp->status == BFA_STATUS_OK) {
3291                        memcpy(ablk->cfg, ablk->dma_addr.kva,
3292                                sizeof(struct bfa_ablk_cfg_s));
3293                        bfa_ablk_config_swap(ablk->cfg);
3294                        ablk->cfg = NULL;
3295                }
3296                break;
3297
3298        case BFI_ABLK_I2H_ADPT_CONFIG:
3299        case BFI_ABLK_I2H_PORT_CONFIG:
3300                /* update config port mode */
3301                ablk->ioc->port_mode_cfg = rsp->port_mode;
3302                break;
3303
3304        case BFI_ABLK_I2H_PF_DELETE:
3305        case BFI_ABLK_I2H_PF_UPDATE:
3306        case BFI_ABLK_I2H_OPTROM_ENABLE:
3307        case BFI_ABLK_I2H_OPTROM_DISABLE:
3308                /* No-op */
3309                break;
3310
3311        case BFI_ABLK_I2H_PF_CREATE:
3312                *(ablk->pcifn) = rsp->pcifn;
3313                ablk->pcifn = NULL;
3314                break;
3315
3316        default:
3317                WARN_ON(1);
3318        }
3319
3320        ablk->busy = BFA_FALSE;
3321        if (ablk->cbfn) {
3322                cbfn = ablk->cbfn;
3323                ablk->cbfn = NULL;
3324                cbfn(ablk->cbarg, rsp->status);
3325        }
3326}
3327
3328static void
3329bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3330{
3331        struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3332
3333        bfa_trc(ablk->ioc, event);
3334
3335        switch (event) {
3336        case BFA_IOC_E_ENABLED:
3337                WARN_ON(ablk->busy != BFA_FALSE);
3338                break;
3339
3340        case BFA_IOC_E_DISABLED:
3341        case BFA_IOC_E_FAILED:
3342                /* Fail any pending requests */
3343                ablk->pcifn = NULL;
3344                if (ablk->busy) {
3345                        if (ablk->cbfn)
3346                                ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3347                        ablk->cbfn = NULL;
3348                        ablk->busy = BFA_FALSE;
3349                }
3350                break;
3351
3352        default:
3353                WARN_ON(1);
3354                break;
3355        }
3356}
3357
3358u32
3359bfa_ablk_meminfo(void)
3360{
3361        return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3362}
3363
3364void
3365bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3366{
3367        ablk->dma_addr.kva = dma_kva;
3368        ablk->dma_addr.pa  = dma_pa;
3369}
3370
3371void
3372bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3373{
3374        ablk->ioc = ioc;
3375
3376        bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3377        bfa_q_qe_init(&ablk->ioc_notify);
3378        bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3379        list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3380}
3381
3382bfa_status_t
3383bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3384                bfa_ablk_cbfn_t cbfn, void *cbarg)
3385{
3386        struct bfi_ablk_h2i_query_s *m;
3387
3388        WARN_ON(!ablk_cfg);
3389
3390        if (!bfa_ioc_is_operational(ablk->ioc)) {
3391                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3392                return BFA_STATUS_IOC_FAILURE;
3393        }
3394
3395        if (ablk->busy) {
3396                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3397                return  BFA_STATUS_DEVBUSY;
3398        }
3399
3400        ablk->cfg = ablk_cfg;
3401        ablk->cbfn  = cbfn;
3402        ablk->cbarg = cbarg;
3403        ablk->busy  = BFA_TRUE;
3404
3405        m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3406        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3407                    bfa_ioc_portid(ablk->ioc));
3408        bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3409        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3410
3411        return BFA_STATUS_OK;
3412}
3413
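/*
 * Illustrative use of bfa_ablk_query() above (sketch only; the
 * completion callback and my_cfg are hypothetical):
 *
 *	static void my_query_done(void *cbarg, enum bfa_status status)
 *	{
 *		struct bfa_ablk_cfg_s *cfg = cbarg;
 *
 *		if (status == BFA_STATUS_OK)
 *			(cfg now holds the byte-swapped ASIC block config)
 *	}
 *
 *	if (bfa_ablk_query(ablk, &my_cfg, my_query_done, &my_cfg) !=
 *	    BFA_STATUS_OK)
 *		(handle DEVBUSY / IOC_FAILURE)
 */
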
3414bfa_status_t
3415bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3416                u8 port, enum bfi_pcifn_class personality,
3417                u16 bw_min, u16 bw_max,
3418                bfa_ablk_cbfn_t cbfn, void *cbarg)
3419{
3420        struct bfi_ablk_h2i_pf_req_s *m;
3421
3422        if (!bfa_ioc_is_operational(ablk->ioc)) {
3423                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3424                return BFA_STATUS_IOC_FAILURE;
3425        }
3426
3427        if (ablk->busy) {
3428                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3429        return BFA_STATUS_DEVBUSY;
3430        }
3431
3432        ablk->pcifn = pcifn;
3433        ablk->cbfn = cbfn;
3434        ablk->cbarg = cbarg;
3435        ablk->busy  = BFA_TRUE;
3436
3437        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3438        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3439                    bfa_ioc_portid(ablk->ioc));
3440        m->pers = cpu_to_be16((u16)personality);
3441        m->bw_min = cpu_to_be16(bw_min);
3442        m->bw_max = cpu_to_be16(bw_max);
3443        m->port = port;
3444        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3445
3446        return BFA_STATUS_OK;
3447}
3448
3449bfa_status_t
3450bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3451                bfa_ablk_cbfn_t cbfn, void *cbarg)
3452{
3453        struct bfi_ablk_h2i_pf_req_s *m;
3454
3455        if (!bfa_ioc_is_operational(ablk->ioc)) {
3456                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3457                return BFA_STATUS_IOC_FAILURE;
3458        }
3459
3460        if (ablk->busy) {
3461                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3462        return BFA_STATUS_DEVBUSY;
3463        }
3464
3465        ablk->cbfn  = cbfn;
3466        ablk->cbarg = cbarg;
3467        ablk->busy  = BFA_TRUE;
3468
3469        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3470        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3471                    bfa_ioc_portid(ablk->ioc));
3472        m->pcifn = (u8)pcifn;
3473        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3474
3475        return BFA_STATUS_OK;
3476}
3477
3478bfa_status_t
3479bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3480                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3481{
3482        struct bfi_ablk_h2i_cfg_req_s *m;
3483
3484        if (!bfa_ioc_is_operational(ablk->ioc)) {
3485                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3486                return BFA_STATUS_IOC_FAILURE;
3487        }
3488
3489        if (ablk->busy) {
3490                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3491        return BFA_STATUS_DEVBUSY;
3492        }
3493
3494        ablk->cbfn  = cbfn;
3495        ablk->cbarg = cbarg;
3496        ablk->busy  = BFA_TRUE;
3497
3498        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3499        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3500                    bfa_ioc_portid(ablk->ioc));
3501        m->mode = (u8)mode;
3502        m->max_pf = (u8)max_pf;
3503        m->max_vf = (u8)max_vf;
3504        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3505
3506        return BFA_STATUS_OK;
3507}
3508
3509bfa_status_t
3510bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3511                int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3512{
3513        struct bfi_ablk_h2i_cfg_req_s *m;
3514
3515        if (!bfa_ioc_is_operational(ablk->ioc)) {
3516                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3517                return BFA_STATUS_IOC_FAILURE;
3518        }
3519
3520        if (ablk->busy) {
3521                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3522        return BFA_STATUS_DEVBUSY;
3523        }
3524
3525        ablk->cbfn  = cbfn;
3526        ablk->cbarg = cbarg;
3527        ablk->busy  = BFA_TRUE;
3528
3529        m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3530        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3531                bfa_ioc_portid(ablk->ioc));
3532        m->port = (u8)port;
3533        m->mode = (u8)mode;
3534        m->max_pf = (u8)max_pf;
3535        m->max_vf = (u8)max_vf;
3536        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3537
3538        return BFA_STATUS_OK;
3539}
3540
3541bfa_status_t
3542bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3543                   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3544{
3545        struct bfi_ablk_h2i_pf_req_s *m;
3546
3547        if (!bfa_ioc_is_operational(ablk->ioc)) {
3548                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3549                return BFA_STATUS_IOC_FAILURE;
3550        }
3551
3552        if (ablk->busy) {
3553                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3554        return BFA_STATUS_DEVBUSY;
3555        }
3556
3557        ablk->cbfn  = cbfn;
3558        ablk->cbarg = cbarg;
3559        ablk->busy  = BFA_TRUE;
3560
3561        m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3562        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3563                bfa_ioc_portid(ablk->ioc));
3564        m->pcifn = (u8)pcifn;
3565        m->bw_min = cpu_to_be16(bw_min);
3566        m->bw_max = cpu_to_be16(bw_max);
3567        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3568
3569        return BFA_STATUS_OK;
3570}
3571
3572bfa_status_t
3573bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3574{
3575        struct bfi_ablk_h2i_optrom_s *m;
3576
3577        if (!bfa_ioc_is_operational(ablk->ioc)) {
3578                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3579                return BFA_STATUS_IOC_FAILURE;
3580        }
3581
3582        if (ablk->busy) {
3583                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3584        return BFA_STATUS_DEVBUSY;
3585        }
3586
3587        ablk->cbfn  = cbfn;
3588        ablk->cbarg = cbarg;
3589        ablk->busy  = BFA_TRUE;
3590
3591        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3592        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3593                bfa_ioc_portid(ablk->ioc));
3594        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3595
3596        return BFA_STATUS_OK;
3597}
3598
3599bfa_status_t
3600bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3601{
3602        struct bfi_ablk_h2i_optrom_s *m;
3603
3604        if (!bfa_ioc_is_operational(ablk->ioc)) {
3605                bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3606                return BFA_STATUS_IOC_FAILURE;
3607        }
3608
3609        if (ablk->busy) {
3610                bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3611        return BFA_STATUS_DEVBUSY;
3612        }
3613
3614        ablk->cbfn  = cbfn;
3615        ablk->cbarg = cbarg;
3616        ablk->busy  = BFA_TRUE;
3617
3618        m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3619        bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3620                bfa_ioc_portid(ablk->ioc));
3621        bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3622
3623        return BFA_STATUS_OK;
3624}
3625
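    /*
     * Illustrative caller sketch (not part of the driver): every ablk
     * request above follows the same pattern -- fail fast if the IOC is
     * not operational or another request is in flight, latch cbfn/cbarg,
     * build the BFI message in ablk->mb and queue it.  bfa_ablk_isr()
     * later clears ->busy and fires the callback with the firmware
     * status.  Assuming bfa_ablk_cbfn_t is void (*)(void *,
     * enum bfa_status), a hypothetical call site (pcifn and bandwidth
     * values are examples only) could look like:
     *
     *	static void
     *	pf_update_done(void *cbarg, enum bfa_status status)
     *	{
     *		if (status != BFA_STATUS_OK)
     *			pr_err("pf_update failed: %d\n", status);
     *	}
     *
     *	if (bfa_ablk_pf_update(ablk, 0, 2000, 8000,
     *			       pf_update_done, NULL) == BFA_STATUS_DEVBUSY)
     *		return;
     *
     * DEVBUSY simply means retry later: only one ablk request may be
     * pending at a time.
     */
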
3626/*
3627 *      SFP module specific
3628 */
3629
3630/* forward declarations */
3631static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3632static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3633static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3634                                enum bfa_port_speed portspeed);
3635
3636static void
3637bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3638{
3639        bfa_trc(sfp, sfp->lock);
3640        if (sfp->cbfn)
3641                sfp->cbfn(sfp->cbarg, sfp->status);
3642        sfp->lock = 0;
3643        sfp->cbfn = NULL;
3644}
3645
3646static void
3647bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3648{
3649        bfa_trc(sfp, sfp->portspeed);
3650        if (sfp->media) {
3651                bfa_sfp_media_get(sfp);
3652                if (sfp->state_query_cbfn)
3653                        sfp->state_query_cbfn(sfp->state_query_cbarg,
3654                                        sfp->status);
3655                sfp->media = NULL;
3656        }
3657
3658        if (sfp->portspeed) {
3659                sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3660                if (sfp->state_query_cbfn)
3661                        sfp->state_query_cbfn(sfp->state_query_cbarg,
3662                                        sfp->status);
3663                sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3664        }
3665
3666        sfp->state_query_lock = 0;
3667        sfp->state_query_cbfn = NULL;
3668}
3669
3670/*
3671 *      IOC event handler.
3672 */
3673static void
3674bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3675{
3676        struct bfa_sfp_s *sfp = sfp_arg;
3677
3678        bfa_trc(sfp, event);
3679        bfa_trc(sfp, sfp->lock);
3680        bfa_trc(sfp, sfp->state_query_lock);
3681
3682        switch (event) {
3683        case BFA_IOC_E_DISABLED:
3684        case BFA_IOC_E_FAILED:
3685                if (sfp->lock) {
3686                        sfp->status = BFA_STATUS_IOC_FAILURE;
3687                        bfa_cb_sfp_show(sfp);
3688                }
3689
3690                if (sfp->state_query_lock) {
3691                        sfp->status = BFA_STATUS_IOC_FAILURE;
3692                        bfa_cb_sfp_state_query(sfp);
3693                }
3694                break;
3695
3696        default:
3697                break;
3698        }
3699}
3700
3701/*
3702 * Post SFP State Change Notification (SCN) events to the AEN framework
3703 */
3704static void
3705bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3706{
3707        struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3708        struct bfa_aen_entry_s  *aen_entry;
3709        enum bfa_port_aen_event aen_evt = 0;
3710
3711        bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3712                      ((u64)rsp->event));
3713
3714        bfad_get_aen_entry(bfad, aen_entry);
3715        if (!aen_entry)
3716                return;
3717
3718        aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3719        aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3720        aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3721
3722        switch (rsp->event) {
3723        case BFA_SFP_SCN_INSERTED:
3724                aen_evt = BFA_PORT_AEN_SFP_INSERT;
3725                break;
3726        case BFA_SFP_SCN_REMOVED:
3727                aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3728                break;
3729        case BFA_SFP_SCN_FAILED:
3730                aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3731                break;
3732        case BFA_SFP_SCN_UNSUPPORT:
3733                aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3734                break;
3735        case BFA_SFP_SCN_POM:
3736                aen_evt = BFA_PORT_AEN_SFP_POM;
3737                aen_entry->aen_data.port.level = rsp->pomlvl;
3738                break;
3739        default:
3740                bfa_trc(sfp, rsp->event);
3741                WARN_ON(1);
3742        }
3743
3744        /* Send the AEN notification */
3745        bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3746                                  BFA_AEN_CAT_PORT, aen_evt);
3747}
3748
3749/*
3750 *      SFP get data send
3751 */
3752static void
3753bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3754{
3755        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3756
3757        bfa_trc(sfp, req->memtype);
3758
3759        /* build host command */
3760        bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3761                        bfa_ioc_portid(sfp->ioc));
3762
3763        /* send mbox cmd */
3764        bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3765}
3766
3767/*
3768 *      SFP is valid; read the SFP data
3769 */
3770static void
3771bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3772{
3773        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3774
3775        WARN_ON(sfp->lock != 0);
3776        bfa_trc(sfp, sfp->state);
3777
3778        sfp->lock = 1;
3779        sfp->memtype = memtype;
3780        req->memtype = memtype;
3781
3782        /* Setup SG list */
3783        bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3784
3785        bfa_sfp_getdata_send(sfp);
3786}
3787
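    /*
     * Flow note (descriptive): bfa_sfp_getdata() takes ->lock, points
     * the request's scatter-gather element at the ->dbuf DMA buffer and
     * sends BFI_SFP_H2I_SHOW.  Firmware DMAs the EEPROM image into that
     * buffer and answers with BFI_SFP_I2H_SHOW, which bfa_sfp_show_comp()
     * handles by copying the data out to the caller and dropping ->lock.
     */
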
3788/*
3789 *      SFP scn handler
3790 */
3791static void
3792bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3793{
3794        struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3795
3796        switch (rsp->event) {
3797        case BFA_SFP_SCN_INSERTED:
3798                sfp->state = BFA_SFP_STATE_INSERTED;
3799                sfp->data_valid = 0;
3800                bfa_sfp_scn_aen_post(sfp, rsp);
3801                break;
3802        case BFA_SFP_SCN_REMOVED:
3803                sfp->state = BFA_SFP_STATE_REMOVED;
3804                sfp->data_valid = 0;
3805                bfa_sfp_scn_aen_post(sfp, rsp);
3806                break;
3807        case BFA_SFP_SCN_FAILED:
3808                sfp->state = BFA_SFP_STATE_FAILED;
3809                sfp->data_valid = 0;
3810                bfa_sfp_scn_aen_post(sfp, rsp);
3811                break;
3812        case BFA_SFP_SCN_UNSUPPORT:
3813                sfp->state = BFA_SFP_STATE_UNSUPPORT;
3814                bfa_sfp_scn_aen_post(sfp, rsp);
3815                if (!sfp->lock)
3816                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3817                break;
3818        case BFA_SFP_SCN_POM:
3819                bfa_sfp_scn_aen_post(sfp, rsp);
3820                break;
3821        case BFA_SFP_SCN_VALID:
3822                sfp->state = BFA_SFP_STATE_VALID;
3823                if (!sfp->lock)
3824                        bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3825                break;
3826        default:
3827                bfa_trc(sfp, rsp->event);
3828                WARN_ON(1);
3829        }
3830}
3831
3832/*
3833 * SFP show complete
3834 */
3835static void
3836bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3837{
3838        struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3839
3840        if (!sfp->lock) {
3841                /*
3842                 * receiving response after ioc failure
3843                 */
3844                bfa_trc(sfp, sfp->lock);
3845                return;
3846        }
3847
3848        bfa_trc(sfp, rsp->status);
3849        if (rsp->status == BFA_STATUS_OK) {
3850                sfp->data_valid = 1;
3851                if (sfp->state == BFA_SFP_STATE_VALID)
3852                        sfp->status = BFA_STATUS_OK;
3853                else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3854                        sfp->status = BFA_STATUS_SFP_UNSUPP;
3855                else
3856                        bfa_trc(sfp, sfp->state);
3857        } else {
3858                sfp->data_valid = 0;
3859                sfp->status = rsp->status;
3860                /* sfpshow shouldn't change sfp state */
3861        }
3862
3863        bfa_trc(sfp, sfp->memtype);
3864        if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3865                bfa_trc(sfp, sfp->data_valid);
3866                if (sfp->data_valid) {
3867                        u32     size = sizeof(struct sfp_mem_s);
3868                        u8 *des = (u8 *)(sfp->sfpmem);
3869                        memcpy(des, sfp->dbuf_kva, size);
3870                }
3871                /*
3872                 * Queue completion callback.
3873                 */
3874                bfa_cb_sfp_show(sfp);
3875        } else
3876                sfp->lock = 0;
3877
3878        bfa_trc(sfp, sfp->state_query_lock);
3879        if (sfp->state_query_lock) {
3880                sfp->state = rsp->state;
3881                /* Complete callback */
3882                bfa_cb_sfp_state_query(sfp);
3883        }
3884}
3885
3886/*
3887 *      Query firmware for the SFP state
3888 */
3889static void
3890bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3891{
3892        struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3893
3894        /* Should not be doing query if not in _INIT state */
3895        WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3896        WARN_ON(sfp->state_query_lock != 0);
3897        bfa_trc(sfp, sfp->state);
3898
3899        sfp->state_query_lock = 1;
3900        req->memtype = 0;
3901
3902        if (!sfp->lock)
3903                bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3904}
3905
3906static void
3907bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3908{
3909        enum bfa_defs_sfp_media_e *media = sfp->media;
3910
3911        *media = BFA_SFP_MEDIA_UNKNOWN;
3912
3913        if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3914                *media = BFA_SFP_MEDIA_UNSUPPORT;
3915        else if (sfp->state == BFA_SFP_STATE_VALID) {
3916                union sfp_xcvr_e10g_code_u e10g;
3917                struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3918                u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3919                                (sfpmem->srlid_base.xcvr[5] >> 1);
3920
3921                e10g.b = sfpmem->srlid_base.xcvr[0];
3922                bfa_trc(sfp, e10g.b);
3923                bfa_trc(sfp, xmtr_tech);
3924                /* check fc transmitter tech */
3925                if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3926                    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3927                    (xmtr_tech & SFP_XMTR_TECH_CA))
3928                        *media = BFA_SFP_MEDIA_CU;
3929                else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3930                         (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3931                        *media = BFA_SFP_MEDIA_EL;
3932                else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3933                         (xmtr_tech & SFP_XMTR_TECH_LC))
3934                        *media = BFA_SFP_MEDIA_LW;
3935                else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3936                         (xmtr_tech & SFP_XMTR_TECH_SN) ||
3937                         (xmtr_tech & SFP_XMTR_TECH_SA))
3938                        *media = BFA_SFP_MEDIA_SW;
3939                /* Check 10G Ethernet Compliance code */
3940                else if (e10g.r.e10g_sr)
3941                        *media = BFA_SFP_MEDIA_SW;
3942                else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3943                        *media = BFA_SFP_MEDIA_LW;
3944                else if (e10g.r.e10g_unall)
3945                        *media = BFA_SFP_MEDIA_UNKNOWN;
3946                else
3947                        bfa_trc(sfp, 0);
3948        } else
3949                bfa_trc(sfp, sfp->state);
3950}
3951
3952static bfa_status_t
3953bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3954{
3955        struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3956        struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3957        union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3958        union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3959
3960        if (portspeed == BFA_PORT_SPEED_10GBPS) {
3961                if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3962                        return BFA_STATUS_OK;
3963                else {
3964                        bfa_trc(sfp, e10g.b);
3965                        return BFA_STATUS_UNSUPP_SPEED;
3966                }
3967        }
3968        if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3969            ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3970            ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3971            ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3972            ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3973                return BFA_STATUS_OK;
3974        else {
3975                bfa_trc(sfp, portspeed);
3976                bfa_trc(sfp, fc3.b);
3977                bfa_trc(sfp, e10g.b);
3978                return BFA_STATUS_UNSUPP_SPEED;
3979        }
3980}
3981
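    /*
     * Worked example of the check above: a request for
     * BFA_PORT_SPEED_8GBPS is accepted iff the transceiver advertises
     * the 800 MB/s FC speed code (fc3.r.mb800), while
     * BFA_PORT_SPEED_10GBPS is special-cased and requires the
     * 10GBASE-SR or 10GBASE-LR Ethernet compliance bit instead.
     */
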
3982/*
3983 *      SFP hmbox handler
3984 */
3985void
3986bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3987{
3988        struct bfa_sfp_s *sfp = sfparg;
3989
3990        switch (msg->mh.msg_id) {
3991        case BFI_SFP_I2H_SHOW:
3992                bfa_sfp_show_comp(sfp, msg);
3993                break;
3994
3995        case BFI_SFP_I2H_SCN:
3996                bfa_sfp_scn(sfp, msg);
3997                break;
3998
3999        default:
4000                bfa_trc(sfp, msg->mh.msg_id);
4001                WARN_ON(1);
4002        }
4003}
4004
4005/*
4006 *      Return the DMA memory size needed by the SFP module.
4007 */
4008u32
4009bfa_sfp_meminfo(void)
4010{
4011        return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4012}
4013
4014/*
4015 *      Attach virtual and physical memory for SFP.
4016 */
4017void
4018bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4019                struct bfa_trc_mod_s *trcmod)
4020{
4021        sfp->dev = dev;
4022        sfp->ioc = ioc;
4023        sfp->trcmod = trcmod;
4024
4025        sfp->cbfn = NULL;
4026        sfp->cbarg = NULL;
4027        sfp->sfpmem = NULL;
4028        sfp->lock = 0;
4029        sfp->data_valid = 0;
4030        sfp->state = BFA_SFP_STATE_INIT;
4031        sfp->state_query_lock = 0;
4032        sfp->state_query_cbfn = NULL;
4033        sfp->state_query_cbarg = NULL;
4034        sfp->media = NULL;
4035        sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4036        sfp->is_elb = BFA_FALSE;
4037
4038        bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4039        bfa_q_qe_init(&sfp->ioc_notify);
4040        bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4041        list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4042}
4043
4044/*
4045 *      Claim Memory for SFP
4046 */
4047void
4048bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4049{
4050        sfp->dbuf_kva   = dm_kva;
4051        sfp->dbuf_pa    = dm_pa;
4052        memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4056}
4057
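    /*
     * Setup sketch (hypothetical caller): the module expects its DMA
     * memory to be carved out of a larger driver allocation sized via
     * bfa_sfp_meminfo(), e.g.:
     *
     *	u32 len = bfa_sfp_meminfo();
     *
     *	bfa_sfp_attach(sfp, ioc, bfad, trcmod);
     *	bfa_sfp_memclaim(sfp, dm_kva, dm_pa);
     *	dm_kva += len;
     *	dm_pa += len;
     *
     * Advancing the kva/pa cursors is the caller's job; the arguments
     * above are passed by value.
     */
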
4058/*
4059 * Show SFP eeprom content
4060 *
4061 * @param[in] sfp   - bfa sfp module
4062 *
4063 * @param[out] sfpmem - sfp eeprom data
4064 *
4065 */
4066bfa_status_t
4067bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4068                bfa_cb_sfp_t cbfn, void *cbarg)
4069{
4071        if (!bfa_ioc_is_operational(sfp->ioc)) {
4072                bfa_trc(sfp, 0);
4073                return BFA_STATUS_IOC_NON_OP;
4074        }
4075
4076        if (sfp->lock) {
4077                bfa_trc(sfp, 0);
4078                return BFA_STATUS_DEVBUSY;
4079        }
4080
4081        sfp->cbfn = cbfn;
4082        sfp->cbarg = cbarg;
4083        sfp->sfpmem = sfpmem;
4084
4085        bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4086        return BFA_STATUS_OK;
4087}
4088
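    /*
     * Usage sketch (hypothetical caller; consume() is a stand-in for
     * real handling), assuming bfa_cb_sfp_t is
     * void (*)(void *, bfa_status_t).  The call returns immediately;
     * *sfpmem is only valid once the callback reports BFA_STATUS_OK:
     *
     *	static void
     *	show_done(void *cbarg, bfa_status_t status)
     *	{
     *		struct sfp_mem_s *mem = cbarg;
     *
     *		if (status == BFA_STATUS_OK)
     *			consume(mem);
     *	}
     *
     *	if (bfa_sfp_show(sfp, &sfpmem, show_done, &sfpmem) != BFA_STATUS_OK)
     *		return;
     */
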
4089/*
4090 * Return SFP Media type
4091 *
4092 * @param[in] sfp   - bfa sfp module
4093 *
4094 * @param[out] media - SFP media type detected
4095 *
4096 */
4097bfa_status_t
4098bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4099                bfa_cb_sfp_t cbfn, void *cbarg)
4100{
4101        if (!bfa_ioc_is_operational(sfp->ioc)) {
4102                bfa_trc(sfp, 0);
4103                return BFA_STATUS_IOC_NON_OP;
4104        }
4105
4106        sfp->media = media;
4107        if (sfp->state == BFA_SFP_STATE_INIT) {
4108                if (sfp->state_query_lock) {
4109                        bfa_trc(sfp, 0);
4110                        return BFA_STATUS_DEVBUSY;
4111                } else {
4112                        sfp->state_query_cbfn = cbfn;
4113                        sfp->state_query_cbarg = cbarg;
4114                        bfa_sfp_state_query(sfp);
4115                        return BFA_STATUS_SFP_NOT_READY;
4116                }
4117        }
4118
4119        bfa_sfp_media_get(sfp);
4120        return BFA_STATUS_OK;
4121}
4122
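    /*
     * Note on the deferred path (descriptive): BFA_STATUS_SFP_NOT_READY
     * is not a failure.  It means the SFP state machine is still in
     * _INIT, a firmware state query has been kicked off, and *media is
     * filled in when cbfn fires.  A caller should treat BFA_STATUS_OK
     * as "answered synchronously" and SFP_NOT_READY as "answer arrives
     * via the callback".
     */
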
4123/*
4124 * Check if the user-set port speed is allowed by the SFP
4125 *
4126 * @param[in] sfp   - bfa sfp module
4127 * @param[in] portspeed - port speed from user
4128 *
4129 */
4130bfa_status_t
4131bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4132                bfa_cb_sfp_t cbfn, void *cbarg)
4133{
4134        WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4135
4136        if (!bfa_ioc_is_operational(sfp->ioc))
4137                return BFA_STATUS_IOC_NON_OP;
4138
4139        /* For Mezz cards, all speeds are allowed */
4140        if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4141                return BFA_STATUS_OK;
4142
4143        /* Check SFP state */
4144        sfp->portspeed = portspeed;
4145        if (sfp->state == BFA_SFP_STATE_INIT) {
4146                if (sfp->state_query_lock) {
4147                        bfa_trc(sfp, 0);
4148                        return BFA_STATUS_DEVBUSY;
4149                } else {
4150                        sfp->state_query_cbfn = cbfn;
4151                        sfp->state_query_cbarg = cbarg;
4152                        bfa_sfp_state_query(sfp);
4153                        return BFA_STATUS_SFP_NOT_READY;
4154                }
4155        }
4156
4157        if (sfp->state == BFA_SFP_STATE_REMOVED ||
4158            sfp->state == BFA_SFP_STATE_FAILED) {
4159                bfa_trc(sfp, sfp->state);
4160                return BFA_STATUS_NO_SFP_DEV;
4161        }
4162
4163        if (sfp->state == BFA_SFP_STATE_INSERTED) {
4164                bfa_trc(sfp, sfp->state);
4165                return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4166        }
4167
4168        /* For eloopback, all speeds are allowed */
4169        if (sfp->is_elb)
4170                return BFA_STATUS_OK;
4171
4172        return bfa_sfp_speed_valid(sfp, portspeed);
4173}
4174
4175/*
4176 *      Flash module specific
4177 */
4178
4179/*
4180 * The FLASH DMA buffer must be big enough to hold both the MFG block and
4181 * the ASIC block (64k) at the same time, and must also be 2k aligned to
4182 * keep write segments from crossing a sector boundary.
4183 */
4184#define BFA_FLASH_SEG_SZ        2048
4185#define BFA_FLASH_DMA_BUF_SZ    \
4186        BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
4187
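    /*
     * Sizing example (struct size is illustrative): with
     * BFA_FLASH_SEG_SZ = 2048 and, say, a 256-byte bfa_mfg_block_s,
     * the macro rounds 0x10000 + 0x100 = 0x10100 up to the next 2k
     * boundary, giving a 0x10800-byte (66 KiB) DMA buffer.
     */
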
4188static void
4189bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4190                        int inst, int type)
4191{
4192        struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4193        struct bfa_aen_entry_s  *aen_entry;
4194
4195        bfad_get_aen_entry(bfad, aen_entry);
4196        if (!aen_entry)
4197                return;
4198
4199        aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4200        aen_entry->aen_data.audit.partition_inst = inst;
4201        aen_entry->aen_data.audit.partition_type = type;
4202
4203        /* Send the AEN notification */
4204        bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4205                                  BFA_AEN_CAT_AUDIT, event);
4206}
4207
4208static void
4209bfa_flash_cb(struct bfa_flash_s *flash)
4210{
4211        flash->op_busy = 0;
4212        if (flash->cbfn)
4213                flash->cbfn(flash->cbarg, flash->status);
4214}
4215
4216static void
4217bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4218{
4219        struct bfa_flash_s      *flash = cbarg;
4220
4221        bfa_trc(flash, event);
4222        switch (event) {
4223        case BFA_IOC_E_DISABLED:
4224        case BFA_IOC_E_FAILED:
4225                if (flash->op_busy) {
4226                        flash->status = BFA_STATUS_IOC_FAILURE;
4227                        flash->cbfn(flash->cbarg, flash->status);
4228                        flash->op_busy = 0;
4229                }
4230                break;
4231
4232        default:
4233                break;
4234        }
4235}
4236
4237/*
4238 * Send flash attribute query request.
4239 *
4240 * @param[in] cbarg - callback argument
4241 */
4242static void
4243bfa_flash_query_send(void *cbarg)
4244{
4245        struct bfa_flash_s *flash = cbarg;
4246        struct bfi_flash_query_req_s *msg =
4247                        (struct bfi_flash_query_req_s *) flash->mb.msg;
4248
4249        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4250                bfa_ioc_portid(flash->ioc));
4251        bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4252                flash->dbuf_pa);
4253        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4254}
4255
4256/*
4257 * Send flash write request.
4258 *
4259 * @param[in] flash - flash structure
4260 */
4261static void
4262bfa_flash_write_send(struct bfa_flash_s *flash)
4263{
4264        struct bfi_flash_write_req_s *msg =
4265                        (struct bfi_flash_write_req_s *) flash->mb.msg;
4266        u32     len;
4267
4268        msg->type = cpu_to_be32(flash->type);
4269        msg->instance = flash->instance;
4270        msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4271        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4272                flash->residue : BFA_FLASH_DMA_BUF_SZ;
4273        msg->length = cpu_to_be32(len);
4274
4275        /* indicate if it's the last msg of the whole write operation */
4276        msg->last = (len == flash->residue) ? 1 : 0;
4277
4278        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4279                        bfa_ioc_portid(flash->ioc));
4280        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4281        memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4282        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4283
4284        flash->residue -= len;
4285        flash->offset += len;
4286}
4287
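    /*
     * Worked example of the chunking above (using the illustrative
     * 66 KiB buffer from the sizing example earlier): writing a
     * 100 KiB image takes two requests -- 66 KiB with last = 0, then,
     * when this function is called again for the remainder, 34 KiB
     * with last = 1.  residue and offset track how much of ubuf is
     * still outstanding.
     */
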
4288/*
4289 * Send flash read request.
4290 *
4291 * @param[in] cbarg - callback argument
4292 */
4293static void
4294bfa_flash_read_send(void *cbarg)
4295{
4296        struct bfa_flash_s *flash = cbarg;
4297        struct bfi_flash_read_req_s *msg =
4298                        (struct bfi_flash_read_req_s *) flash->mb.msg;
4299        u32     len;
4300
4301        msg->type = cpu_to_be32(flash->type);
4302        msg->instance = flash->instance;
4303        msg->offset = cpu_to_be32(flash->addr_off + flash->offset);
4304        len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4305                        flash->residue : BFA_FLASH_DMA_BUF_SZ;
4306        msg->length = cpu_to_be32(len);
4307        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4308                bfa_ioc_portid(flash->ioc));
4309        bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4310        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4311}
4312
4313/*
4314 * Send flash erase request.
4315 *
4316 * @param[in] cbarg - callback argument
4317 */
4318static void
4319bfa_flash_erase_send(void *cbarg)
4320{
4321        struct bfa_flash_s *flash = cbarg;
4322        struct bfi_flash_erase_req_s *msg =
4323                        (struct bfi_flash_erase_req_s *) flash->mb.msg;
4324
4325        msg->type = cpu_to_be32(flash->type);
4326        msg->instance = flash->instance;
4327        bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4328                        bfa_ioc_portid(flash->ioc));
4329        bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4330}
4331
4332/*
4333 * Process flash response messages upon receiving interrupts.
4334 *
4335 * @param[in] flasharg - flash structure
4336 * @param[in] msg - message structure
4337 */
4338static void
4339bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4340{
4341        struct bfa_flash_s *flash = flasharg;
4342        u32     status;
4343
4344        union {
4345                struct bfi_flash_query_rsp_s *query;
4346                struct bfi_flash_erase_rsp_s *erase;
4347                struct bfi_flash_write_rsp_s *write;
4348                struct bfi_flash_read_rsp_s *read;
4349                struct bfi_flash_event_s *event;
4350                struct bfi_mbmsg_s   *msg;
4351        } m;
4352
4353        m.msg = msg;
4354        bfa_trc(flash, msg->mh.msg_id);
4355
4356        if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4357                /* receiving response after ioc failure */
4358                bfa_trc(flash, 0x9999);
4359                return;
4360        }
4361
4362        switch (msg->mh.msg_id) {
4363        case BFI_FLASH_I2H_QUERY_RSP:
4364                status = be32_to_cpu(m.query->status);
4365                bfa_trc(flash, status);
4366                if (status == BFA_STATUS_OK) {
4367                        u32     i;
4368                        struct bfa_flash_attr_s *attr, *f;
4369
4370                        attr = (struct bfa_flash_attr_s *) flash->ubuf;
4371                        f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4372                        attr->status = be32_to_cpu(f->status);
4373                        attr->npart = be32_to_cpu(f->npart);
4374                        bfa_trc(flash, attr->status);
4375                        bfa_trc(flash, attr->npart);
4376                        for (i = 0; i < attr->npart; i++) {
4377                                attr->part[i].part_type =
4378                                        be32_to_cpu(f->part[i].part_type);
4379                                attr->part[i].part_instance =
4380                                        be32_to_cpu(f->part[i].part_instance);