/* bnx2x_main.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
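
/* The macros above compose the firmware blob name passed to
 * request_firmware() during probe.  For a hypothetical 7.13.1.0 firmware
 * (version digits illustrative only), an E2 chip would request
 * "bnx2x/bnx2x-e2-7.13.1.0.fw" from the firmware search path.
 */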

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("QLogic "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int bnx2x_num_queues;
module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");
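
/* Illustrative module load using the parameters above (values are
 * examples only, not recommendations):
 *
 *	modprobe bnx2x num_queues=4 int_mode=2 disable_tpa=1
 */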

static struct workqueue_struct *bnx2x_wq;
struct workqueue_struct *bnx2x_iov_wq;

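/* Saved MAC-block register addresses/values; used by the previous-driver
 * unload flow later in this file so registers touched while taking over a
 * device left running by an earlier driver can be restored afterwards.
 */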
struct bnx2x_mac_vals {
	u32 xmac_addr;
	u32 xmac_val;
	u32 emac_addr;
	u32 emac_val;
	u32 umac_addr[2];
	u32 umac_val[2];
	u32 bmac_addr;
	u32 bmac_val[2];
};

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57712_VF,
	BCM57800,
	BCM57800_MF,
	BCM57800_VF,
	BCM57810,
	BCM57810_MF,
	BCM57810_VF,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MF,
	BCM57840_VF,
	BCM57811,
	BCM57811_MF,
	BCM57840_O,
	BCM57840_MFO,
	BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710]	= { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711]	= { "QLogic BCM57711 10 Gigabit PCIe" },
	[BCM57711E]	= { "QLogic BCM57711E 10 Gigabit PCIe" },
	[BCM57712]	= { "QLogic BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF]	= { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF]	= { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800]	= { "QLogic BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF]	= { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF]	= { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810]	= { "QLogic BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF]	= { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF]	= { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10]	= { "QLogic BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20]	= { "QLogic BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811]	= { "QLogic BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF]	= { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O]	= { "QLogic BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO]	= { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF]	= { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF	CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF	CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF	CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF	CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF	CHIP_NUM_57811_VF
#endif

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(QLOGIC,	PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
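
/* dmae_reg_go_c[idx] is the per-channel "go" doorbell register;
 * bnx2x_post_dmae() below writes 1 to it after copying a command into
 * DMAE command memory.
 */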

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/* Forward declaration */
static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);

static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

	REG_WR16(bp, addr, eq_prod);
}

/* Used only at init; locking is done by the MCP. */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
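
/* Hedged usage sketch for the indirect accessors above: a read-modify-write
 * through the PCI config window (SOME_GRC_REG and SOME_BIT are hypothetical
 * names, not real registers):
 *
 *	u32 v = bnx2x_reg_rd_ind(bp, SOME_GRC_REG);
 *	bnx2x_reg_wr_ind(bp, SOME_GRC_REG, v | SOME_BIT);
 */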

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
			  struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
	int i;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
			   "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
		   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u32 *comp)
{
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

	/* Lock the dmae channel. Disable BHs to prevent a deadlock,
	 * since this code is called both from syscall context and
	 * from the ndo_set_rx_mode() flow, which may run in BH context.
	 */

	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);

	return rc;
}
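
/* Sketch of the typical caller flow, mirroring what bnx2x_write_dmae()
 * below does: prepare a command, fill in addresses and length, then issue
 * it and wait for the completion word ("mapping", "grc_addr" and "len32"
 * are hypothetical locals):
 *
 *	struct dmae_command dmae;
 *
 *	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *	dmae.src_addr_lo = U64_LO(mapping);	// mapping is a dma_addr_t
 *	dmae.src_addr_hi = U64_HI(mapping);
 *	dmae.dst_addr_lo = grc_addr >> 2;	// GRC addresses are in dwords
 *	dmae.len = len32;			// length in 32-bit words
 *	bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
 */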

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	int rc;
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);

		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
	if (rc) {
		BNX2X_ERR("DMAE returned failure %d\n", rc);
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#endif
	}
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;
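
	/* len counts 32-bit dwords while offset advances in bytes,
	 * hence the "* 4" when stepping both addresses below.
	 */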

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

enum storms {
	XSTORM,
	TSTORM,
	CSTORM,
	USTORM,
	MAX_STORMS
};

#define STORMS_NUM 4
#define REGS_IN_ENTRY 4

static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
					      enum storms storm,
					      int entry)
{
	switch (storm) {
	case XSTORM:
		return XSTORM_ASSERT_LIST_OFFSET(entry);
	case TSTORM:
		return TSTORM_ASSERT_LIST_OFFSET(entry);
	case CSTORM:
		return CSTORM_ASSERT_LIST_OFFSET(entry);
	case USTORM:
		return USTORM_ASSERT_LIST_OFFSET(entry);
	case MAX_STORMS:
	default:
		BNX2X_ERR("unknown storm\n");
	}
	return -EINVAL;
}

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, j, rc = 0;
	enum storms storm;
	u32 regs[REGS_IN_ENTRY];
	u32 bar_storm_intmem[STORMS_NUM] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};
	u32 storm_assert_list_index[STORMS_NUM] = {
		XSTORM_ASSERT_LIST_INDEX_OFFSET,
		TSTORM_ASSERT_LIST_INDEX_OFFSET,
		CSTORM_ASSERT_LIST_INDEX_OFFSET,
		USTORM_ASSERT_LIST_INDEX_OFFSET
	};
	char *storms_string[STORMS_NUM] = {
		"XSTORM",
		"TSTORM",
		"CSTORM",
		"USTORM"
	};

	for (storm = XSTORM; storm < MAX_STORMS; storm++) {
		last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
				   storm_assert_list_index[storm]);
		if (last_idx)
			BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
				  storms_string[storm], last_idx);

		/* print the asserts */
		for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
			/* read a single assert entry */
			for (j = 0; j < REGS_IN_ENTRY; j++)
				regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
					  bnx2x_get_assert_list_entry(bp,
								      storm,
								      i) +
					  sizeof(u32) * j);

			/* log entry if it contains a valid assert */
			if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storms_string[storm], i, regs[3],
					  regs[2], regs[1], regs[0]);
				rc++;
			} else {
				break;
			}
		}
	}

	BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
		  CHIP_IS_E1(bp) ? "everest1" :
		  CHIP_IS_E1H(bp) ? "everest1h" :
		  CHIP_IS_E2(bp) ? "everest2" : "everest3",
		  BCM_5710_FW_MAJOR_VERSION,
		  BCM_5710_FW_MINOR_VERSION,
		  BCM_5710_FW_REVISION_VERSION);

	return rc;
}

#define MCPR_TRACE_BUFFER_SIZE	(0x800)
#define SCRATCH_BUFFER_SIZE(bp)	\
	(CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	if (pci_channel_offline(bp->pdev)) {
		BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
		return;
	}

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);

	/* sanity */
	if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
	    trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
				SCRATCH_BUFFER_SIZE(bp)) {
		BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
			  trace_shmem_base);
		return;
	}

	addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.\n");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
	if (mark >= trace_shmem_base || mark < addr + 4) {
		BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
		return;
	}
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* In E1 we must use only the PCI configuration space to disable
	 * the MSI/MSIX capability; disabling IGU_PF_CONF_MSI_MSIX_EN in
	 * the HC block is forbidden.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit this function.
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif
	if (IS_PF(bp) && disable_int)
		bnx2x_int_disable(bp);

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	if (IS_PF(bp)) {
		struct host_sp_status_block *def_sb = bp->def_status_blk;
		int data_size, cstorm_offset;

		BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
			  bp->def_idx, bp->def_att_idx, bp->attn_state,
			  bp->spq_prod_idx, bp->stats_counter);
		BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
			  def_sb->atten_status_block.attn_bits,
			  def_sb->atten_status_block.attn_bits_ack,
			  def_sb->atten_status_block.status_block_id,
			  def_sb->atten_status_block.attn_bits_index);
		BNX2X_ERR("     def (");
		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
			pr_cont("0x%x%s",
				def_sb->sp_sb.index_values[i],
				(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

		data_size = sizeof(struct hc_sp_status_block_data) /
			    sizeof(u32);
		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
		for (i = 0; i < data_size; i++)
			*((u32 *)&sp_sb_data + i) =
				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
					   i * sizeof(u32));

		pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
			sp_sb_data.igu_sb_id,
			sp_sb_data.igu_seg_id,
			sp_sb_data.p_func.pf_id,
			sp_sb_data.p_func.vnic_id,
			sp_sb_data.p_func.vf_id,
			sp_sb_data.p_func.vf_valid,
			sp_sb_data.state);
	}

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos) {
			if (!fp->txdata_ptr[cos])
				break;

			txdata = *fp->txdata_ptr[cos];

			if (!txdata.tx_cons_sb)
				continue;

			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* VF cannot access the FW reflection of the status block */
		if (IS_VF(bp))
			continue;

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (IS_PF(bp)) {
		/* event queue */
		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
		for (i = 0; i < NUM_EQ_DESC; i++) {
			u32 *data = (u32 *)&bp->eq_ring[i].message.data;

			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
				  i, bp->eq_ring[i].message.opcode,
				  bp->eq_ring[i].message.error);
			BNX2X_ERR("data: %x %x %x\n",
				  data[0], data[1], data[2]);
		}
	}

	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		if (!fp->rx_cons_sb)
			continue;

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (!bp->fp)
			break;

		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			if (!fp->txdata_ptr[cos])
				break;

			if (!txdata->tx_cons_sb)
				continue;

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	if (IS_PF(bp)) {
		int tmp_msg_en = bp->msg_enable;

		bnx2x_fw_dump(bp);
		bp->msg_enable |= NETIF_MSG_HW;
		BNX2X_ERR("Idle check (1st round) ----------\n");
		bnx2x_idle_chk(bp);
		BNX2X_ERR("Idle check (2nd round) ----------\n");
		bnx2x_idle_chk(bp);
		bp->msg_enable = tmp_msg_en;
		bnx2x_mc_assert(bp);
	}

	BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per-function HW
 * initialization.
 */
1200#define FLR_WAIT_USEC           10000   /* 10 milliseconds */
1201#define FLR_WAIT_INTERVAL       50      /* usec */
1202#define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1203
1204struct pbf_pN_buf_regs {
1205        int pN;
1206        u32 init_crd;
1207        u32 crd;
1208        u32 crd_freed;
1209};
1210
1211struct pbf_pN_cmd_regs {
1212        int pN;
1213        u32 lines_occup;
1214        u32 lines_freed;
1215};
1216
1217static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1218                                     struct pbf_pN_buf_regs *regs,
1219                                     u32 poll_count)
1220{
1221        u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1222        u32 cur_cnt = poll_count;
1223
1224        crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1225        crd = crd_start = REG_RD(bp, regs->crd);
1226        init_crd = REG_RD(bp, regs->init_crd);
1227
1228        DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1229        DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
1230        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1231
1232        while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1233               (init_crd - crd_start))) {
1234                if (cur_cnt--) {
1235                        udelay(FLR_WAIT_INTERVAL);
1236                        crd = REG_RD(bp, regs->crd);
1237                        crd_freed = REG_RD(bp, regs->crd_freed);
1238                } else {
1239                        DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1240                           regs->pN);
1241                        DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
1242                           regs->pN, crd);
1243                        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1244                           regs->pN, crd_freed);
1245                        break;
1246                }
1247        }
1248        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1249           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1250}
1251
1252static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1253                                     struct pbf_pN_cmd_regs *regs,
1254                                     u32 poll_count)
1255{
1256        u32 occup, to_free, freed, freed_start;
1257        u32 cur_cnt = poll_count;
1258
1259        occup = to_free = REG_RD(bp, regs->lines_occup);
1260        freed = freed_start = REG_RD(bp, regs->lines_freed);
1261
1262        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
1263        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1264
1265        while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1266                if (cur_cnt--) {
1267                        udelay(FLR_WAIT_INTERVAL);
1268                        occup = REG_RD(bp, regs->lines_occup);
1269                        freed = REG_RD(bp, regs->lines_freed);
1270                } else {
1271                        DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1272                           regs->pN);
1273                        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
1274                           regs->pN, occup);
1275                        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1276                           regs->pN, freed);
1277                        break;
1278                }
1279        }
1280        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1281           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1282}
1283
1284static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1285                                    u32 expected, u32 poll_count)
1286{
1287        u32 cur_cnt = poll_count;
1288        u32 val;
1289
1290        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1291                udelay(FLR_WAIT_INTERVAL);
1292
1293        return val;
1294}
1295
1296int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1297                                    char *msg, u32 poll_cnt)
1298{
1299        u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1300        if (val != 0) {
1301                BNX2X_ERR("%s usage count=%d\n", msg, val);
1302                return 1;
1303        }
1304        return 0;
1305}
1306
1307/* Common routines with VF FLR cleanup */
1308u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1309{
1310        /* adjust polling timeout */
1311        if (CHIP_REV_IS_EMUL(bp))
1312                return FLR_POLL_CNT * 2000;
1313
1314        if (CHIP_REV_IS_FPGA(bp))
1315                return FLR_POLL_CNT * 120;
1316
1317        return FLR_POLL_CNT;
1318}
1319
1320void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1321{
1322        struct pbf_pN_cmd_regs cmd_regs[] = {
1323                {0, (CHIP_IS_E3B0(bp)) ?
1324                        PBF_REG_TQ_OCCUPANCY_Q0 :
1325                        PBF_REG_P0_TQ_OCCUPANCY,
1326                    (CHIP_IS_E3B0(bp)) ?
1327                        PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1328                        PBF_REG_P0_TQ_LINES_FREED_CNT},
1329                {1, (CHIP_IS_E3B0(bp)) ?
1330                        PBF_REG_TQ_OCCUPANCY_Q1 :
1331                        PBF_REG_P1_TQ_OCCUPANCY,
1332                    (CHIP_IS_E3B0(bp)) ?
1333                        PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1334                        PBF_REG_P1_TQ_LINES_FREED_CNT},
1335                {4, (CHIP_IS_E3B0(bp)) ?
1336                        PBF_REG_TQ_OCCUPANCY_LB_Q :
1337                        PBF_REG_P4_TQ_OCCUPANCY,
1338                    (CHIP_IS_E3B0(bp)) ?
1339                        PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1340                        PBF_REG_P4_TQ_LINES_FREED_CNT}
1341        };
1342
1343        struct pbf_pN_buf_regs buf_regs[] = {
1344                {0, (CHIP_IS_E3B0(bp)) ?
1345                        PBF_REG_INIT_CRD_Q0 :
1346                        PBF_REG_P0_INIT_CRD ,
1347                    (CHIP_IS_E3B0(bp)) ?
1348                        PBF_REG_CREDIT_Q0 :
1349                        PBF_REG_P0_CREDIT,
1350                    (CHIP_IS_E3B0(bp)) ?
1351                        PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1352                        PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1353                {1, (CHIP_IS_E3B0(bp)) ?
1354                        PBF_REG_INIT_CRD_Q1 :
1355                        PBF_REG_P1_INIT_CRD,
1356                    (CHIP_IS_E3B0(bp)) ?
1357                        PBF_REG_CREDIT_Q1 :
1358                        PBF_REG_P1_CREDIT,
1359                    (CHIP_IS_E3B0(bp)) ?
1360                        PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1361                        PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1362                {4, (CHIP_IS_E3B0(bp)) ?
1363                        PBF_REG_INIT_CRD_LB_Q :
1364                        PBF_REG_P4_INIT_CRD,
1365                    (CHIP_IS_E3B0(bp)) ?
1366                        PBF_REG_CREDIT_LB_Q :
1367                        PBF_REG_P4_CREDIT,
1368                    (CHIP_IS_E3B0(bp)) ?
1369                        PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1370                        PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1371        };
1372
1373        int i;
1374
1375        /* Verify the command queues are flushed P0, P1, P4 */
1376        for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1377                bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1378
1379        /* Verify the transmission buffers are flushed P0, P1, P4 */
1380        for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1381                bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1382}
1383
1384#define OP_GEN_PARAM(param) \
1385        (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1386
1387#define OP_GEN_TYPE(type) \
1388        (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1389
1390#define OP_GEN_AGG_VECT(index) \
1391        (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1392
1393int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1394{
1395        u32 op_gen_command = 0;
1396        u32 comp_addr = BAR_CSTRORM_INTMEM +
1397                        CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1398
1399        if (REG_RD(bp, comp_addr)) {
1400                BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1401                return 1;
1402        }
1403
1404        op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1405        op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1406        op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1407        op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1408
1409        DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1410        REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1411
1412        if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1413                BNX2X_ERR("FW final cleanup did not succeed\n");
1414                DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1415                   (REG_RD(bp, comp_addr)));
1416                bnx2x_panic();
1417                return 1;
1418        }
1419        /* Zero completion for next FLR */
1420        REG_WR(bp, comp_addr, 0);
1421
1422        return 0;
1423}
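
/* How the op_gen command word above is composed: each OP_GEN_* helper shifts
 * its argument into the field position and masks it, so for clnup_func == 2
 * the word is
 *   OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX) |
 *   OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE) |
 *   OP_GEN_AGG_VECT(2) | (1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT);
 * writing it to XSDM_REG_OPERATION_GEN asks the FW to run the final cleanup
 * and, apparently, to complete on aggregated vector 2 (the function's index).
 */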
1424
1425u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1426{
1427        u16 status;
1428
1429        pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1430        return status & PCI_EXP_DEVSTA_TRPND;
1431}
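
/* A minimal polling sketch around the helper above; bnx2x_wait_pcie_idle()
 * is hypothetical (not part of the driver). The real caller,
 * bnx2x_pf_flr_clnup() below, simply sleeps 100 ms and warns once if
 * transactions are still pending.
 */
static bool __maybe_unused bnx2x_wait_pcie_idle(struct pci_dev *pdev, int tries)
{
	while (tries--) {
		if (!bnx2x_is_pcie_pending(pdev))
			return true;
		usleep_range(1000, 2000); /* 1-2 ms between device-status reads */
	}
	return false;
}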
1432
1433/* PF FLR specific routines
1434 */
1435static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1436{
1437        /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1438        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1439                        CFC_REG_NUM_LCIDS_INSIDE_PF,
1440                        "CFC PF usage counter timed out",
1441                        poll_cnt))
1442                return 1;
1443
1444        /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1445        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1446                        DORQ_REG_PF_USAGE_CNT,
1447                        "DQ PF usage counter timed out",
1448                        poll_cnt))
1449                return 1;
1450
1451        /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1452        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1453                        QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1454                        "QM PF usage counter timed out",
1455                        poll_cnt))
1456                return 1;
1457
1458        /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1459        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1460                        TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1461                        "Timers VNIC usage counter timed out",
1462                        poll_cnt))
1463                return 1;
1464        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1465                        TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1466                        "Timers NUM_SCANS usage counter timed out",
1467                        poll_cnt))
1468                return 1;
1469
1470        /* Wait DMAE PF usage counter to zero */
1471        if (bnx2x_flr_clnup_poll_hw_counter(bp,
1472                        dmae_reg_go_c[INIT_DMAE_C(bp)],
1473                        "DMAE command register timed out",
1474                        poll_cnt))
1475                return 1;
1476
1477        return 0;
1478}
1479
1480static void bnx2x_hw_enable_status(struct bnx2x *bp)
1481{
1482        u32 val;
1483
1484        val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1485        DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1486
1487        val = REG_RD(bp, PBF_REG_DISABLE_PF);
1488        DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1489
1490        val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1491        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1492
1493        val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1494        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1495
1496        val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1497        DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1498
1499        val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1500        DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1501
1502        val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1503        DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1504
1505        val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1506        DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1507           val);
1508}
1509
1510static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1511{
1512        u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1513
1514        DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1515
1516        /* Re-enable PF target read access */
1517        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1518
1519        /* Poll HW usage counters */
1520        DP(BNX2X_MSG_SP, "Polling usage counters\n");
1521        if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1522                return -EBUSY;
1523
1524        /* Zero the igu 'trailing edge' and 'leading edge' */
1525
1526        /* Send the FW cleanup command */
1527        if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1528                return -EBUSY;
1529
1530        /* ATC cleanup */
1531
1532        /* Verify TX hw is flushed */
1533        bnx2x_tx_hw_flushed(bp, poll_cnt);
1534
1535        /* Wait 100ms (not adjusted according to platform) */
1536        msleep(100);
1537
1538        /* Verify no pending pci transactions */
1539        if (bnx2x_is_pcie_pending(bp->pdev))
1540                BNX2X_ERR("PCIE Transactions still pending\n");
1541
1542        /* Debug */
1543        bnx2x_hw_enable_status(bp);
1544
1545        /*
1546         * Master enable - needed because WB DMAE writes are performed before
1547         * this register is re-initialized as part of the regular function init
1548         */
1549        REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1550
1551        return 0;
1552}
1553
1554static void bnx2x_hc_int_enable(struct bnx2x *bp)
1555{
1556        int port = BP_PORT(bp);
1557        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1558        u32 val = REG_RD(bp, addr);
1559        bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1560        bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1561        bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1562
1563        if (msix) {
1564                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1565                         HC_CONFIG_0_REG_INT_LINE_EN_0);
1566                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1567                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1568                if (single_msix)
1569                        val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1570        } else if (msi) {
1571                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1572                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1573                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1574                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1575        } else {
1576                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1577                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1578                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
1579                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1580
1581                if (!CHIP_IS_E1(bp)) {
1582                        DP(NETIF_MSG_IFUP,
1583                           "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1584
1585                        REG_WR(bp, addr, val);
1586
1587                        val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1588                }
1589        }
1590
1591        if (CHIP_IS_E1(bp))
1592                REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1593
1594        DP(NETIF_MSG_IFUP,
1595           "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1596           (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1597
1598        REG_WR(bp, addr, val);
1599        /*
1600         * Ensure that HC_CONFIG is written before leading/trailing edge config
1601         */
1602        barrier();
1603
1604        if (!CHIP_IS_E1(bp)) {
1605                /* init leading/trailing edge */
1606                if (IS_MF(bp)) {
1607                        val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1608                        if (bp->port.pmf)
1609                                /* enable nig and gpio3 attention */
1610                                val |= 0x1100;
1611                } else
1612                        val = 0xffff;
1613
1614                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1615                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1616        }
1617}
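
/* Worked example of the edge masks above: in MF mode the base mask is 0xee0f
 * plus this function's VN bit, so VN 0 yields 0xee0f | 0x10 = 0xee1f, and a
 * PMF additionally ORs in 0x1100 (NIG and GPIO3 attentions) for 0xff1f;
 * non-MF simply latches everything with 0xffff.
 */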
1618
1619static void bnx2x_igu_int_enable(struct bnx2x *bp)
1620{
1621        u32 val;
1622        bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1623        bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1624        bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1625
1626        val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1627
1628        if (msix) {
1629                val &= ~(IGU_PF_CONF_INT_LINE_EN |
1630                         IGU_PF_CONF_SINGLE_ISR_EN);
1631                val |= (IGU_PF_CONF_MSI_MSIX_EN |
1632                        IGU_PF_CONF_ATTN_BIT_EN);
1633
1634                if (single_msix)
1635                        val |= IGU_PF_CONF_SINGLE_ISR_EN;
1636        } else if (msi) {
1637                val &= ~IGU_PF_CONF_INT_LINE_EN;
1638                val |= (IGU_PF_CONF_MSI_MSIX_EN |
1639                        IGU_PF_CONF_ATTN_BIT_EN |
1640                        IGU_PF_CONF_SINGLE_ISR_EN);
1641        } else {
1642                val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1643                val |= (IGU_PF_CONF_INT_LINE_EN |
1644                        IGU_PF_CONF_ATTN_BIT_EN |
1645                        IGU_PF_CONF_SINGLE_ISR_EN);
1646        }
1647
1648        /* Clean previous status - need to configure igu prior to ack */
1649        if ((!msix) || single_msix) {
1650                REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1651                bnx2x_ack_int(bp);
1652        }
1653
1654        val |= IGU_PF_CONF_FUNC_EN;
1655
1656        DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1657           val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1658
1659        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1660
1661        if (val & IGU_PF_CONF_INT_LINE_EN)
1662                pci_intx(bp->pdev, true);
1663
1664        barrier();
1665
1666        /* init leading/trailing edge */
1667        if (IS_MF(bp)) {
1668                val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1669                if (bp->port.pmf)
1670                        /* enable nig and gpio3 attention */
1671                        val |= 0x1100;
1672        } else
1673                val = 0xffff;
1674
1675        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1676        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1677}
1678
1679void bnx2x_int_enable(struct bnx2x *bp)
1680{
1681        if (bp->common.int_block == INT_BLOCK_HC)
1682                bnx2x_hc_int_enable(bp);
1683        else
1684                bnx2x_igu_int_enable(bp);
1685}
1686
1687void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1688{
1689        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1690        int i, offset;
1691
1692        if (disable_hw)
1693                /* prevent the HW from sending interrupts */
1694                bnx2x_int_disable(bp);
1695
1696        /* make sure all ISRs are done */
1697        if (msix) {
1698                synchronize_irq(bp->msix_table[0].vector);
1699                offset = 1;
1700                if (CNIC_SUPPORT(bp))
1701                        offset++;
1702                for_each_eth_queue(bp, i)
1703                        synchronize_irq(bp->msix_table[offset++].vector);
1704        } else
1705                synchronize_irq(bp->pdev->irq);
1706
1707        /* make sure sp_task is not running */
1708        cancel_delayed_work(&bp->sp_task);
1709        cancel_delayed_work(&bp->period_task);
1710        flush_workqueue(bnx2x_wq);
1711}
1712
1713/* fast path */
1714
1715/*
1716 * General service functions
1717 */
1718
1719/* Return true if succeeded to acquire the lock */
1720static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1721{
1722        u32 lock_status;
1723        u32 resource_bit = (1 << resource);
1724        int func = BP_FUNC(bp);
1725        u32 hw_lock_control_reg;
1726
1727        DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1728           "Trying to take a lock on resource %d\n", resource);
1729
1730        /* Validating that the resource is within range */
1731        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1732                DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1733                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1734                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
1735                return false;
1736        }
1737
1738        if (func <= 5)
1739                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1740        else
1741                hw_lock_control_reg =
1742                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1743
1744        /* Try to acquire the lock */
1745        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1746        lock_status = REG_RD(bp, hw_lock_control_reg);
1747        if (lock_status & resource_bit)
1748                return true;
1749
1750        DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1751           "Failed to get a lock on resource %d\n", resource);
1752        return false;
1753}
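
/* Notes on the lock protocol above: writing the resource bit to
 * hw_lock_control_reg + 4 is the "take" request, and reading the control
 * register back shows whether this function actually won the bit. Functions
 * 0-5 index off MISC_REG_DRIVER_CONTROL_1 in 8-byte strides, while functions
 * 6 and 7 index off MISC_REG_DRIVER_CONTROL_7.
 */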
1754
1755/**
1756 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1757 *
1758 * @bp: driver handle
1759 *
1760 * Returns the recovery leader resource id according to the engine this function
1761 * belongs to. Currently only 2 engines are supported.
1762 */
1763static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1764{
1765        if (BP_PATH(bp))
1766                return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1767        else
1768                return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1769}
1770
1771/**
1772 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1773 *
1774 * @bp: driver handle
1775 *
1776 * Tries to acquire a leader lock for the current engine.
1777 */
1778static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1779{
1780        return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1781}
1782
1783static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1784
1785/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1786static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1787{
1788        /* Set the interrupt occurred bit for the sp-task to recognize it
1789         * must ack the interrupt and transition according to the IGU
1790         * state machine.
1791         */
1792        atomic_set(&bp->interrupt_occurred, 1);
1793
1794        /* The sp_task must execute only after this bit
1795         * is set, otherwise we will get out of sync and miss all
1796         * further interrupts. Hence, the barrier.
1797         */
1798        smp_wmb();
1799
1800        /* schedule sp_task to workqueue */
1801        return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1802}
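
/* The smp_wmb() above orders the interrupt_occurred store before the work is
 * queued; it presumably pairs with a read-side barrier in bnx2x_sp_task()
 * (not shown in this excerpt) so the task never observes the queued work
 * without also seeing the flag.
 */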
1803
1804void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1805{
1806        struct bnx2x *bp = fp->bp;
1807        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1808        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1809        enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1810        struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1811
1812        DP(BNX2X_MSG_SP,
1813           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1814           fp->index, cid, command, bp->state,
1815           rr_cqe->ramrod_cqe.ramrod_type);
1816
1817        /* If cid is within VF range, replace the slowpath object with the
1818         * one corresponding to this VF
1819         */
1820        if (cid >= BNX2X_FIRST_VF_CID  &&
1821            cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1822                bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1823
1824        switch (command) {
1825        case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1826                DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1827                drv_cmd = BNX2X_Q_CMD_UPDATE;
1828                break;
1829
1830        case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1831                DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1832                drv_cmd = BNX2X_Q_CMD_SETUP;
1833                break;
1834
1835        case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1836                DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1837                drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1838                break;
1839
1840        case (RAMROD_CMD_ID_ETH_HALT):
1841                DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1842                drv_cmd = BNX2X_Q_CMD_HALT;
1843                break;
1844
1845        case (RAMROD_CMD_ID_ETH_TERMINATE):
1846                DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1847                drv_cmd = BNX2X_Q_CMD_TERMINATE;
1848                break;
1849
1850        case (RAMROD_CMD_ID_ETH_EMPTY):
1851                DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1852                drv_cmd = BNX2X_Q_CMD_EMPTY;
1853                break;
1854
1855        case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1856                DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1857                drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1858                break;
1859
1860        default:
1861                BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1862                          command, fp->index);
1863                return;
1864        }
1865
1866        if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1867            q_obj->complete_cmd(bp, q_obj, drv_cmd))
1868                /* q_obj->complete_cmd() failure means that this was
1869                 * an unexpected completion.
1870                 *
1871                 * In this case we don't want to increase the bp->spq_left
1872                 * because apparently we haven't sent this command in the
1873                 * first place.
1874                 */
1875#ifdef BNX2X_STOP_ON_ERROR
1876                bnx2x_panic();
1877#else
1878                return;
1879#endif
1880
1881        smp_mb__before_atomic();
1882        atomic_inc(&bp->cq_spq_left);
1883        /* push the change in bp->spq_left towards memory */
1884        smp_mb__after_atomic();
1885
1886        DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1887
1888        if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1889            (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1890                /* if the Q update ramrod completed for the last Q in the
1891                 * AFEX vif set flow, then ACK the MCP at the end
1892                 *
1893                 * mark the pending-ACK-to-MCP bit first, to prevent the
1894                 * case where both bits are cleared. At the end of
1895                 * load/unload the driver checks that sp_state is
1896                 * cleared, and this ordering prevents
1897                 * races
1898                 */
1899                smp_mb__before_atomic();
1900                set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1901                wmb();
1902                clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1903                smp_mb__after_atomic();
1904
1905                /* schedule the sp task as mcp ack is required */
1906                bnx2x_schedule_sp_task(bp);
1907        }
1908
1909        return;
1910}
1911
1912irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1913{
1914        struct bnx2x *bp = netdev_priv(dev_instance);
1915        u16 status = bnx2x_ack_int(bp);
1916        u16 mask;
1917        int i;
1918        u8 cos;
1919
1920        /* Return here if interrupt is shared and it's not for us */
1921        if (unlikely(status == 0)) {
1922                DP(NETIF_MSG_INTR, "not our interrupt!\n");
1923                return IRQ_NONE;
1924        }
1925        DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1926
1927#ifdef BNX2X_STOP_ON_ERROR
1928        if (unlikely(bp->panic))
1929                return IRQ_HANDLED;
1930#endif
1931
1932        for_each_eth_queue(bp, i) {
1933                struct bnx2x_fastpath *fp = &bp->fp[i];
1934
1935                mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1936                if (status & mask) {
1937                        /* Handle Rx or Tx according to SB id */
1938                        for_each_cos_in_tx_queue(fp, cos)
1939                                prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1940                        prefetch(&fp->sb_running_index[SM_RX_ID]);
1941                        napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1942                        status &= ~mask;
1943                }
1944        }
1945
1946        if (CNIC_SUPPORT(bp)) {
1947                mask = 0x2;
1948                if (status & (mask | 0x1)) {
1949                        struct cnic_ops *c_ops = NULL;
1950
1951                        rcu_read_lock();
1952                        c_ops = rcu_dereference(bp->cnic_ops);
1953                        if (c_ops && (bp->cnic_eth_dev.drv_state &
1954                                      CNIC_DRV_STATE_HANDLES_IRQ))
1955                                c_ops->cnic_handler(bp->cnic_data, NULL);
1956                        rcu_read_unlock();
1957
1958                        status &= ~mask;
1959                }
1960        }
1961
1962        if (unlikely(status & 0x1)) {
1963
1964                /* schedule sp task to perform default status block work, ack
1965                 * attentions and enable interrupts.
1966                 */
1967                bnx2x_schedule_sp_task(bp);
1968
1969                status &= ~0x1;
1970                if (!status)
1971                        return IRQ_HANDLED;
1972        }
1973
1974        if (unlikely(status))
1975                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1976                   status);
1977
1978        return IRQ_HANDLED;
1979}
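
/* Status-word layout as decoded above (worked example, assuming
 * CNIC_SUPPORT(bp) == 1): bit 0 (0x1) is the default status block handed to
 * the sp task, 0x2 is the CNIC status block, and eth queue i maps to
 * 0x2 << (i + 1), i.e. fp[0] -> 0x4, fp[1] -> 0x8, and so on. Without CNIC
 * support the eth queues shift down to start at 0x2.
 */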
1980
1981/* Link */
1982
1983/*
1984 * General service functions
1985 */
1986
1987int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1988{
1989        u32 lock_status;
1990        u32 resource_bit = (1 << resource);
1991        int func = BP_FUNC(bp);
1992        u32 hw_lock_control_reg;
1993        int cnt;
1994
1995        /* Validating that the resource is within range */
1996        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1997                BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1998                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
1999                return -EINVAL;
2000        }
2001
2002        if (func <= 5) {
2003                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2004        } else {
2005                hw_lock_control_reg =
2006                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2007        }
2008
2009        /* Validating that the resource is not already taken */
2010        lock_status = REG_RD(bp, hw_lock_control_reg);
2011        if (lock_status & resource_bit) {
2012                BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
2013                   lock_status, resource_bit);
2014                return -EEXIST;
2015        }
2016
2017        /* Try for 5 seconds, polling every 5 ms */
2018        for (cnt = 0; cnt < 1000; cnt++) {
2019                /* Try to acquire the lock */
2020                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2021                lock_status = REG_RD(bp, hw_lock_control_reg);
2022                if (lock_status & resource_bit)
2023                        return 0;
2024
2025                usleep_range(5000, 10000);
2026        }
2027        BNX2X_ERR("Timeout\n");
2028        return -EAGAIN;
2029}
2030
2031int bnx2x_release_leader_lock(struct bnx2x *bp)
2032{
2033        return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2034}
2035
2036int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2037{
2038        u32 lock_status;
2039        u32 resource_bit = (1 << resource);
2040        int func = BP_FUNC(bp);
2041        u32 hw_lock_control_reg;
2042
2043        /* Validating that the resource is within range */
2044        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2045                BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2046                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
2047                return -EINVAL;
2048        }
2049
2050        if (func <= 5) {
2051                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2052        } else {
2053                hw_lock_control_reg =
2054                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2055        }
2056
2057        /* Validating that the resource is currently taken */
2058        lock_status = REG_RD(bp, hw_lock_control_reg);
2059        if (!(lock_status & resource_bit)) {
2060                BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2061                          lock_status, resource_bit);
2062                return -EFAULT;
2063        }
2064
2065        REG_WR(bp, hw_lock_control_reg, resource_bit);
2066        return 0;
2067}
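
/* A minimal usage sketch of the acquire/release pair above, following the
 * pattern the GPIO helpers below use; bnx2x_hw_lock_sketch() is hypothetical
 * and not part of the driver.
 */
static int __maybe_unused bnx2x_hw_lock_sketch(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	if (rc)	/* -EINVAL, -EEXIST or -EAGAIN from the helper above */
		return rc;
	/* ... read-modify-write MISC_REG_GPIO while holding the lock ... */
	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}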
2068
2069int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2070{
2071        /* The GPIO should be swapped if swap register is set and active */
2072        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2073                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2074        int gpio_shift = gpio_num +
2075                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2076        u32 gpio_mask = (1 << gpio_shift);
2077        u32 gpio_reg;
2078        int value;
2079
2080        if (gpio_num > MISC_REGISTERS_GPIO_3) {
2081                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2082                return -EINVAL;
2083        }
2084
2085        /* read GPIO value */
2086        gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2087
2088        /* get the requested pin value */
2089        if ((gpio_reg & gpio_mask) == gpio_mask)
2090                value = 1;
2091        else
2092                value = 0;
2093
2094        return value;
2095}
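
/* Worked example of the swap logic above: gpio_port is the port XOR'ed with
 * "both swap straps set", so with NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE
 * nonzero, port 0 reads port 1's pins - GPIO 2 then uses shift
 * 2 + MISC_REGISTERS_GPIO_PORT_SHIFT and mask (1 << shift). Without the swap
 * the shift is just gpio_num.
 */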
2096
2097int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2098{
2099        /* The GPIO should be swapped if swap register is set and active */
2100        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2101                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2102        int gpio_shift = gpio_num +
2103                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2104        u32 gpio_mask = (1 << gpio_shift);
2105        u32 gpio_reg;
2106
2107        if (gpio_num > MISC_REGISTERS_GPIO_3) {
2108                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2109                return -EINVAL;
2110        }
2111
2112        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2113        /* read GPIO and mask except the float bits */
2114        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2115
2116        switch (mode) {
2117        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2118                DP(NETIF_MSG_LINK,
2119                   "Set GPIO %d (shift %d) -> output low\n",
2120                   gpio_num, gpio_shift);
2121                /* clear FLOAT and set CLR */
2122                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2123                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2124                break;
2125
2126        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2127                DP(NETIF_MSG_LINK,
2128                   "Set GPIO %d (shift %d) -> output high\n",
2129                   gpio_num, gpio_shift);
2130                /* clear FLOAT and set SET */
2131                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2132                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2133                break;
2134
2135        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2136                DP(NETIF_MSG_LINK,
2137                   "Set GPIO %d (shift %d) -> input\n",
2138                   gpio_num, gpio_shift);
2139                /* set FLOAT */
2140                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2141                break;
2142
2143        default:
2144                break;
2145        }
2146
2147        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2148        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2149
2150        return 0;
2151}
2152
2153int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2154{
2155        u32 gpio_reg = 0;
2156        int rc = 0;
2157
2158        /* Any port swapping should be handled by caller. */
2159
2160        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2161        /* read GPIO and mask except the float bits */
2162        gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2163        gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2164        gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2165        gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2166
2167        switch (mode) {
2168        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2169                DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2170                /* set CLR */
2171                gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2172                break;
2173
2174        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2175                DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2176                /* set SET */
2177                gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2178                break;
2179
2180        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2181                DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2182                /* set FLOAT */
2183                gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2184                break;
2185
2186        default:
2187                BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2188                rc = -EINVAL;
2189                break;
2190        }
2191
2192        if (rc == 0)
2193                REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2194
2195        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2196
2197        return rc;
2198}
2199
2200int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2201{
2202        /* The GPIO should be swapped if swap register is set and active */
2203        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2204                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2205        int gpio_shift = gpio_num +
2206                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2207        u32 gpio_mask = (1 << gpio_shift);
2208        u32 gpio_reg;
2209
2210        if (gpio_num > MISC_REGISTERS_GPIO_3) {
2211                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2212                return -EINVAL;
2213        }
2214
2215        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2216        /* read GPIO int */
2217        gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2218
2219        switch (mode) {
2220        case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2221                DP(NETIF_MSG_LINK,
2222                   "Clear GPIO INT %d (shift %d) -> output low\n",
2223                   gpio_num, gpio_shift);
2224                /* clear SET and set CLR */
2225                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2226                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2227                break;
2228
2229        case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2230                DP(NETIF_MSG_LINK,
2231                   "Set GPIO INT %d (shift %d) -> output high\n",
2232                   gpio_num, gpio_shift);
2233                /* clear CLR and set SET */
2234                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2235                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2236                break;
2237
2238        default:
2239                break;
2240        }
2241
2242        REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2243        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2244
2245        return 0;
2246}
2247
2248static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2249{
2250        u32 spio_reg;
2251
2252        /* Only 2 SPIOs are configurable */
2253        if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2254                BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2255                return -EINVAL;
2256        }
2257
2258        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2259        /* read SPIO and mask except the float bits */
2260        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2261
2262        switch (mode) {
2263        case MISC_SPIO_OUTPUT_LOW:
2264                DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2265                /* clear FLOAT and set CLR */
2266                spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2267                spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2268                break;
2269
2270        case MISC_SPIO_OUTPUT_HIGH:
2271                DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2272                /* clear FLOAT and set SET */
2273                spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2274                spio_reg |=  (spio << MISC_SPIO_SET_POS);
2275                break;
2276
2277        case MISC_SPIO_INPUT_HI_Z:
2278                DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2279                /* set FLOAT */
2280                spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2281                break;
2282
2283        default:
2284                break;
2285        }
2286
2287        REG_WR(bp, MISC_REG_SPIO, spio_reg);
2288        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2289
2290        return 0;
2291}
2292
2293void bnx2x_calc_fc_adv(struct bnx2x *bp)
2294{
2295        u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2296
2297        bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2298                                           ADVERTISED_Pause);
2299        switch (bp->link_vars.ieee_fc &
2300                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2301        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2302                bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2303                                                  ADVERTISED_Pause);
2304                break;
2305
2306        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2307                bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2308                break;
2309
2310        default:
2311                break;
2312        }
2313}
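
/* The switch above follows the usual autoneg pause encoding: PAUSE_BOTH
 * advertises symmetric and asymmetric pause, PAUSE_ASYMMETRIC advertises
 * asymmetric only, and anything else advertises neither bit.
 */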
2314
2315static void bnx2x_set_requested_fc(struct bnx2x *bp)
2316{
2317        /* Initialize link parameters structure variables.
2318         * It is recommended to turn off RX flow control for jumbo frames
2319         * for better performance.
2320         */
2321        if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2322                bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2323        else
2324                bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2325}
2326
2327static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2328{
2329        u32 pause_enabled = 0;
2330
2331        if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2332                if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2333                        pause_enabled = 1;
2334
2335                REG_WR(bp, BAR_USTRORM_INTMEM +
2336                           USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2337                       pause_enabled);
2338        }
2339
2340        DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2341           pause_enabled ? "enabled" : "disabled");
2342}
2343
2344int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2345{
2346        int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2347        u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2348
2349        if (!BP_NOMCP(bp)) {
2350                bnx2x_set_requested_fc(bp);
2351                bnx2x_acquire_phy_lock(bp);
2352
2353                if (load_mode == LOAD_DIAG) {
2354                        struct link_params *lp = &bp->link_params;
2355                        lp->loopback_mode = LOOPBACK_XGXS;
2356                        /* Prefer doing PHY loopback at highest speed */
2357                        if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2358                                if (lp->speed_cap_mask[cfx_idx] &
2359                                    PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2360                                        lp->req_line_speed[cfx_idx] =
2361                                        SPEED_20000;
2362                                else if (lp->speed_cap_mask[cfx_idx] &
2363                                            PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2364                                                lp->req_line_speed[cfx_idx] =
2365                                                SPEED_10000;
2366                                else
2367                                        lp->req_line_speed[cfx_idx] =
2368                                        SPEED_1000;
2369                        }
2370                }
2371
2372                if (load_mode == LOAD_LOOPBACK_EXT) {
2373                        struct link_params *lp = &bp->link_params;
2374                        lp->loopback_mode = LOOPBACK_EXT;
2375                }
2376
2377                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2378
2379                bnx2x_release_phy_lock(bp);
2380
2381                bnx2x_init_dropless_fc(bp);
2382
2383                bnx2x_calc_fc_adv(bp);
2384
2385                if (bp->link_vars.link_up) {
2386                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2387                        bnx2x_link_report(bp);
2388                }
2389                queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2390                bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2391                return rc;
2392        }
2393        BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2394        return -EINVAL;
2395}
2396
2397void bnx2x_link_set(struct bnx2x *bp)
2398{
2399        if (!BP_NOMCP(bp)) {
2400                bnx2x_acquire_phy_lock(bp);
2401                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2402                bnx2x_release_phy_lock(bp);
2403
2404                bnx2x_init_dropless_fc(bp);
2405
2406                bnx2x_calc_fc_adv(bp);
2407        } else
2408                BNX2X_ERR("Bootcode is missing - can not set link\n");
2409}
2410
2411static void bnx2x__link_reset(struct bnx2x *bp)
2412{
2413        if (!BP_NOMCP(bp)) {
2414                bnx2x_acquire_phy_lock(bp);
2415                bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2416                bnx2x_release_phy_lock(bp);
2417        } else
2418                BNX2X_ERR("Bootcode is missing - can not reset link\n");
2419}
2420
2421void bnx2x_force_link_reset(struct bnx2x *bp)
2422{
2423        bnx2x_acquire_phy_lock(bp);
2424        bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2425        bnx2x_release_phy_lock(bp);
2426}
2427
2428u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2429{
2430        u8 rc = 0;
2431
2432        if (!BP_NOMCP(bp)) {
2433                bnx2x_acquire_phy_lock(bp);
2434                rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2435                                     is_serdes);
2436                bnx2x_release_phy_lock(bp);
2437        } else
2438                BNX2X_ERR("Bootcode is missing - can not test link\n");
2439
2440        return rc;
2441}
2442
2443/* Calculates the per-vn min rates needed for normalizing the fairness
2444   algorithm and stores them in input->vnic_min_rate[].
2445   Behaviour:
2446     hidden vns get a min rate of 0;
2447     a min rate configured as 0 is bumped to DEF_MIN_RATE so its
2448     fairness weight stays nonzero;
2449     if ETS is enabled, or all configured min rates are zero, the
2450     fairness algorithm is deactivated via input->flags.cmng_enables.
2451 */
2452static void bnx2x_calc_vn_min(struct bnx2x *bp,
2453                                      struct cmng_init_input *input)
2454{
2455        int all_zero = 1;
2456        int vn;
2457
2458        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2459                u32 vn_cfg = bp->mf_config[vn];
2460                u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2461                                   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2462
2463                /* Skip hidden vns */
2464                if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2465                        vn_min_rate = 0;
2466                /* If min rate is zero - set it to 1 */
2467                else if (!vn_min_rate)
2468                        vn_min_rate = DEF_MIN_RATE;
2469                else
2470                        all_zero = 0;
2471
2472                input->vnic_min_rate[vn] = vn_min_rate;
2473        }
2474
2475        /* if ETS or all min rates are zeros - disable fairness */
2476        if (BNX2X_IS_ETS_ENABLED(bp)) {
2477                input->flags.cmng_enables &=
2478                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2479                DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2480        } else if (all_zero) {
2481                input->flags.cmng_enables &=
2482                                        ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2483                DP(NETIF_MSG_IFUP,
2484                   "All MIN values are zeroes, fairness will be disabled\n");
2485        } else
2486                input->flags.cmng_enables |=
2487                                        CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2488}
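
/* Worked example: a configured min BW field of 5 becomes
 * input->vnic_min_rate[vn] = 500 after the *100 scaling above - apparently
 * the same 100 Mb units used for the max rates below. Fairness stays enabled
 * as long as ETS is off and at least one vn had a nonzero min rate
 * configured.
 */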
2489
2490static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2491                                    struct cmng_init_input *input)
2492{
2493        u16 vn_max_rate;
2494        u32 vn_cfg = bp->mf_config[vn];
2495
2496        if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2497                vn_max_rate = 0;
2498        else {
2499                u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2500
2501                if (IS_MF_PERCENT_BW(bp)) {
2502                        /* maxCfg is in percent of link speed */
2503                        vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2504                } else /* SD modes */
2505                        /* maxCfg is absolute in 100Mb units */
2506                        vn_max_rate = maxCfg * 100;
2507        }
2508
2509        DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2510
2511        input->vnic_max_rate[vn] = vn_max_rate;
2512}
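
/* Worked example: with IS_MF_PERCENT_BW and a 10000 Mbps link, maxCfg == 30
 * yields vn_max_rate = 3000; in SD modes maxCfg counts 100 Mb units directly,
 * so maxCfg == 25 yields 2500.
 */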
2513
2514static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2515{
2516        if (CHIP_REV_IS_SLOW(bp))
2517                return CMNG_FNS_NONE;
2518        if (IS_MF(bp))
2519                return CMNG_FNS_MINMAX;
2520
2521        return CMNG_FNS_NONE;
2522}
2523
2524void bnx2x_read_mf_cfg(struct bnx2x *bp)
2525{
2526        int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2527
2528        if (BP_NOMCP(bp))
2529                return; /* what should be the default value in this case */
2530
2531        /* For 2 port configuration the absolute function number formula
2532         * is:
2533         *      abs_func = 2 * vn + BP_PORT + BP_PATH
2534         *
2535         *      and there are 4 functions per port
2536         *
2537         * For 4 port configuration it is
2538         *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2539         *
2540         *      and there are 2 functions per port
2541         */
2542        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2543                int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2544
2545                if (func >= E1H_FUNC_MAX)
2546                        break;
2547
2548                bp->mf_config[vn] =
2549                        MF_CFG_RD(bp, func_mf_config[func].config);
2550        }
2551        if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2552                DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2553                bp->flags |= MF_FUNC_DIS;
2554        } else {
2555                DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2556                bp->flags &= ~MF_FUNC_DIS;
2557        }
2558}
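
/* Worked example of the formulas above: in a 2-port chip (n == 1), vn 1 on
 * port 1, path 0 maps to abs_func = 1 * (2 * 1 + 1) + 0 = 3; in a 4-port chip
 * (n == 2) the same vn/port/path maps to 2 * (2 * 1 + 1) + 0 = 6, matching
 * abs_func = 4 * vn + 2 * port + path.
 */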
2559
2560static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2561{
2562        struct cmng_init_input input;
2563        memset(&input, 0, sizeof(struct cmng_init_input));
2564
2565        input.port_rate = bp->link_vars.line_speed;
2566
2567        if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2568                int vn;
2569
2570                /* read mf conf from shmem */
2571                if (read_cfg)
2572                        bnx2x_read_mf_cfg(bp);
2573
2574                /* vn_weight_sum and enable fairness if not 0 */
2575                bnx2x_calc_vn_min(bp, &input);
2576
2577                /* calculate and set min-max rate for each vn */
2578                if (bp->port.pmf)
2579                        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2580                                bnx2x_calc_vn_max(bp, vn, &input);
2581
2582                /* always enable rate shaping and fairness */
2583                input.flags.cmng_enables |=
2584                                        CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2585
2586                bnx2x_init_cmng(&input, &bp->cmng);
2587                return;
2588        }
2589
2590        /* rate shaping and fairness are disabled */
2591        DP(NETIF_MSG_IFUP,
2592           "rate shaping and fairness are disabled\n");
2593}
2594
2595static void storm_memset_cmng(struct bnx2x *bp,
2596                              struct cmng_init *cmng,
2597                              u8 port)
2598{
2599        int vn;
2600        size_t size = sizeof(struct cmng_struct_per_port);
2601
2602        u32 addr = BAR_XSTRORM_INTMEM +
2603                        XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2604
2605        __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2606
2607        for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2608                int func = func_by_vn(bp, vn);
2609
2610                addr = BAR_XSTRORM_INTMEM +
2611                       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2612                size = sizeof(struct rate_shaping_vars_per_vn);
2613                __storm_memset_struct(bp, addr, size,
2614                                      (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2615
2616                addr = BAR_XSTRORM_INTMEM +
2617                       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2618                size = sizeof(struct fairness_vars_per_vn);
2619                __storm_memset_struct(bp, addr, size,
2620                                      (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2621        }
2622}
2623
2624/* init cmng mode in HW according to local configuration */
2625void bnx2x_set_local_cmng(struct bnx2x *bp)
2626{
2627        int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2628
2629        if (cmng_fns != CMNG_FNS_NONE) {
2630                bnx2x_cmng_fns_init(bp, false, cmng_fns);
2631                storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2632        } else {
2633                /* rate shaping and fairness are disabled */
2634                DP(NETIF_MSG_IFUP,
2635                   "single function mode without fairness\n");
2636        }
2637}
2638
2639/* This function is called upon link interrupt */
2640static void bnx2x_link_attn(struct bnx2x *bp)
2641{
2642        /* Make sure that we are synced with the current statistics */
2643        bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2644
2645        bnx2x_link_update(&bp->link_params, &bp->link_vars);
2646
2647        bnx2x_init_dropless_fc(bp);
2648
2649        if (bp->link_vars.link_up) {
2650
2651                if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2652                        struct host_port_stats *pstats;
2653
2654                        pstats = bnx2x_sp(bp, port_stats);
2655                        /* reset old mac stats */
2656                        memset(&(pstats->mac_stx[0]), 0,
2657                               sizeof(struct mac_stx));
2658                }
2659                if (bp->state == BNX2X_STATE_OPEN)
2660                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2661        }
2662
2663        if (bp->link_vars.link_up && bp->link_vars.line_speed)
2664                bnx2x_set_local_cmng(bp);
2665
2666        __bnx2x_link_report(bp);
2667
2668        if (IS_MF(bp))
2669                bnx2x_link_sync_notify(bp);
2670}
2671
2672void bnx2x__link_status_update(struct bnx2x *bp)
2673{
2674        if (bp->state != BNX2X_STATE_OPEN)
2675                return;
2676
2677        /* read updated dcb configuration */
2678        if (IS_PF(bp)) {
2679                bnx2x_dcbx_pmf_update(bp);
2680                bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2681                if (bp->link_vars.link_up)
2682                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2683                else
2684                        bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2685                /* indicate link status */
2686                bnx2x_link_report(bp);
2687
2688        } else { /* VF */
2689                bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2690                                          SUPPORTED_10baseT_Full |
2691                                          SUPPORTED_100baseT_Half |
2692                                          SUPPORTED_100baseT_Full |
2693                                          SUPPORTED_1000baseT_Full |
2694                                          SUPPORTED_2500baseX_Full |
2695                                          SUPPORTED_10000baseT_Full |
2696                                          SUPPORTED_TP |
2697                                          SUPPORTED_FIBRE |
2698                                          SUPPORTED_Autoneg |
2699                                          SUPPORTED_Pause |
2700                                          SUPPORTED_Asym_Pause);
2701                bp->port.advertising[0] = bp->port.supported[0];
2702
2703                bp->link_params.bp = bp;
2704                bp->link_params.port = BP_PORT(bp);
2705                bp->link_params.req_duplex[0] = DUPLEX_FULL;
2706                bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2707                bp->link_params.req_line_speed[0] = SPEED_10000;
2708                bp->link_params.speed_cap_mask[0] = 0x7f0000;
2709                bp->link_params.switch_cfg = SWITCH_CFG_10G;
2710                bp->link_vars.mac_type = MAC_TYPE_BMAC;
2711                bp->link_vars.line_speed = SPEED_10000;
2712                bp->link_vars.link_status =
2713                        (LINK_STATUS_LINK_UP |
2714                         LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2715                bp->link_vars.link_up = 1;
2716                bp->link_vars.duplex = DUPLEX_FULL;
2717                bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2718                __bnx2x_link_report(bp);
2719
2720                bnx2x_sample_bulletin(bp);
2721
2722                /* if the bulletin board did not carry a link status update,
2723                 * __bnx2x_link_report will report the current status, but it
2724                 * will NOT emit a duplicate report if one was already issued
2725                 * while sampling the bulletin board.
2726                 */
2727                bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2728        }
2729}
2730
2731static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2732                                  u16 vlan_val, u8 allowed_prio)
2733{
2734        struct bnx2x_func_state_params func_params = {NULL};
2735        struct bnx2x_func_afex_update_params *f_update_params =
2736                &func_params.params.afex_update;
2737
2738        func_params.f_obj = &bp->func_obj;
2739        func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2740
2741        /* no need to wait for RAMROD completion, so don't
2742         * set RAMROD_COMP_WAIT flag
2743         */
2744
2745        f_update_params->vif_id = vifid;
2746        f_update_params->afex_default_vlan = vlan_val;
2747        f_update_params->allowed_priorities = allowed_prio;
2748
2749        /* if the ramrod cannot be sent, respond to MCP immediately */
2750        if (bnx2x_func_state_change(bp, &func_params) < 0)
2751                bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2752
2753        return 0;
2754}
2755
2756static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2757                                          u16 vif_index, u8 func_bit_map)
2758{
2759        struct bnx2x_func_state_params func_params = {NULL};
2760        struct bnx2x_func_afex_viflists_params *update_params =
2761                &func_params.params.afex_viflists;
2762        int rc;
2763        u32 drv_msg_code;
2764
2765        /* validate only LIST_SET and LIST_GET are received from switch */
2766        if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2767                BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2768                          cmd_type);
2769
2770        func_params.f_obj = &bp->func_obj;
2771        func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2772
2773        /* set parameters according to cmd_type */
2774        update_params->afex_vif_list_command = cmd_type;
2775        update_params->vif_list_index = vif_index;
2776        update_params->func_bit_map =
2777                (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2778        update_params->func_to_clear = 0;
2779        drv_msg_code =
2780                (cmd_type == VIF_LIST_RULE_GET) ?
2781                DRV_MSG_CODE_AFEX_LISTGET_ACK :
2782                DRV_MSG_CODE_AFEX_LISTSET_ACK;
2783
2784        /* if the ramrod cannot be sent, respond to MCP immediately for
2785         * SET and GET requests (others are not triggered from the MCP)
2786         */
2787        rc = bnx2x_func_state_change(bp, &func_params);
2788        if (rc < 0)
2789                bnx2x_fw_command(bp, drv_msg_code, 0);
2790
2791        return 0;
2792}
2793
2794static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2795{
2796        struct afex_stats afex_stats;
2797        u32 func = BP_ABS_FUNC(bp);
2798        u32 mf_config;
2799        u16 vlan_val;
2800        u32 vlan_prio;
2801        u16 vif_id;
2802        u8 allowed_prio;
2803        u8 vlan_mode;
2804        u32 addr_to_write, vifid, addrs, stats_type, i;
2805
2806        if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2807                vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2808                DP(BNX2X_MSG_MCP,
2809                   "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2810                bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2811        }
2812
2813        if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2814                vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2815                addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2816                DP(BNX2X_MSG_MCP,
2817                   "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2818                   vifid, addrs);
2819                bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2820                                               addrs);
2821        }
2822
2823        if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2824                addr_to_write = SHMEM2_RD(bp,
2825                        afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2826                stats_type = SHMEM2_RD(bp,
2827                        afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2828
2829                DP(BNX2X_MSG_MCP,
2830                   "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2831                   addr_to_write);
2832
2833                bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2834
2835                /* write response to scratchpad, for MCP */
2836                for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2837                        REG_WR(bp, addr_to_write + i*sizeof(u32),
2838                               *(((u32 *)(&afex_stats))+i));
2839
2840                /* send ack message to MCP */
2841                bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2842        }
2843
2844        if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2845                mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2846                bp->mf_config[BP_VN(bp)] = mf_config;
2847                DP(BNX2X_MSG_MCP,
2848                   "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2849                   mf_config);
2850
2851                /* if VIF_SET is "enabled" */
2852                if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2853                        /* set rate limit directly to internal RAM */
2854                        struct cmng_init_input cmng_input;
2855                        struct rate_shaping_vars_per_vn m_rs_vn;
2856                        size_t size = sizeof(struct rate_shaping_vars_per_vn);
2857                        u32 addr = BAR_XSTRORM_INTMEM +
2858                            XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2859
2860                        bp->mf_config[BP_VN(bp)] = mf_config;
2861
2862                        bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2863                        m_rs_vn.vn_counter.rate =
2864                                cmng_input.vnic_max_rate[BP_VN(bp)];
2865                        m_rs_vn.vn_counter.quota =
2866                                (m_rs_vn.vn_counter.rate *
2867                                 RS_PERIODIC_TIMEOUT_USEC) / 8;
2868
2869                        __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2870
2871                        /* read relevant values from mf_cfg struct in shmem */
2872                        vif_id =
2873                                (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2874                                 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2875                                FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2876                        vlan_val =
2877                                (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2878                                 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2879                                FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2880                        vlan_prio = (mf_config &
2881                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2882                                    FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2883                        vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2884                        vlan_mode =
2885                                (MF_CFG_RD(bp,
2886                                           func_mf_config[func].afex_config) &
2887                                 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2888                                FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2889                        allowed_prio =
2890                                (MF_CFG_RD(bp,
2891                                           func_mf_config[func].afex_config) &
2892                                 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2893                                FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2894
2895                        /* send ramrod to FW, return in case of failure */
2896                        if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2897                                                   allowed_prio))
2898                                return;
2899
2900                        bp->afex_def_vlan_tag = vlan_val;
2901                        bp->afex_vlan_mode = vlan_mode;
2902                } else {
2903                        /* notify link down because the function is disabled */
2904                        bnx2x_link_report(bp);
2905
2906                        /* send INVALID VIF ramrod to FW */
2907                        bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2908
2909                        /* Reset the default afex VLAN */
2910                        bp->afex_def_vlan_tag = -1;
2911                }
2912        }
2913}
2914
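    /* Handle an MCP request to update the outer (service) VLAN ID: re-read
     * the S-tag from shmem, program it into the NIG LLH, notify the FW via
     * a switch-update ramrod, then ACK or NACK the MCP.
     */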
2915static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2916{
2917        struct bnx2x_func_switch_update_params *switch_update_params;
2918        struct bnx2x_func_state_params func_params;
2919
2920        memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2921        switch_update_params = &func_params.params.switch_update;
2922        func_params.f_obj = &bp->func_obj;
2923        func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2924
2925        /* Prepare parameters for function state transitions */
2926        __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2927        __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2928
2929        if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2930                int func = BP_ABS_FUNC(bp);
2931                u32 val;
2932
2933                /* Re-learn the S-tag from shmem */
2934                val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2935                                FUNC_MF_CFG_E1HOV_TAG_MASK;
2936                if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2937                        bp->mf_ov = val;
2938                } else {
2939                        BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2940                        goto fail;
2941                }
2942
2943                /* Configure new S-tag in LLH */
2944                REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2945                       bp->mf_ov);
2946
2947                /* Send a ramrod to notify the FW of the change */
2948                __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2949                          &switch_update_params->changes);
2950                switch_update_params->vlan = bp->mf_ov;
2951
2952                if (bnx2x_func_state_change(bp, &func_params) < 0) {
2953                        BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2954                                  bp->mf_ov);
2955                        goto fail;
2956                } else {
2957                        DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2958                           bp->mf_ov);
2959                }
2960        } else {
2961                goto fail;
2962        }
2963
2964        bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2965        return;
2966fail:
2967        bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2968}
2969
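    /* Called when the MCP hands this function the Port Management Function
     * (PMF) role: kick the periodic task, update the DCBX state and enable
     * the NIG attention bits for this vn.
     */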
2970static void bnx2x_pmf_update(struct bnx2x *bp)
2971{
2972        int port = BP_PORT(bp);
2973        u32 val;
2974
2975        bp->port.pmf = 1;
2976        DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2977
2978        /*
2979         * We need the smp_mb() to ensure the ordering between writing
2980         * bp->port.pmf here and reading it from bnx2x_periodic_task().
2981         */
2982        smp_mb();
2983
2984        /* queue a periodic task */
2985        queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2986
2987        bnx2x_dcbx_pmf_update(bp);
2988
2989        /* enable nig attention */
2990        val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2991        if (bp->common.int_block == INT_BLOCK_HC) {
2992                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2993                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2994        } else if (!CHIP_IS_E1x(bp)) {
2995                REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2996                REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2997        }
2998
2999        bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3000}
3001
3002/* end of Link */
3003
3004/* slow path */
3005
3006/*
3007 * General service functions
3008 */
3009
3010/* send the MCP a request, block until there is a reply */
3011u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3012{
3013        int mb_idx = BP_FW_MB_IDX(bp);
3014        u32 seq;
3015        u32 rc = 0;
3016        u32 cnt = 1;
3017        u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3018
3019        mutex_lock(&bp->fw_mb_mutex);
3020        seq = ++bp->fw_seq;
3021        SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3022        SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3023
3024        DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3025                        (command | seq), param);
3026
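            /* Poll for the FW acknowledgement: the FW echoes our sequence
             * number in fw_mb_header once the command has been processed.
             */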
3027        do {
3028                /* let the FW do its magic ... */
3029                msleep(delay);
3030
3031                rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3032
3033                /* Give the FW up to 5 seconds (500 * 10ms) */
3034        } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3035
3036        DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3037           cnt*delay, rc, seq);
3038
3039        /* is this a reply to our command? */
3040        if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3041                rc &= FW_MSG_CODE_MASK;
3042        else {
3043                /* FW BUG! */
3044                BNX2X_ERR("FW failed to respond!\n");
3045                bnx2x_fw_dump(bp);
3046                rc = 0;
3047        }
3048        mutex_unlock(&bp->fw_mb_mutex);
3049
3050        return rc;
3051}
3052
3053static void storm_memset_func_cfg(struct bnx2x *bp,
3054                                 struct tstorm_eth_function_common_config *tcfg,
3055                                 u16 abs_fid)
3056{
3057        size_t size = sizeof(struct tstorm_eth_function_common_config);
3058
3059        u32 addr = BAR_TSTRORM_INTMEM +
3060                        TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3061
3062        __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3063}
3064
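    /* Program the per-function FW configuration into storm internal memory
     * and enable the function; optionally set up the slow-path queue (SPQ)
     * address and producer.
     */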
3065void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3066{
3067        if (CHIP_IS_E1x(bp)) {
3068                struct tstorm_eth_function_common_config tcfg = {0};
3069
3070                storm_memset_func_cfg(bp, &tcfg, p->func_id);
3071        }
3072
3073        /* Enable the function in the FW */
3074        storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3075        storm_memset_func_en(bp, p->func_id, 1);
3076
3077        /* spq */
3078        if (p->spq_active) {
3079                storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3080                REG_WR(bp, XSEM_REG_FAST_MEMORY +
3081                       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3082        }
3083}
3084
3085/**
3086 * bnx2x_get_common_flags - Return common flags
3087 *
3088 * @bp:         device handle
3089 * @fp:         queue handle
3090 * @zero_stats: TRUE if statistics zeroing is needed
3091 *
3092 * Return the flags that are common to both Tx-only and regular connections.
3093 */
3094static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3095                                            struct bnx2x_fastpath *fp,
3096                                            bool zero_stats)
3097{
3098        unsigned long flags = 0;
3099
3100        /* PF driver will always initialize the Queue to an ACTIVE state */
3101        __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3102
3103        /* tx only connections collect statistics (on the same index as the
3104         * parent connection). The statistics are zeroed when the parent
3105         * connection is initialized.
3106         */
3107
3108        __set_bit(BNX2X_Q_FLG_STATS, &flags);
3109        if (zero_stats)
3110                __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3111
3112        if (bp->flags & TX_SWITCHING)
3113                __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3114
3115        __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3116        __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3117
3118#ifdef BNX2X_STOP_ON_ERROR
3119        __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3120#endif
3121
3122        return flags;
3123}
3124
3125static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3126                                       struct bnx2x_fastpath *fp,
3127                                       bool leading)
3128{
3129        unsigned long flags = 0;
3130
3131        /* calculate other queue flags */
3132        if (IS_MF_SD(bp))
3133                __set_bit(BNX2X_Q_FLG_OV, &flags);
3134
3135        if (IS_FCOE_FP(fp)) {
3136                __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3137                /* For FCoE - force usage of default priority (for afex) */
3138                __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3139        }
3140
3141        if (fp->mode != TPA_MODE_DISABLED) {
3142                __set_bit(BNX2X_Q_FLG_TPA, &flags);
3143                __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3144                if (fp->mode == TPA_MODE_GRO)
3145                        __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3146        }
3147
3148        if (leading) {
3149                __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3150                __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3151        }
3152
3153        /* Always set HW VLAN stripping */
3154        __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3155
3156        /* configure silent vlan removal */
3157        if (IS_MF_AFEX(bp))
3158                __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3159
3160        return flags | bnx2x_get_common_flags(bp, fp, true);
3161}
3162
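    /* Fill the 'general' part of the queue setup parameters: statistics and
     * client ids, MTU, cos and the fastpath HSI version.
     */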
3163static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3164        struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3165        u8 cos)
3166{
3167        gen_init->stat_id = bnx2x_stats_id(fp);
3168        gen_init->spcl_id = fp->cl_id;
3169
3170        /* Always use mini-jumbo MTU for FCoE L2 ring */
3171        if (IS_FCOE_FP(fp))
3172                gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3173        else
3174                gen_init->mtu = bp->dev->mtu;
3175
3176        gen_init->cos = cos;
3177
3178        gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3179}
3180
3181static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3182        struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3183        struct bnx2x_rxq_setup_params *rxq_init)
3184{
3185        u8 max_sge = 0;
3186        u16 sge_sz = 0;
3187        u16 tpa_agg_size = 0;
3188
3189        if (fp->mode != TPA_MODE_DISABLED) {
3190                pause->sge_th_lo = SGE_TH_LO(bp);
3191                pause->sge_th_hi = SGE_TH_HI(bp);
3192
3193                /* validate the SGE ring has enough entries to cross the high threshold */
3194                WARN_ON(bp->dropless_fc &&
3195                                pause->sge_th_hi + FW_PREFETCH_CNT >
3196                                MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3197
3198                tpa_agg_size = TPA_AGG_SIZE;
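                    /* SGEs needed per packet: round the MTU up to an SGE
                     * page, count the pages, then round up to a whole number
                     * of PAGES_PER_SGE chunks, as each SGE entry maps
                     * PAGES_PER_SGE pages.
                     */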
3199                max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3200                        SGE_PAGE_SHIFT;
3201                max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3202                          (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3203                sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3204        }
3205
3206        /* pause - not for e1 */
3207        if (!CHIP_IS_E1(bp)) {
3208                pause->bd_th_lo = BD_TH_LO(bp);
3209                pause->bd_th_hi = BD_TH_HI(bp);
3210
3211                pause->rcq_th_lo = RCQ_TH_LO(bp);
3212                pause->rcq_th_hi = RCQ_TH_HI(bp);
3213                /*
3214                 * validate that rings have enough entries to cross
3215                 * high thresholds
3216                 */
3217                WARN_ON(bp->dropless_fc &&
3218                                pause->bd_th_hi + FW_PREFETCH_CNT >
3219                                bp->rx_ring_size);
3220                WARN_ON(bp->dropless_fc &&
3221                                pause->rcq_th_hi + FW_PREFETCH_CNT >
3222                                NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3223
3224                pause->pri_map = 1;
3225        }
3226
3227        /* rxq setup */
3228        rxq_init->dscr_map = fp->rx_desc_mapping;
3229        rxq_init->sge_map = fp->rx_sge_mapping;
3230        rxq_init->rcq_map = fp->rx_comp_mapping;
3231        rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3232
3233        /* This should be the maximum number of data bytes that may be
3234         * placed on the BD (not including padding).
3235         */
3236        rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3237                           BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3238
3239        rxq_init->cl_qzone_id = fp->cl_qzone_id;
3240        rxq_init->tpa_agg_sz = tpa_agg_size;
3241        rxq_init->sge_buf_sz = sge_sz;
3242        rxq_init->max_sges_pkt = max_sge;
3243        rxq_init->rss_engine_id = BP_FUNC(bp);
3244        rxq_init->mcast_engine_id = BP_FUNC(bp);
3245
3246        /* Maximum number of simultaneous TPA aggregations for this Queue.
3247         *
3248         * For PF Clients it should be the maximum available number.
3249         * VF driver(s) may want to define it to a smaller value.
3250         */
3251        rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3252
3253        rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3254        rxq_init->fw_sb_id = fp->fw_sb_id;
3255
3256        if (IS_FCOE_FP(fp))
3257                rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3258        else
3259                rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3260        /* configure silent vlan removal:
3261         * if the multi-function mode is AFEX, then mask the default vlan
3262         */
3263        if (IS_MF_AFEX(bp)) {
3264                rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3265                rxq_init->silent_removal_mask = VLAN_VID_MASK;
3266        }
3267}
3268
3269static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3270        struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3271        u8 cos)
3272{
3273        txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3274        txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3275        txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3276        txq_init->fw_sb_id = fp->fw_sb_id;
3277
3278        /*
3279         * set the tss leading client id for TX classification ==
3280         * leading RSS client id
3281         */
3282        txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3283
3284        if (IS_FCOE_FP(fp)) {
3285                txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3286                txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3287        }
3288}
3289
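    /* One-time PF init of FW-facing state: clear the IGU statistics (E2 and
     * above), enable the function and SPQ in the storms, set up congestion
     * management defaults and initialize the event queue data.
     */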
3290static void bnx2x_pf_init(struct bnx2x *bp)
3291{
3292        struct bnx2x_func_init_params func_init = {0};
3293        struct event_ring_data eq_data = { {0} };
3294
3295        if (!CHIP_IS_E1x(bp)) {
3296                /* reset IGU PF statistics: MSIX + ATTN */
3297                /* PF */
3298                REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3299                           BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3300                           (CHIP_MODE_IS_4_PORT(bp) ?
3301                                BP_FUNC(bp) : BP_VN(bp))*4, 0);
3302                /* ATTN */
3303                REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3304                           BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3305                           BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3306                           (CHIP_MODE_IS_4_PORT(bp) ?
3307                                BP_FUNC(bp) : BP_VN(bp))*4, 0);
3308        }
3309
3310        func_init.spq_active = true;
3311        func_init.pf_id = BP_FUNC(bp);
3312        func_init.func_id = BP_FUNC(bp);
3313        func_init.spq_map = bp->spq_mapping;
3314        func_init.spq_prod = bp->spq_prod_idx;
3315
3316        bnx2x_func_init(bp, &func_init);
3317
3318        memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3319
3320        /*
3321         * Congestion management values depend on the link rate.
3322         * There is no active link yet, so the initial link rate is set
3323         * to 10 Gbps. When the link comes up, the congestion management
3324         * values are re-calculated according to the actual link rate.
3325         */
3326        bp->link_vars.line_speed = SPEED_10000;
3327        bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3328
3329        /* Only the PMF sets the HW */
3330        if (bp->port.pmf)
3331                storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3332
3333        /* init Event Queue - PCI bus guarantees correct endianness */
3334        eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3335        eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3336        eq_data.producer = bp->eq_prod;
3337        eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3338        eq_data.sb_id = DEF_SB_ID;
3339        storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3340}
3341
3342static void bnx2x_e1h_disable(struct bnx2x *bp)
3343{
3344        int port = BP_PORT(bp);
3345
3346        bnx2x_tx_disable(bp);
3347
3348        REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3349}
3350
3351static void bnx2x_e1h_enable(struct bnx2x *bp)
3352{
3353        int port = BP_PORT(bp);
3354
3355        if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3356                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3357
3358        /* Tx queues only need to be re-enabled */
3359        netif_tx_wake_all_queues(bp->dev);
3360
3361        /*
3362         * Do not call netif_carrier_on here; it is called by the link-state
3363         * check when the link is up.
3364         */
3365}
3366
3367#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3368
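    /* Fill the ether_stat member of drv_info_to_mcp with the driver version,
     * local MACs, MTU, offload feature flags and ring sizes for the MFW.
     */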
3369static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3370{
3371        struct eth_stats_info *ether_stat =
3372                &bp->slowpath->drv_info_to_mcp.ether_stat;
3373        struct bnx2x_vlan_mac_obj *mac_obj =
3374                &bp->sp_objs->mac_obj;
3375        int i;
3376
3377        strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3378                ETH_STAT_INFO_VERSION_LEN);
3379
3380        /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3381         * mac_local field of the ether_stat struct. The base address is offset
3382         * by 2 bytes to account for the field being 8 bytes wide while a mac
3383         * address is only 6 bytes. Likewise, the stride for get_n_elements is
3384         * padded by 2 bytes to step from the 6 bytes of a mac to the 8 bytes
3385         * allocated per entry in the ether_stat struct, so the macs land in
3386         * their proper positions.
3387         */
3388        for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3389                memset(ether_stat->mac_local + i, 0,
3390                       sizeof(ether_stat->mac_local[0]));
3391        mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3392                                DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3393                                ether_stat->mac_local + MAC_PAD, MAC_PAD,
3394                                ETH_ALEN);
3395        ether_stat->mtu_size = bp->dev->mtu;
3396        if (bp->dev->features & NETIF_F_RXCSUM)
3397                ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3398        if (bp->dev->features & NETIF_F_TSO)
3399                ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3400        ether_stat->feature_flags |= bp->common.boot_mode;
3401
3402        ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3403
3404        ether_stat->txq_size = bp->tx_ring_size;
3405        ether_stat->rxq_size = bp->rx_ring_size;
3406
3407#ifdef CONFIG_BNX2X_SRIOV
3408        ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3409#endif
3410}
3411
3412static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3413{
3414        struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3415        struct fcoe_stats_info *fcoe_stat =
3416                &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3417
3418        if (!CNIC_LOADED(bp))
3419                return;
3420
3421        memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3422
3423        fcoe_stat->qos_priority =
3424                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3425
3426        /* insert FCoE stats from ramrod response */
3427        if (!NO_FCOE(bp)) {
3428                struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3429                        &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3430                        tstorm_queue_statistics;
3431
3432                struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3433                        &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3434                        xstorm_queue_statistics;
3435
3436                struct fcoe_statistics_params *fw_fcoe_stat =
3437                        &bp->fw_stats_data->fcoe;
3438
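                    /* Accumulate Rx/Tx byte and frame counts: the FW FCoE
                     * counters plus the per-queue ucast/bcast/mcast queue
                     * statistics. ADD_64_LE adds a little-endian hi/lo pair
                     * into the split 64-bit (hi/lo) accumulators.
                     */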
3439                ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3440                          fcoe_stat->rx_bytes_lo,
3441                          fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3442
3443                ADD_64_LE(fcoe_stat->rx_bytes_hi,
3444                          fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3445                          fcoe_stat->rx_bytes_lo,
3446                          fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3447
3448                ADD_64_LE(fcoe_stat->rx_bytes_hi,
3449                          fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3450                          fcoe_stat->rx_bytes_lo,
3451                          fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3452
3453                ADD_64_LE(fcoe_stat->rx_bytes_hi,
3454                          fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3455                          fcoe_stat->rx_bytes_lo,
3456                          fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3457
3458                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3459                          fcoe_stat->rx_frames_lo,
3460                          fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3461
3462                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3463                          fcoe_stat->rx_frames_lo,
3464                          fcoe_q_tstorm_stats->rcv_ucast_pkts);
3465
3466                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3467                          fcoe_stat->rx_frames_lo,
3468                          fcoe_q_tstorm_stats->rcv_bcast_pkts);
3469
3470                ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3471                          fcoe_stat->rx_frames_lo,
3472                          fcoe_q_tstorm_stats->rcv_mcast_pkts);
3473
3474                ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3475                          fcoe_stat->tx_bytes_lo,
3476                          fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3477
3478                ADD_64_LE(fcoe_stat->tx_bytes_hi,
3479                          fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3480                          fcoe_stat->tx_bytes_lo,
3481                          fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3482
3483                ADD_64_LE(fcoe_stat->tx_bytes_hi,
3484                          fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3485                          fcoe_stat->tx_bytes_lo,
3486                          fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3487
3488                ADD_64_LE(fcoe_stat->tx_bytes_hi,
3489                          fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3490                          fcoe_stat->tx_bytes_lo,
3491                          fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3492
3493                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3494                          fcoe_stat->tx_frames_lo,
3495                          fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3496
3497                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3498                          fcoe_stat->tx_frames_lo,
3499                          fcoe_q_xstorm_stats->ucast_pkts_sent);
3500
3501                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3502                          fcoe_stat->tx_frames_lo,
3503                          fcoe_q_xstorm_stats->bcast_pkts_sent);
3504
3505                ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3506                          fcoe_stat->tx_frames_lo,
3507                          fcoe_q_xstorm_stats->mcast_pkts_sent);
3508        }
3509
3510        /* ask L5 driver to add data to the struct */
3511        bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3512}
3513
3514static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3515{
3516        struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3517        struct iscsi_stats_info *iscsi_stat =
3518                &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3519
3520        if (!CNIC_LOADED(bp))
3521                return;
3522
3523        memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3524               ETH_ALEN);
3525
3526        iscsi_stat->qos_priority =
3527                app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3528
3529        /* ask L5 driver to add data to the struct */
3530        bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3531}
3532
3533/* called due to MCP event (on pmf):
3534 *      reread new bandwidth configuration
3535 *      configure FW
3536 *      notify other functions about the change
3537 */
3538static void bnx2x_config_mf_bw(struct bnx2x *bp)
3539{
3540        /* Workaround for MFW bug.
3541         * MFW is not supposed to generate BW attention in
3542         * single function mode.
3543         */
3544        if (!IS_MF(bp)) {
3545                DP(BNX2X_MSG_MCP,
3546                   "Ignoring MF BW config in single function mode\n");
3547                return;
3548        }
3549
3550        if (bp->link_vars.link_up) {
3551                bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3552                bnx2x_link_sync_notify(bp);
3553        }
3554        storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3555}
3556
3557static void bnx2x_set_mf_bw(struct bnx2x *bp)
3558{
3559        bnx2x_config_mf_bw(bp);
3560        bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3561}
3562
3563static void bnx2x_handle_eee_event(struct bnx2x *bp)
3564{
3565        DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3566        bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3567}
3568
3569#define BNX2X_UPDATE_DRV_INFO_IND_LENGTH        (20)
3570#define BNX2X_UPDATE_DRV_INFO_IND_COUNT         (25)
3571
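    /* Handle an MFW drv_info request: validate the drv_info version, fill
     * drv_info_to_mcp according to the requested opcode, publish the buffer
     * address in shmem2 and ACK. Then wait (up to 25 * 20ms) for management
     * to signal that it has finished reading the buffer.
     */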
3572static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3573{
3574        enum drv_info_opcode op_code;
3575        u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3576        bool release = false;
3577        int wait;
3578
3579        /* if drv_info version supported by MFW doesn't match - send NACK */
3580        if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3581                bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3582                return;
3583        }
3584
3585        op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3586                  DRV_INFO_CONTROL_OP_CODE_SHIFT;
3587
3588        /* Must prevent other flows from accessing drv_info_to_mcp */
3589        mutex_lock(&bp->drv_info_mutex);
3590
3591        memset(&bp->slowpath->drv_info_to_mcp, 0,
3592               sizeof(union drv_info_to_mcp));
3593
3594        switch (op_code) {
3595        case ETH_STATS_OPCODE:
3596                bnx2x_drv_info_ether_stat(bp);
3597                break;
3598        case FCOE_STATS_OPCODE:
3599                bnx2x_drv_info_fcoe_stat(bp);
3600                break;
3601        case ISCSI_STATS_OPCODE:
3602                bnx2x_drv_info_iscsi_stat(bp);
3603                break;
3604        default:
3605                /* if op code isn't supported - send NACK */
3606                bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3607                goto out;
3608        }
3609
3610        /* if we got a drv_info attention from the MFW, then these fields
3611         * are guaranteed to be defined in shmem2
3612         */
3613        SHMEM2_WR(bp, drv_info_host_addr_lo,
3614                U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3615        SHMEM2_WR(bp, drv_info_host_addr_hi,
3616                U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3617
3618        bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3619
3620        /* Since management may want both this and get_driver_version,
3621         * we need to wait until it notifies us that it has finished
3622         * utilizing the buffer.
3623         */
3624        if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3625                DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3626        } else if (!bp->drv_info_mng_owner) {
3627                u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3628
3629                for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3630                        u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3631
3632                        /* Management is done; need to clear indication */
3633                        if (indication & bit) {
3634                                SHMEM2_WR(bp, mfw_drv_indication,
3635                                          indication & ~bit);
3636                                release = true;
3637                                break;
3638                        }
3639
3640                        msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3641                }
3642        }
3643        if (!release) {
3644                DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3645                bp->drv_info_mng_owner = true;
3646        }
3647
3648out:
3649        mutex_unlock(&bp->drv_info_mutex);
3650}
3651
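    /* Pack a dotted version string into a u32, one byte per component.
     * For example, a bnx2x-format string such as "1.78.19-0" parses to
     * (7 << 24) | (8 << 16) | (19 << 8) | 0; missing components become 0.
     */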
3652static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3653{
3654        u8 vals[4];
3655        int i = 0;
3656
3657        if (bnx2x_format) {
3658                i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3659                           &vals[0], &vals[1], &vals[2], &vals[3]);
3660                if (i > 0)
3661                        vals[0] -= '0';
3662        } else {
3663                i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3664                           &vals[0], &vals[1], &vals[2], &vals[3]);
3665        }
3666
3667        while (i < 4)
3668                vals[i++] = 0;
3669
3670        return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3671}
3672
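    /* Report the ETH/iSCSI/FCoE driver versions to the MFW through the
     * func_os_drv_ver shmem2 fields. The storage versions are retrieved
     * via the cnic drv_info stat calls when the cnic driver is loaded.
     */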
3673void bnx2x_update_mng_version(struct bnx2x *bp)
3674{
3675        u32 iscsiver = DRV_VER_NOT_LOADED;
3676        u32 fcoever = DRV_VER_NOT_LOADED;
3677        u32 ethver = DRV_VER_NOT_LOADED;
3678        int idx = BP_FW_MB_IDX(bp);
3679        u8 *version;
3680
3681        if (!SHMEM2_HAS(bp, func_os_drv_ver))
3682                return;
3683
3684        mutex_lock(&bp->drv_info_mutex);
3685        /* Must not proceed when `bnx2x_handle_drv_info_req' is feasible */
3686        if (bp->drv_info_mng_owner)
3687                goto out;
3688
3689        if (bp->state != BNX2X_STATE_OPEN)
3690                goto out;
3691
3692        /* Parse ethernet driver version */
3693        ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3694        if (!CNIC_LOADED(bp))
3695                goto out;
3696
3697        /* Try getting storage driver version via cnic */
3698        memset(&bp->slowpath->drv_info_to_mcp, 0,
3699               sizeof(union drv_info_to_mcp));
3700        bnx2x_drv_info_iscsi_stat(bp);
3701        version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3702        iscsiver = bnx2x_update_mng_version_utility(version, false);
3703
3704        memset(&bp->slowpath->drv_info_to_mcp, 0,
3705               sizeof(union drv_info_to_mcp));
3706        bnx2x_drv_info_fcoe_stat(bp);
3707        version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3708        fcoever = bnx2x_update_mng_version_utility(version, false);
3709
3710out:
3711        SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3712        SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3713        SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3714
3715        mutex_unlock(&bp->drv_info_mutex);
3716
3717        DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3718           ethver, iscsiver, fcoever);
3719}
3720
3721void bnx2x_update_mfw_dump(struct bnx2x *bp)
3722{
3723        u32 drv_ver;
3724        u32 valid_dump;
3725
3726        if (!SHMEM2_HAS(bp, drv_info))
3727                return;
3728
3729        /* Update Driver load time, possibly broken in y2038 */
3730        SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3731
3732        drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3733        SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3734
3735        SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3736
3737        /* Check for a valid on-chip MFW dump and report it. */
3738        valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3739
3740        if (valid_dump & FIRST_DUMP_VALID)
3741                DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3742
3743        if (valid_dump & SECOND_DUMP_VALID)
3744                DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3745}
3746
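    /* Handle DCC/OEM events from the MCP: enable/disable the PF and/or
     * reconfigure the bandwidth allocation, then report OK if all event
     * bits were handled or FAILURE if any remain.
     */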
3747static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3748{
3749        u32 cmd_ok, cmd_fail;
3750
3751        /* sanity */
3752        if (event & DRV_STATUS_DCC_EVENT_MASK &&
3753            event & DRV_STATUS_OEM_EVENT_MASK) {
3754                BNX2X_ERR("Received simultaneous events %08x\n", event);
3755                return;
3756        }
3757
3758        if (event & DRV_STATUS_DCC_EVENT_MASK) {
3759                cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3760                cmd_ok = DRV_MSG_CODE_DCC_OK;
3761        } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
3762                cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3763                cmd_ok = DRV_MSG_CODE_OEM_OK;
3764        }
3765
3766        DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3767
3768        if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3769                     DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3770                /* This is the only place besides the function initialization
3771                 * where bp->flags can change, so it is done without any
3772                 * locks.
3773                 */
3774                if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3775                        DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3776                        bp->flags |= MF_FUNC_DIS;
3777
3778                        bnx2x_e1h_disable(bp);
3779                } else {
3780                        DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3781                        bp->flags &= ~MF_FUNC_DIS;
3782
3783                        bnx2x_e1h_enable(bp);
3784                }
3785                event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3786                           DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3787        }
3788
3789        if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3790                     DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3791                bnx2x_config_mf_bw(bp);
3792                event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3793                           DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3794        }
3795
3796        /* Report results to MCP */
3797        if (event)
3798                bnx2x_fw_command(bp, cmd_fail, 0);
3799        else
3800                bnx2x_fw_command(bp, cmd_ok, 0);
3801}
3802
3803/* must be called under the spq lock */
3804static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3805{
3806        struct eth_spe *next_spe = bp->spq_prod_bd;
3807
3808        if (bp->spq_prod_bd == bp->spq_last_bd) {
3809                bp->spq_prod_bd = bp->spq;
3810                bp->spq_prod_idx = 0;
3811                DP(BNX2X_MSG_SP, "end of spq\n");
3812        } else {
3813                bp->spq_prod_bd++;
3814                bp->spq_prod_idx++;
3815        }
3816        return next_spe;
3817}
3818
3819/* must be called under the spq lock */
3820static void bnx2x_sp_prod_update(struct bnx2x *bp)
3821{
3822        int func = BP_FUNC(bp);
3823
3824        /*
3825         * Make sure that BD data is updated before writing the producer:
3826         * BD data is written to the memory, the producer is read from the
3827         * memory, thus we need a full memory barrier to ensure the ordering.
3828         */
3829        mb();
3830
3831        REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3832                         bp->spq_prod_idx);
3833}
3834
3835/**
3836 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3837 *
3838 * @cmd:        command to check
3839 * @cmd_type:   command type
3840 */
3841static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3842{
3843        if ((cmd_type == NONE_CONNECTION_TYPE) ||
3844            (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3845            (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3846            (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3847            (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3848            (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3849            (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3850                return true;
3851        else
3852                return false;
3853}
3854
3855/**
3856 * bnx2x_sp_post - place a single command on an SP ring
3857 *
3858 * @bp:         driver handle