linux/drivers/net/ethernet/qlogic/qed/qed_hsi.h
   1/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
   2/* QLogic qed NIC Driver
   3 * Copyright (c) 2015-2017  QLogic Corporation
   4 * Copyright (c) 2019-2020 Marvell International Ltd.
   5 */
   6
   7#ifndef _QED_HSI_H
   8#define _QED_HSI_H
   9
  10#include <linux/types.h>
  11#include <linux/io.h>
  12#include <linux/bitops.h>
  13#include <linux/delay.h>
  14#include <linux/kernel.h>
  15#include <linux/list.h>
  16#include <linux/slab.h>
  17#include <linux/qed/common_hsi.h>
  18#include <linux/qed/storage_common.h>
  19#include <linux/qed/tcp_common.h>
  20#include <linux/qed/fcoe_common.h>
  21#include <linux/qed/eth_common.h>
  22#include <linux/qed/iscsi_common.h>
  23#include <linux/qed/iwarp_common.h>
  24#include <linux/qed/rdma_common.h>
  25#include <linux/qed/roce_common.h>
  26#include <linux/qed/qed_fcoe_if.h>
  27
  28struct qed_hwfn;
  29struct qed_ptt;
  30
  31/* Opcodes for the event ring */
  32enum common_event_opcode {
  33        COMMON_EVENT_PF_START,
  34        COMMON_EVENT_PF_STOP,
  35        COMMON_EVENT_VF_START,
  36        COMMON_EVENT_VF_STOP,
  37        COMMON_EVENT_VF_PF_CHANNEL,
  38        COMMON_EVENT_VF_FLR,
  39        COMMON_EVENT_PF_UPDATE,
  40        COMMON_EVENT_MALICIOUS_VF,
  41        COMMON_EVENT_RL_UPDATE,
  42        COMMON_EVENT_EMPTY,
  43        MAX_COMMON_EVENT_OPCODE
  44};
  45
  46/* Common Ramrod Command IDs */
  47enum common_ramrod_cmd_id {
  48        COMMON_RAMROD_UNUSED,
  49        COMMON_RAMROD_PF_START,
  50        COMMON_RAMROD_PF_STOP,
  51        COMMON_RAMROD_VF_START,
  52        COMMON_RAMROD_VF_STOP,
  53        COMMON_RAMROD_PF_UPDATE,
  54        COMMON_RAMROD_RL_UPDATE,
  55        COMMON_RAMROD_EMPTY,
  56        MAX_COMMON_RAMROD_CMD_ID
  57};
  58
  59/* How ll2 should deal with a packet upon errors */
  60enum core_error_handle {
  61        LL2_DROP_PACKET,
  62        LL2_DO_NOTHING,
  63        LL2_ASSERT,
  64        MAX_CORE_ERROR_HANDLE
  65};
  66
  67/* Opcodes for the event ring */
  68enum core_event_opcode {
  69        CORE_EVENT_TX_QUEUE_START,
  70        CORE_EVENT_TX_QUEUE_STOP,
  71        CORE_EVENT_RX_QUEUE_START,
  72        CORE_EVENT_RX_QUEUE_STOP,
  73        CORE_EVENT_RX_QUEUE_FLUSH,
  74        CORE_EVENT_TX_QUEUE_UPDATE,
  75        CORE_EVENT_QUEUE_STATS_QUERY,
  76        MAX_CORE_EVENT_OPCODE
  77};
  78
  79/* The L4 pseudo checksum mode for Core */
  80enum core_l4_pseudo_checksum_mode {
  81        CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
  82        CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
  83        MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
  84};
  85
  86/* LL2 port statistics */
  87struct core_ll2_port_stats {
  88        struct regpair gsi_invalid_hdr;
  89        struct regpair gsi_invalid_pkt_length;
  90        struct regpair gsi_unsupported_pkt_typ;
  91        struct regpair gsi_crcchksm_error;
  92};
  93
  94/* LL2 TX Per Queue Stats */
  95struct core_ll2_pstorm_per_queue_stat {
  96        struct regpair sent_ucast_bytes;
  97        struct regpair sent_mcast_bytes;
  98        struct regpair sent_bcast_bytes;
  99        struct regpair sent_ucast_pkts;
 100        struct regpair sent_mcast_pkts;
 101        struct regpair sent_bcast_pkts;
 102        struct regpair error_drop_pkts;
 103};
 104
 105/* Light-L2 RX Producers in Tstorm RAM */
 106struct core_ll2_rx_prod {
 107        __le16 bd_prod;
 108        __le16 cqe_prod;
 109};
 110
 111struct core_ll2_tstorm_per_queue_stat {
 112        struct regpair packet_too_big_discard;
 113        struct regpair no_buff_discard;
 114};
 115
 116struct core_ll2_ustorm_per_queue_stat {
 117        struct regpair rcv_ucast_bytes;
 118        struct regpair rcv_mcast_bytes;
 119        struct regpair rcv_bcast_bytes;
 120        struct regpair rcv_ucast_pkts;
 121        struct regpair rcv_mcast_pkts;
 122        struct regpair rcv_bcast_pkts;
 123};
 124
 125/* Structure for doorbell data, in PWM mode, for RX producers update. */
 126struct core_pwm_prod_update_data {
 127        __le16 icid; /* internal CID */
 128        u8 reserved0;
 129        u8 params;
 130#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK    0x3
 131#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT   0
 132#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK  0x3F  /* Set 0 */
 133#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
 134        struct core_ll2_rx_prod prod; /* Producers */
 135};
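
/* Illustrative sketch, not part of the upstream qed_hsi.h: one way the
 * 'params' bit-field above could be packed with the SET_FIELD() helper from
 * common_hsi.h. The agg_cmd value is a caller-supplied placeholder.
 */
static inline void core_pwm_prod_update_fill_example(struct core_pwm_prod_update_data *data,
						     u16 cid, u8 agg_cmd,
						     u16 bd_prod, u16 cqe_prod)
{
	data->icid = cpu_to_le16(cid);
	data->reserved0 = 0;
	data->params = 0;
	SET_FIELD(data->params, CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, agg_cmd);
	data->prod.bd_prod = cpu_to_le16(bd_prod);
	data->prod.cqe_prod = cpu_to_le16(cqe_prod);
}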
 136
 137/* Core Ramrod Command IDs (light L2) */
 138enum core_ramrod_cmd_id {
 139        CORE_RAMROD_UNUSED,
 140        CORE_RAMROD_RX_QUEUE_START,
 141        CORE_RAMROD_TX_QUEUE_START,
 142        CORE_RAMROD_RX_QUEUE_STOP,
 143        CORE_RAMROD_TX_QUEUE_STOP,
 144        CORE_RAMROD_RX_QUEUE_FLUSH,
 145        CORE_RAMROD_TX_QUEUE_UPDATE,
 146        CORE_RAMROD_QUEUE_STATS_QUERY,
 147        MAX_CORE_RAMROD_CMD_ID
 148};
 149
 151/* Core RoCE flavor type for Light L2 */
 151enum core_roce_flavor_type {
 152        CORE_ROCE,
 153        CORE_RROCE,
 154        MAX_CORE_ROCE_FLAVOR_TYPE
 155};
 156
 157/* Specifies how ll2 should deal with packet errors: packet_too_big and
 158 * no_buff.
 159 */
 160struct core_rx_action_on_error {
 161        u8 error_type;
 162#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK     0x3
 163#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT    0
 164#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK            0x3
 165#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT           2
 166#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK           0xF
 167#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT          4
 168};
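
/* Illustrative sketch, not part of the upstream qed_hsi.h: packing both
 * error-handling actions with SET_FIELD() using the core_error_handle values
 * defined above. The chosen policies are placeholders, not driver defaults.
 */
static inline void core_rx_action_on_error_fill_example(struct core_rx_action_on_error *action)
{
	action->error_type = 0;
	SET_FIELD(action->error_type,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, LL2_DROP_PACKET);
	SET_FIELD(action->error_type,
		  CORE_RX_ACTION_ON_ERROR_NO_BUFF, LL2_DO_NOTHING);
}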
 169
 170/* Core RX BD for Light L2 */
 171struct core_rx_bd {
 172        struct regpair addr;
 173        __le16 reserved[4];
 174};
 175
 176/* Core RX CM offload BD for Light L2 */
 177struct core_rx_bd_with_buff_len {
 178        struct regpair addr;
 179        __le16 buff_length;
 180        __le16 reserved[3];
 181};
 182
 184/* Core RX BD union for Light L2 */
 184union core_rx_bd_union {
 185        struct core_rx_bd rx_bd;
 186        struct core_rx_bd_with_buff_len rx_bd_with_len;
 187};
 188
 189/* Opaque Data for Light L2 RX CQE */
 190struct core_rx_cqe_opaque_data {
 191        __le32 data[2];
 192};
 193
 194/* Core RX CQE Type for Light L2 */
 195enum core_rx_cqe_type {
 196        CORE_RX_CQE_ILLEGAL_TYPE,
 197        CORE_RX_CQE_TYPE_REGULAR,
 198        CORE_RX_CQE_TYPE_GSI_OFFLOAD,
 199        CORE_RX_CQE_TYPE_SLOW_PATH,
 200        MAX_CORE_RX_CQE_TYPE
 201};
 202
 204/* Core RX fast path CQE for Light L2 */
 204struct core_rx_fast_path_cqe {
 205        u8 type;
 206        u8 placement_offset;
 207        struct parsing_and_err_flags parse_flags;
 208        __le16 packet_length;
 209        __le16 vlan;
 210        struct core_rx_cqe_opaque_data opaque_data;
 211        struct parsing_err_flags err_flags;
 212        __le16 reserved0;
 213        __le32 reserved1[3];
 214};
 215
 216/* Core Rx CM offload CQE */
 217struct core_rx_gsi_offload_cqe {
 218        u8 type;
 219        u8 data_length_error;
 220        struct parsing_and_err_flags parse_flags;
 221        __le16 data_length;
 222        __le16 vlan;
 223        __le32 src_mac_addrhi;
 224        __le16 src_mac_addrlo;
 225        __le16 qp_id;
 226        __le32 src_qp;
 227        struct core_rx_cqe_opaque_data opaque_data;
 228        __le32 reserved;
 229};
 230
 232/* Core RX slow path CQE for Light L2 */
 232struct core_rx_slow_path_cqe {
 233        u8 type;
 234        u8 ramrod_cmd_id;
 235        __le16 echo;
 236        struct core_rx_cqe_opaque_data opaque_data;
 237        __le32 reserved1[5];
 238};
 239
 241/* Core RX CQE union for Light L2 */
 241union core_rx_cqe_union {
 242        struct core_rx_fast_path_cqe rx_cqe_fp;
 243        struct core_rx_gsi_offload_cqe rx_cqe_gsi;
 244        struct core_rx_slow_path_cqe rx_cqe_sp;
 245};
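
/* Illustrative sketch, not part of the upstream qed_hsi.h: the 'type' byte is
 * first in every CQE flavor, so it can be read through any union member to
 * pick the right view before converting little-endian lengths.
 */
static inline u16 core_rx_cqe_data_len_example(const union core_rx_cqe_union *cqe)
{
	switch (cqe->rx_cqe_sp.type) {
	case CORE_RX_CQE_TYPE_REGULAR:
		return le16_to_cpu(cqe->rx_cqe_fp.packet_length);
	case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		return le16_to_cpu(cqe->rx_cqe_gsi.data_length);
	default:
		return 0;
	}
}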
 246
 247/* Ramrod data for rx queue start ramrod */
 248struct core_rx_start_ramrod_data {
 249        struct regpair bd_base;
 250        struct regpair cqe_pbl_addr;
 251        __le16 mtu;
 252        __le16 sb_id;
 253        u8 sb_index;
 254        u8 complete_cqe_flg;
 255        u8 complete_event_flg;
 256        u8 drop_ttl0_flg;
 257        __le16 num_of_pbl_pages;
 258        u8 inner_vlan_stripping_en;
 259        u8 report_outer_vlan;
 260        u8 queue_id;
 261        u8 main_func_queue;
 262        u8 mf_si_bcast_accept_all;
 263        u8 mf_si_mcast_accept_all;
 264        struct core_rx_action_on_error action_on_error;
 265        u8 gsi_offload_flag;
 266        u8 vport_id_valid;
 267        u8 vport_id;
 268        u8 zero_prod_flg;
 269        u8 wipe_inner_vlan_pri_en;
 270        u8 reserved[2];
 271};
 272
 273/* Ramrod data for rx queue stop ramrod */
 274struct core_rx_stop_ramrod_data {
 275        u8 complete_cqe_flg;
 276        u8 complete_event_flg;
 277        u8 queue_id;
 278        u8 reserved1;
 279        __le16 reserved2[2];
 280};
 281
 282/* Flags for Core TX BD */
 283struct core_tx_bd_data {
 284        __le16 as_bitfield;
 285#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK            0x1
 286#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT           0
 287#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK             0x1
 288#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT            1
 289#define CORE_TX_BD_DATA_START_BD_MASK                   0x1
 290#define CORE_TX_BD_DATA_START_BD_SHIFT                  2
 291#define CORE_TX_BD_DATA_IP_CSUM_MASK                    0x1
 292#define CORE_TX_BD_DATA_IP_CSUM_SHIFT                   3
 293#define CORE_TX_BD_DATA_L4_CSUM_MASK                    0x1
 294#define CORE_TX_BD_DATA_L4_CSUM_SHIFT                   4
 295#define CORE_TX_BD_DATA_IPV6_EXT_MASK                   0x1
 296#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT                  5
 297#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK                0x1
 298#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT               6
 299#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK        0x1
 300#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT       7
 301#define CORE_TX_BD_DATA_NBDS_MASK                       0xF
 302#define CORE_TX_BD_DATA_NBDS_SHIFT                      8
 303#define CORE_TX_BD_DATA_ROCE_FLAV_MASK                  0x1
 304#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT                 12
 305#define CORE_TX_BD_DATA_IP_LEN_MASK                     0x1
 306#define CORE_TX_BD_DATA_IP_LEN_SHIFT                    13
 307#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK     0x1
 308#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT    14
 309#define CORE_TX_BD_DATA_RESERVED0_MASK                  0x1
 310#define CORE_TX_BD_DATA_RESERVED0_SHIFT                 15
 311};
 312
 313/* Core TX BD for Light L2 */
 314struct core_tx_bd {
 315        struct regpair addr;
 316        __le16 nbytes;
 317        __le16 nw_vlan_or_lb_echo;
 318        struct core_tx_bd_data bd_data;
 319        __le16 bitfield1;
 320#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK         0x3FFF
 321#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT        0
 322#define CORE_TX_BD_TX_DST_MASK                  0x3
 323#define CORE_TX_BD_TX_DST_SHIFT                 14
 324};
 325
 326/* Light L2 TX Destination */
 327enum core_tx_dest {
 328        CORE_TX_DEST_NW,
 329        CORE_TX_DEST_LB,
 330        CORE_TX_DEST_RESERVED,
 331        CORE_TX_DEST_DROP,
 332        MAX_CORE_TX_DEST
 333};
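
/* Illustrative sketch, not part of the upstream qed_hsi.h: a minimal
 * single-fragment TX BD aimed at the network destination. Flags are built in
 * host order and converted once; the flag choices are example placeholders.
 */
static inline void core_tx_bd_fill_example(struct core_tx_bd *bd,
					   dma_addr_t frag, u16 len)
{
	u16 bd_data = 0, bitfield1 = 0;

	bd->addr.lo = cpu_to_le32(lower_32_bits(frag));
	bd->addr.hi = cpu_to_le32(upper_32_bits(frag));
	bd->nbytes = cpu_to_le16(len);
	bd->nw_vlan_or_lb_echo = 0;

	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, 1);
	bd->bd_data.as_bitfield = cpu_to_le16(bd_data);

	SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, CORE_TX_DEST_NW);
	bd->bitfield1 = cpu_to_le16(bitfield1);
}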
 334
 335/* Ramrod data for tx queue start ramrod */
 336struct core_tx_start_ramrod_data {
 337        struct regpair pbl_base_addr;
 338        __le16 mtu;
 339        __le16 sb_id;
 340        u8 sb_index;
 341        u8 stats_en;
 342        u8 stats_id;
 343        u8 conn_type;
 344        __le16 pbl_size;
 345        __le16 qm_pq_id;
 346        u8 gsi_offload_flag;
 347        u8 ctx_stats_en;
 348        u8 vport_id_valid;
 349        u8 vport_id;
 350        u8 enforce_security_flag;
 351        u8 reserved[7];
 352};
 353
 354/* Ramrod data for tx queue stop ramrod */
 355struct core_tx_stop_ramrod_data {
 356        __le32 reserved0[2];
 357};
 358
 359/* Ramrod data for tx queue update ramrod */
 360struct core_tx_update_ramrod_data {
 361        u8 update_qm_pq_id_flg;
 362        u8 reserved0;
 363        __le16 qm_pq_id;
 364        __le32 reserved1;
 365};
 366
 367/* Enum flag for what type of dcb data to update */
 368enum dcb_dscp_update_mode {
 369        DONT_UPDATE_DCB_DSCP,
 370        UPDATE_DCB,
 371        UPDATE_DSCP,
 372        UPDATE_DCB_DSCP,
 373        MAX_DCB_DSCP_UPDATE_MODE
 374};
 375
 376/* The core storm context for the Ystorm */
 377struct ystorm_core_conn_st_ctx {
 378        __le32 reserved[4];
 379};
 380
 381/* The core storm context for the Pstorm */
 382struct pstorm_core_conn_st_ctx {
 383        __le32 reserved[20];
 384};
 385
 386/* Core Slowpath Connection storm context of Xstorm */
 387struct xstorm_core_conn_st_ctx {
 388        __le32 spq_base_lo;
 389        __le32 spq_base_hi;
 390        struct regpair consolid_base_addr;
 391        __le16 spq_cons;
 392        __le16 consolid_cons;
 393        __le32 reserved0[55];
 394};
 395
 396struct e4_xstorm_core_conn_ag_ctx {
 397        u8 reserved0;
 398        u8 state;
 399        u8 flags0;
 400#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK    0x1
 401#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT   0
 402#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK       0x1
 403#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT      1
 404#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK       0x1
 405#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT      2
 406#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK    0x1
 407#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT   3
 408#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK       0x1
 409#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT      4
 410#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK       0x1
 411#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT      5
 412#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK       0x1
 413#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT      6
 414#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK       0x1
 415#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT      7
 416        u8 flags1;
 417#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK       0x1
 418#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT      0
 419#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK       0x1
 420#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT      1
 421#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK       0x1
 422#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT      2
 423#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK           0x1
 424#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT          3
 425#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK           0x1
 426#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT          4
 427#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK           0x1
 428#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT          5
 429#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK  0x1
 430#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
 431#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK    0x1
 432#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT   7
 433        u8 flags2;
 434#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK     0x3
 435#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT    0
 436#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK     0x3
 437#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT    2
 438#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK     0x3
 439#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT    4
 440#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK     0x3
 441#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT    6
 442        u8 flags3;
 443#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK     0x3
 444#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT    0
 445#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK     0x3
 446#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT    2
 447#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK     0x3
 448#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT    4
 449#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK     0x3
 450#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT    6
 451        u8 flags4;
 452#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK     0x3
 453#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT    0
 454#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK     0x3
 455#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT    2
 456#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK    0x3
 457#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT   4
 458#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK    0x3
 459#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT   6
 460        u8 flags5;
 461#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK    0x3
 462#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT   0
 463#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK    0x3
 464#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT   2
 465#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK    0x3
 466#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT   4
 467#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK    0x3
 468#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT   6
 469        u8 flags6;
 470#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK        0x3
 471#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT       0
 472#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK                    0x3
 473#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT                   2
 474#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK                   0x3
 475#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT                  4
 476#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK            0x3
 477#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
 478        u8 flags7;
 479#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK        0x3
 480#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT       0
 481#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK      0x3
 482#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT     2
 483#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK       0x3
 484#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT      4
 485#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK           0x1
 486#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT          6
 487#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK           0x1
 488#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT          7
 489        u8 flags8;
 490#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK   0x1
 491#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT  0
 492#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK   0x1
 493#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT  1
 494#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK   0x1
 495#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT  2
 496#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK   0x1
 497#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT  3
 498#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK   0x1
 499#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT  4
 500#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK   0x1
 501#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT  5
 502#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK   0x1
 503#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT  6
 504#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK   0x1
 505#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT  7
 506        u8 flags9;
 507#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK                  0x1
 508#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT                 0
 509#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK                  0x1
 510#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT                 1
 511#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK                  0x1
 512#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT                 2
 513#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK                  0x1
 514#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT                 3
 515#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK                  0x1
 516#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT                 4
 517#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK                  0x1
 518#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT                 5
 519#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK     0x1
 520#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT    6
 521#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK                  0x1
 522#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT                 7
 523        u8 flags10;
 524#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK                0x1
 525#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
 526#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK         0x1
 527#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
 528#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK             0x1
 529#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
 530#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK              0x1
 531#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT             3
 532#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK            0x1
 533#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
 534#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK                  0x1
 535#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT                 5
 536#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK              0x1
 537#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT             6
 538#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK              0x1
 539#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT             7
 540        u8 flags11;
 541#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK      0x1
 542#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT     0
 543#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK      0x1
 544#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT     1
 545#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK  0x1
 546#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
 547#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK         0x1
 548#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT        3
 549#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK         0x1
 550#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT        4
 551#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK         0x1
 552#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT        5
 553#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK    0x1
 554#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT   6
 555#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK         0x1
 556#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT        7
 557        u8 flags12;
 558#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK        0x1
 559#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT       0
 560#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK        0x1
 561#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT       1
 562#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK    0x1
 563#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT   2
 564#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK    0x1
 565#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT   3
 566#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK        0x1
 567#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT       4
 568#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK        0x1
 569#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT       5
 570#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK        0x1
 571#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT       6
 572#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK        0x1
 573#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT       7
 574        u8 flags13;
 575#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK        0x1
 576#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT       0
 577#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK        0x1
 578#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT       1
 579#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK    0x1
 580#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT   2
 581#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK    0x1
 582#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT   3
 583#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK    0x1
 584#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT   4
 585#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK    0x1
 586#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT   5
 587#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK    0x1
 588#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT   6
 589#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK    0x1
 590#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT   7
 591        u8 flags14;
 592#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK   0x1
 593#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT  0
 594#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK   0x1
 595#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT  1
 596#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK   0x1
 597#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT  2
 598#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK   0x1
 599#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT  3
 600#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK   0x1
 601#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT  4
 602#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK   0x1
 603#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT  5
 604#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK    0x3
 605#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT   6
 606        u8 byte2;
 607        __le16 physical_q0;
 608        __le16 consolid_prod;
 609        __le16 reserved16;
 610        __le16 tx_bd_cons;
 611        __le16 tx_bd_or_spq_prod;
 612        __le16 updated_qm_pq_id;
 613        __le16 conn_dpi;
 614        u8 byte3;
 615        u8 byte4;
 616        u8 byte5;
 617        u8 byte6;
 618        __le32 reg0;
 619        __le32 reg1;
 620        __le32 reg2;
 621        __le32 reg3;
 622        __le32 reg4;
 623        __le32 reg5;
 624        __le32 reg6;
 625        __le16 word7;
 626        __le16 word8;
 627        __le16 word9;
 628        __le16 word10;
 629        __le32 reg7;
 630        __le32 reg8;
 631        __le32 reg9;
 632        u8 byte7;
 633        u8 byte8;
 634        u8 byte9;
 635        u8 byte10;
 636        u8 byte11;
 637        u8 byte12;
 638        u8 byte13;
 639        u8 byte14;
 640        u8 byte15;
 641        u8 e5_reserved;
 642        __le16 word11;
 643        __le32 reg10;
 644        __le32 reg11;
 645        __le32 reg12;
 646        __le32 reg13;
 647        __le32 reg14;
 648        __le32 reg15;
 649        __le32 reg16;
 650        __le32 reg17;
 651        __le32 reg18;
 652        __le32 reg19;
 653        __le16 word12;
 654        __le16 word13;
 655        __le16 word14;
 656        __le16 word15;
 657};
 658
 659struct e4_tstorm_core_conn_ag_ctx {
 660        u8 byte0;
 661        u8 byte1;
 662        u8 flags0;
 663#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK    0x1
 664#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT   0
 665#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK    0x1
 666#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT   1
 667#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK    0x1
 668#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT   2
 669#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK    0x1
 670#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT   3
 671#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK    0x1
 672#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT   4
 673#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK    0x1
 674#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT   5
 675#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK     0x3
 676#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT    6
 677        u8 flags1;
 678#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK     0x3
 679#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT    0
 680#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK     0x3
 681#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT    2
 682#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK     0x3
 683#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT    4
 684#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK     0x3
 685#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT    6
 686        u8 flags2;
 687#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK     0x3
 688#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT    0
 689#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK     0x3
 690#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT    2
 691#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK     0x3
 692#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT    4
 693#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK     0x3
 694#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT    6
 695        u8 flags3;
 696#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK     0x3
 697#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT    0
 698#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK    0x3
 699#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT   2
 700#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK   0x1
 701#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT  4
 702#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK   0x1
 703#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT  5
 704#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK   0x1
 705#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT  6
 706#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK   0x1
 707#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT  7
 708        u8 flags4;
 709#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK           0x1
 710#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT          0
 711#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK           0x1
 712#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT          1
 713#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK           0x1
 714#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT          2
 715#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK           0x1
 716#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT          3
 717#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK           0x1
 718#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT          4
 719#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK           0x1
 720#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT          5
 721#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK          0x1
 722#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT         6
 723#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK         0x1
 724#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT        7
 725        u8 flags5;
 726#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK         0x1
 727#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT        0
 728#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK         0x1
 729#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT        1
 730#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK         0x1
 731#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT        2
 732#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK         0x1
 733#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT        3
 734#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK         0x1
 735#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT        4
 736#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK         0x1
 737#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT        5
 738#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK         0x1
 739#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT        6
 740#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK         0x1
 741#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT        7
 742        __le32 reg0;
 743        __le32 reg1;
 744        __le32 reg2;
 745        __le32 reg3;
 746        __le32 reg4;
 747        __le32 reg5;
 748        __le32 reg6;
 749        __le32 reg7;
 750        __le32 reg8;
 751        u8 byte2;
 752        u8 byte3;
 753        __le16 word0;
 754        u8 byte4;
 755        u8 byte5;
 756        __le16 word1;
 757        __le16 word2;
 758        __le16 word3;
 759        __le32 ll2_rx_prod;
 760        __le32 reg10;
 761};
 762
 763struct e4_ustorm_core_conn_ag_ctx {
 764        u8 reserved;
 765        u8 byte1;
 766        u8 flags0;
 767#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK    0x1
 768#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT   0
 769#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK    0x1
 770#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT   1
 771#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK     0x3
 772#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT    2
 773#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK     0x3
 774#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT    4
 775#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK     0x3
 776#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT    6
 777        u8 flags1;
 778#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK     0x3
 779#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT    0
 780#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK     0x3
 781#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT    2
 782#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK     0x3
 783#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT    4
 784#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK     0x3
 785#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT    6
 786        u8 flags2;
 787#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK           0x1
 788#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT          0
 789#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK           0x1
 790#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT          1
 791#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK           0x1
 792#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT          2
 793#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK           0x1
 794#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT          3
 795#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK           0x1
 796#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT          4
 797#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK           0x1
 798#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT          5
 799#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK           0x1
 800#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT          6
 801#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK         0x1
 802#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT        7
 803        u8 flags3;
 804#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK         0x1
 805#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT        0
 806#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK         0x1
 807#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT        1
 808#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK         0x1
 809#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT        2
 810#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK         0x1
 811#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT        3
 812#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK         0x1
 813#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT        4
 814#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK         0x1
 815#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT        5
 816#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK         0x1
 817#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT        6
 818#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK         0x1
 819#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT        7
 820        u8 byte2;
 821        u8 byte3;
 822        __le16 word0;
 823        __le16 word1;
 824        __le32 rx_producers;
 825        __le32 reg1;
 826        __le32 reg2;
 827        __le32 reg3;
 828        __le16 word2;
 829        __le16 word3;
 830};
 831
 832/* The core storm context for the Mstorm */
 833struct mstorm_core_conn_st_ctx {
 834        __le32 reserved[40];
 835};
 836
 837/* The core storm context for the Ustorm */
 838struct ustorm_core_conn_st_ctx {
 839        __le32 reserved[20];
 840};
 841
 842/* The core storm context for the Tstorm */
 843struct tstorm_core_conn_st_ctx {
 844        __le32 reserved[4];
 845};
 846
 847/* core connection context */
 848struct e4_core_conn_context {
 849        struct ystorm_core_conn_st_ctx ystorm_st_context;
 850        struct regpair ystorm_st_padding[2];
 851        struct pstorm_core_conn_st_ctx pstorm_st_context;
 852        struct regpair pstorm_st_padding[2];
 853        struct xstorm_core_conn_st_ctx xstorm_st_context;
 854        struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
 855        struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
 856        struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
 857        struct mstorm_core_conn_st_ctx mstorm_st_context;
 858        struct ustorm_core_conn_st_ctx ustorm_st_context;
 859        struct regpair ustorm_st_padding[2];
 860        struct tstorm_core_conn_st_ctx tstorm_st_context;
 861        struct regpair tstorm_st_padding[2];
 862};
 863
 864struct eth_mstorm_per_pf_stat {
 865        struct regpair gre_discard_pkts;
 866        struct regpair vxlan_discard_pkts;
 867        struct regpair geneve_discard_pkts;
 868        struct regpair lb_discard_pkts;
 869};
 870
 871struct eth_mstorm_per_queue_stat {
 872        struct regpair ttl0_discard;
 873        struct regpair packet_too_big_discard;
 874        struct regpair no_buff_discard;
 875        struct regpair not_active_discard;
 876        struct regpair tpa_coalesced_pkts;
 877        struct regpair tpa_coalesced_events;
 878        struct regpair tpa_aborts_num;
 879        struct regpair tpa_coalesced_bytes;
 880};
 881
 882/* Ethernet TX Per PF */
 883struct eth_pstorm_per_pf_stat {
 884        struct regpair sent_lb_ucast_bytes;
 885        struct regpair sent_lb_mcast_bytes;
 886        struct regpair sent_lb_bcast_bytes;
 887        struct regpair sent_lb_ucast_pkts;
 888        struct regpair sent_lb_mcast_pkts;
 889        struct regpair sent_lb_bcast_pkts;
 890        struct regpair sent_gre_bytes;
 891        struct regpair sent_vxlan_bytes;
 892        struct regpair sent_geneve_bytes;
 893        struct regpair sent_mpls_bytes;
 894        struct regpair sent_gre_mpls_bytes;
 895        struct regpair sent_udp_mpls_bytes;
 896        struct regpair sent_gre_pkts;
 897        struct regpair sent_vxlan_pkts;
 898        struct regpair sent_geneve_pkts;
 899        struct regpair sent_mpls_pkts;
 900        struct regpair sent_gre_mpls_pkts;
 901        struct regpair sent_udp_mpls_pkts;
 902        struct regpair gre_drop_pkts;
 903        struct regpair vxlan_drop_pkts;
 904        struct regpair geneve_drop_pkts;
 905        struct regpair mpls_drop_pkts;
 906        struct regpair gre_mpls_drop_pkts;
 907        struct regpair udp_mpls_drop_pkts;
 908};
 909
 910/* Ethernet TX Per Queue Stats */
 911struct eth_pstorm_per_queue_stat {
 912        struct regpair sent_ucast_bytes;
 913        struct regpair sent_mcast_bytes;
 914        struct regpair sent_bcast_bytes;
 915        struct regpair sent_ucast_pkts;
 916        struct regpair sent_mcast_pkts;
 917        struct regpair sent_bcast_pkts;
 918        struct regpair error_drop_pkts;
 919};
 920
 921/* ETH RX rate limit data */
 922struct eth_rx_rate_limit {
 923        __le16 mult;
 924        __le16 cnst;
 925        u8 add_sub_cnst;
 926        u8 reserved0;
 927        __le16 reserved1;
 928};
 929
 930/* Update RSS indirection table entry command */
 931struct eth_tstorm_rss_update_data {
 932        u8 valid;
 933        u8 vport_id;
 934        u8 ind_table_index;
 935        u8 reserved;
 936        __le16 ind_table_value;
 937        __le16 reserved1;
 938};
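
/* Illustrative sketch, not part of the upstream qed_hsi.h: staging one RSS
 * indirection-table entry update; the identifiers passed in are placeholders.
 */
static inline void eth_tstorm_rss_update_fill_example(struct eth_tstorm_rss_update_data *upd,
						      u8 vport_id, u8 entry_idx,
						      u16 entry_value)
{
	upd->valid = 1;
	upd->vport_id = vport_id;
	upd->ind_table_index = entry_idx;
	upd->ind_table_value = cpu_to_le16(entry_value);
}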
 939
 940struct eth_ustorm_per_pf_stat {
 941        struct regpair rcv_lb_ucast_bytes;
 942        struct regpair rcv_lb_mcast_bytes;
 943        struct regpair rcv_lb_bcast_bytes;
 944        struct regpair rcv_lb_ucast_pkts;
 945        struct regpair rcv_lb_mcast_pkts;
 946        struct regpair rcv_lb_bcast_pkts;
 947        struct regpair rcv_gre_bytes;
 948        struct regpair rcv_vxlan_bytes;
 949        struct regpair rcv_geneve_bytes;
 950        struct regpair rcv_gre_pkts;
 951        struct regpair rcv_vxlan_pkts;
 952        struct regpair rcv_geneve_pkts;
 953};
 954
 955struct eth_ustorm_per_queue_stat {
 956        struct regpair rcv_ucast_bytes;
 957        struct regpair rcv_mcast_bytes;
 958        struct regpair rcv_bcast_bytes;
 959        struct regpair rcv_ucast_pkts;
 960        struct regpair rcv_mcast_pkts;
 961        struct regpair rcv_bcast_pkts;
 962};
 963
 964/* Event Ring VF-PF Channel data */
 965struct vf_pf_channel_eqe_data {
 966        struct regpair msg_addr;
 967};
 968
 969/* Event Ring malicious VF data */
 970struct malicious_vf_eqe_data {
 971        u8 vf_id;
 972        u8 err_id;
 973        __le16 reserved[3];
 974};
 975
 976/* Event Ring initial cleanup data */
 977struct initial_cleanup_eqe_data {
 978        u8 vf_id;
 979        u8 reserved[7];
 980};
 981
 982/* Event Data Union */
 983union event_ring_data {
 984        u8 bytes[8];
 985        struct vf_pf_channel_eqe_data vf_pf_channel;
 986        struct iscsi_eqe_data iscsi_info;
 987        struct iscsi_connect_done_results iscsi_conn_done_info;
 988        union rdma_eqe_data rdma_data;
 989        struct malicious_vf_eqe_data malicious_vf;
 990        struct initial_cleanup_eqe_data vf_init_cleanup;
 991};
 992
 993/* Event Ring Entry */
 994struct event_ring_entry {
 995        u8 protocol_id;
 996        u8 opcode;
 997        u8 reserved0;
 998        u8 vf_id;
 999        __le16 echo;
1000        u8 fw_return_code;
1001        u8 flags;
1002#define EVENT_RING_ENTRY_ASYNC_MASK             0x1
1003#define EVENT_RING_ENTRY_ASYNC_SHIFT            0
1004#define EVENT_RING_ENTRY_RESERVED1_MASK         0x7F
1005#define EVENT_RING_ENTRY_RESERVED1_SHIFT        1
1006        union event_ring_data data;
1007};
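
/* Illustrative sketch, not part of the upstream qed_hsi.h: decoding the
 * fixed part of an event ring entry with GET_FIELD() from common_hsi.h.
 * What a driver does with the opcode and echo is policy, not layout.
 */
static inline void event_ring_entry_decode_example(const struct event_ring_entry *e)
{
	if (GET_FIELD(e->flags, EVENT_RING_ENTRY_ASYNC))
		pr_debug("async EQE: protocol %u opcode %u echo %u fw_rc %u\n",
			 e->protocol_id, e->opcode, le16_to_cpu(e->echo),
			 e->fw_return_code);
}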
1008
1009/* Event Ring Next Page Address */
1010struct event_ring_next_addr {
1011        struct regpair addr;
1012        __le32 reserved[2];
1013};
1014
1015/* Event Ring Element */
1016union event_ring_element {
1017        struct event_ring_entry entry;
1018        struct event_ring_next_addr next_addr;
1019};
1020
1021/* FW flow control mode */
1022enum fw_flow_ctrl_mode {
1023        flow_ctrl_pause,
1024        flow_ctrl_pfc,
1025        MAX_FW_FLOW_CTRL_MODE
1026};
1027
1028/* GFT profile type */
1029enum gft_profile_type {
1030        GFT_PROFILE_TYPE_4_TUPLE,
1031        GFT_PROFILE_TYPE_L4_DST_PORT,
1032        GFT_PROFILE_TYPE_IP_DST_ADDR,
1033        GFT_PROFILE_TYPE_IP_SRC_ADDR,
1034        GFT_PROFILE_TYPE_TUNNEL_TYPE,
1035        MAX_GFT_PROFILE_TYPE
1036};
1037
1038/* Major and minor HSI versions */
1039struct hsi_fp_ver_struct {
1040        u8 minor_ver_arr[2];
1041        u8 major_ver_arr[2];
1042};
1043
1044enum iwarp_ll2_tx_queues {
1045        IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
1046        IWARP_LL2_ALIGNED_TX_QUEUE,
1047        IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
1048        IWARP_LL2_ERROR,
1049        MAX_IWARP_LL2_TX_QUEUES
1050};
1051
1052/* Malicious VF error ID */
1053enum malicious_vf_error_id {
1054        MALICIOUS_VF_NO_ERROR,
1055        VF_PF_CHANNEL_NOT_READY,
1056        VF_ZONE_MSG_NOT_VALID,
1057        VF_ZONE_FUNC_NOT_ENABLED,
1058        ETH_PACKET_TOO_SMALL,
1059        ETH_ILLEGAL_VLAN_MODE,
1060        ETH_MTU_VIOLATION,
1061        ETH_ILLEGAL_INBAND_TAGS,
1062        ETH_VLAN_INSERT_AND_INBAND_VLAN,
1063        ETH_ILLEGAL_NBDS,
1064        ETH_FIRST_BD_WO_SOP,
1065        ETH_INSUFFICIENT_BDS,
1066        ETH_ILLEGAL_LSO_HDR_NBDS,
1067        ETH_ILLEGAL_LSO_MSS,
1068        ETH_ZERO_SIZE_BD,
1069        ETH_ILLEGAL_LSO_HDR_LEN,
1070        ETH_INSUFFICIENT_PAYLOAD,
1071        ETH_EDPM_OUT_OF_SYNC,
1072        ETH_TUNN_IPV6_EXT_NBD_ERR,
1073        ETH_CONTROL_PACKET_VIOLATION,
1074        ETH_ANTI_SPOOFING_ERR,
1075        ETH_PACKET_SIZE_TOO_LARGE,
1076        CORE_ILLEGAL_VLAN_MODE,
1077        CORE_ILLEGAL_NBDS,
1078        CORE_FIRST_BD_WO_SOP,
1079        CORE_INSUFFICIENT_BDS,
1080        CORE_PACKET_TOO_SMALL,
1081        CORE_ILLEGAL_INBAND_TAGS,
1082        CORE_VLAN_INSERT_AND_INBAND_VLAN,
1083        CORE_MTU_VIOLATION,
1084        CORE_CONTROL_PACKET_VIOLATION,
1085        CORE_ANTI_SPOOFING_ERR,
1086        CORE_PACKET_SIZE_TOO_LARGE,
1087        CORE_ILLEGAL_BD_FLAGS,
1088        CORE_GSI_PACKET_VIOLATION,
1089        MAX_MALICIOUS_VF_ERROR_ID,
1090};
1091
1092/* Mstorm non-triggering VF zone */
1093struct mstorm_non_trigger_vf_zone {
1094        struct eth_mstorm_per_queue_stat eth_queue_stat;
1095        struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
1096};
1097
1098/* Mstorm VF zone */
1099struct mstorm_vf_zone {
1100        struct mstorm_non_trigger_vf_zone non_trigger;
1101};
1102
1103/* vlan header including TPID and TCI fields */
1104struct vlan_header {
1105        __le16 tpid;
1106        __le16 tci;
1107};
1108
1109/* outer tag configurations */
1110struct outer_tag_config_struct {
1111        u8 enable_stag_pri_change;
1112        u8 pri_map_valid;
1113        u8 reserved[2];
1114        struct vlan_header outer_tag;
1115        u8 inner_to_outer_pri_map[8];
1116};
1117
1118/* personality per PF */
1119enum personality_type {
1120        BAD_PERSONALITY_TYP,
1121        PERSONALITY_ISCSI,
1122        PERSONALITY_FCOE,
1123        PERSONALITY_RDMA_AND_ETH,
1124        PERSONALITY_RDMA,
1125        PERSONALITY_CORE,
1126        PERSONALITY_ETH,
1127        PERSONALITY_RESERVED,
1128        MAX_PERSONALITY_TYPE
1129};
1130
1131/* tunnel configuration */
1132struct pf_start_tunnel_config {
1133        u8 set_vxlan_udp_port_flg;
1134        u8 set_geneve_udp_port_flg;
1135        u8 set_no_inner_l2_vxlan_udp_port_flg;
1136        u8 tunnel_clss_vxlan;
1137        u8 tunnel_clss_l2geneve;
1138        u8 tunnel_clss_ipgeneve;
1139        u8 tunnel_clss_l2gre;
1140        u8 tunnel_clss_ipgre;
1141        __le16 vxlan_udp_port;
1142        __le16 geneve_udp_port;
1143        __le16 no_inner_l2_vxlan_udp_port;
1144        __le16 reserved[3];
1145};
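
/* Illustrative sketch, not part of the upstream qed_hsi.h: requesting the
 * IANA-assigned VXLAN UDP port (4789) at PF start; only the flag/port pair
 * is touched here, classification fields are left to the caller.
 */
static inline void pf_start_tunnel_vxlan_port_example(struct pf_start_tunnel_config *cfg)
{
	cfg->set_vxlan_udp_port_flg = 1;
	cfg->vxlan_udp_port = cpu_to_le16(4789);
}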
1146
1147/* Ramrod data for PF start ramrod */
1148struct pf_start_ramrod_data {
1149        struct regpair event_ring_pbl_addr;
1150        struct regpair consolid_q_pbl_addr;
1151        struct pf_start_tunnel_config tunnel_config;
1152        __le16 event_ring_sb_id;
1153        u8 base_vf_id;
1154        u8 num_vfs;
1155        u8 event_ring_num_pages;
1156        u8 event_ring_sb_index;
1157        u8 path_id;
1158        u8 warning_as_error;
1159        u8 dont_log_ramrods;
1160        u8 personality;
1161        __le16 log_type_mask;
1162        u8 mf_mode;
1163        u8 integ_phase;
1164        u8 allow_npar_tx_switching;
1165        u8 reserved0;
1166        struct hsi_fp_ver_struct hsi_fp_ver;
1167        struct outer_tag_config_struct outer_tag_config;
1168};
1169
1170/* Per-protocol DCB data, carried in the PF update ramrod */
1171struct protocol_dcb_data {
1172        u8 dcb_enable_flag;
1173        u8 dscp_enable_flag;
1174        u8 dcb_priority;
1175        u8 dcb_tc;
1176        u8 dscp_val;
1177        u8 dcb_dont_add_vlan0;
1178};
1179
1180/* Update tunnel configuration */
1181struct pf_update_tunnel_config {
1182        u8 update_rx_pf_clss;
1183        u8 update_rx_def_ucast_clss;
1184        u8 update_rx_def_non_ucast_clss;
1185        u8 set_vxlan_udp_port_flg;
1186        u8 set_geneve_udp_port_flg;
1187        u8 set_no_inner_l2_vxlan_udp_port_flg;
1188        u8 tunnel_clss_vxlan;
1189        u8 tunnel_clss_l2geneve;
1190        u8 tunnel_clss_ipgeneve;
1191        u8 tunnel_clss_l2gre;
1192        u8 tunnel_clss_ipgre;
1193        u8 reserved;
1194        __le16 vxlan_udp_port;
1195        __le16 geneve_udp_port;
1196        __le16 no_inner_l2_vxlan_udp_port;
1197        __le16 reserved1[3];
1198};
1199
1200/* Data for PF update ramrod */
1201struct pf_update_ramrod_data {
1202        u8 update_eth_dcb_data_mode;
1203        u8 update_fcoe_dcb_data_mode;
1204        u8 update_iscsi_dcb_data_mode;
1205        u8 update_roce_dcb_data_mode;
1206        u8 update_rroce_dcb_data_mode;
1207        u8 update_iwarp_dcb_data_mode;
1208        u8 update_mf_vlan_flag;
1209        u8 update_enable_stag_pri_change;
1210        struct protocol_dcb_data eth_dcb_data;
1211        struct protocol_dcb_data fcoe_dcb_data;
1212        struct protocol_dcb_data iscsi_dcb_data;
1213        struct protocol_dcb_data roce_dcb_data;
1214        struct protocol_dcb_data rroce_dcb_data;
1215        struct protocol_dcb_data iwarp_dcb_data;
1216        __le16 mf_vlan;
1217        u8 enable_stag_pri_change;
1218        u8 reserved;
1219        struct pf_update_tunnel_config tunnel_config;
1220};
1221
1222/* Ports mode */
1223enum ports_mode {
1224        ENGX2_PORTX1,
1225        ENGX2_PORTX2,
1226        ENGX1_PORTX1,
1227        ENGX1_PORTX2,
1228        ENGX1_PORTX4,
1229        MAX_PORTS_MODE
1230};
1231
1232/* Used to index hsi_fp_[major|minor]_ver_arr per protocol */
1233enum protocol_version_array_key {
1234        ETH_VER_KEY = 0,
1235        ROCE_VER_KEY,
1236        MAX_PROTOCOL_VERSION_ARRAY_KEY
1237};
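
/* Illustrative sketch, not part of the upstream qed_hsi.h: filling the
 * fast-path HSI version array for the Ethernet slot, assuming the
 * ETH_HSI_VER_MAJOR / ETH_HSI_VER_MINOR constants from eth_common.h.
 */
static inline void hsi_fp_ver_fill_example(struct hsi_fp_ver_struct *ver)
{
	ver->major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	ver->minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
}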
1238
1239/* RDMA TX Stats */
1240struct rdma_sent_stats {
1241        struct regpair sent_bytes;
1242        struct regpair sent_pkts;
1243};
1244
1245/* Pstorm non-triggering VF zone */
1246struct pstorm_non_trigger_vf_zone {
1247        struct eth_pstorm_per_queue_stat eth_queue_stat;
1248        struct rdma_sent_stats rdma_stats;
1249};
1250
1251/* Pstorm VF zone */
1252struct pstorm_vf_zone {
1253        struct pstorm_non_trigger_vf_zone non_trigger;
1254        struct regpair reserved[7];
1255};
1256
1257/* Ramrod Header of SPQE */
1258struct ramrod_header {
1259        __le32 cid;
1260        u8 cmd_id;
1261        u8 protocol_id;
1262        __le16 echo;
1263};
1264
1265/* RDMA RX Stats */
1266struct rdma_rcv_stats {
1267        struct regpair rcv_bytes;
1268        struct regpair rcv_pkts;
1269};
1270
1271/* Data for update QCN/DCQCN RL ramrod */
1272struct rl_update_ramrod_data {
1273        u8 qcn_update_param_flg;
1274        u8 dcqcn_update_param_flg;
1275        u8 rl_init_flg;
1276        u8 rl_start_flg;
1277        u8 rl_stop_flg;
1278        u8 rl_id_first;
1279        u8 rl_id_last;
1280        u8 rl_dc_qcn_flg;
1281        u8 dcqcn_reset_alpha_on_idle;
1282        u8 rl_bc_stage_th;
1283        u8 rl_timer_stage_th;
1284        u8 reserved1;
1285        __le32 rl_bc_rate;
1286        __le16 rl_max_rate;
1287        __le16 rl_r_ai;
1288        __le16 rl_r_hai;
1289        __le16 dcqcn_g;
1290        __le32 dcqcn_k_us;
1291        __le32 dcqcn_timeuot_us;
1292        __le32 qcn_timeuot_us;
1293        __le32 reserved2;
1294};
1295
1296/* Slowpath Element (SPQE) */
1297struct slow_path_element {
1298        struct ramrod_header hdr;
1299        struct regpair data_ptr;
1300};
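
/* Illustrative sketch, not part of the upstream qed_hsi.h: composing a
 * COMMON-protocol slowpath element. PROTOCOLID_COMMON is assumed to come from
 * common_hsi.h; cid, echo and the ramrod data address are placeholders.
 */
static inline void slow_path_element_fill_example(struct slow_path_element *elem,
						  u32 cid, u16 echo,
						  dma_addr_t ramrod_data)
{
	elem->hdr.cid = cpu_to_le32(cid);
	elem->hdr.cmd_id = COMMON_RAMROD_PF_START;
	elem->hdr.protocol_id = PROTOCOLID_COMMON;
	elem->hdr.echo = cpu_to_le16(echo);
	elem->data_ptr.lo = cpu_to_le32(lower_32_bits(ramrod_data));
	elem->data_ptr.hi = cpu_to_le32(upper_32_bits(ramrod_data));
}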
1301
1302/* Tstorm non-triggering VF zone */
1303struct tstorm_non_trigger_vf_zone {
1304        struct rdma_rcv_stats rdma_stats;
1305};
1306
1307struct tstorm_per_port_stat {
1308        struct regpair trunc_error_discard;
1309        struct regpair mac_error_discard;
1310        struct regpair mftag_filter_discard;
1311        struct regpair eth_mac_filter_discard;
1312        struct regpair ll2_mac_filter_discard;
1313        struct regpair ll2_conn_disabled_discard;
1314        struct regpair iscsi_irregular_pkt;
1315        struct regpair fcoe_irregular_pkt;
1316        struct regpair roce_irregular_pkt;
1317        struct regpair iwarp_irregular_pkt;
1318        struct regpair eth_irregular_pkt;
1319        struct regpair toe_irregular_pkt;
1320        struct regpair preroce_irregular_pkt;
1321        struct regpair eth_gre_tunn_filter_discard;
1322        struct regpair eth_vxlan_tunn_filter_discard;
1323        struct regpair eth_geneve_tunn_filter_discard;
1324        struct regpair eth_gft_drop_pkt;
1325};
1326
1327/* Tstorm VF zone */
1328struct tstorm_vf_zone {
1329        struct tstorm_non_trigger_vf_zone non_trigger;
1330};
1331
1332/* Tunnel classification scheme */
1333enum tunnel_clss {
1334        TUNNEL_CLSS_MAC_VLAN = 0,
1335        TUNNEL_CLSS_MAC_VNI,
1336        TUNNEL_CLSS_INNER_MAC_VLAN,
1337        TUNNEL_CLSS_INNER_MAC_VNI,
1338        TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
1339        MAX_TUNNEL_CLSS
1340};
1341
1342/* Ustorm non-triggering VF zone */
1343struct ustorm_non_trigger_vf_zone {
1344        struct eth_ustorm_per_queue_stat eth_queue_stat;
1345        struct regpair vf_pf_msg_addr;
1346};
1347
1348/* Ustorm triggering VF zone */
1349struct ustorm_trigger_vf_zone {
1350        u8 vf_pf_msg_valid;
1351        u8 reserved[7];
1352};
1353
1354/* Ustorm VF zone */
1355struct ustorm_vf_zone {
1356        struct ustorm_non_trigger_vf_zone non_trigger;
1357        struct ustorm_trigger_vf_zone trigger;
1358};
1359
1360/* VF-PF channel data */
1361struct vf_pf_channel_data {
1362        __le32 ready;
1363        u8 valid;
1364        u8 reserved0;
1365        __le16 reserved1;
1366};
1367
1368/* Ramrod data for VF start ramrod */
1369struct vf_start_ramrod_data {
1370        u8 vf_id;
1371        u8 enable_flr_ack;
1372        __le16 opaque_fid;
1373        u8 personality;
1374        u8 reserved[7];
1375        struct hsi_fp_ver_struct hsi_fp_ver;
1376
1377};
1378
1379/* Ramrod data for VF stop ramrod */
1380struct vf_stop_ramrod_data {
1381        u8 vf_id;
1382        u8 reserved0;
1383        __le16 reserved1;
1384        __le32 reserved2;
1385};
1386
1387/* VF zone size mode */
1388enum vf_zone_size_mode {
1389        VF_ZONE_SIZE_MODE_DEFAULT,
1390        VF_ZONE_SIZE_MODE_DOUBLE,
1391        VF_ZONE_SIZE_MODE_QUAD,
1392        MAX_VF_ZONE_SIZE_MODE
1393};
1394
1395/* Xstorm non-triggering VF zone */
1396struct xstorm_non_trigger_vf_zone {
1397        struct regpair non_edpm_ack_pkts;
1398};
1399
1400/* Xstorm VF zone */
1401struct xstorm_vf_zone {
1402        struct xstorm_non_trigger_vf_zone non_trigger;
1403};
1404
1405/* Attention status block */
1406struct atten_status_block {
1407        __le32 atten_bits;
1408        __le32 atten_ack;
1409        __le16 reserved0;
1410        __le16 sb_index;
1411        __le32 reserved1;
1412};
1413
1414/* DMAE command */
1415struct dmae_cmd {
1416        __le32 opcode;
1417#define DMAE_CMD_SRC_MASK               0x1
1418#define DMAE_CMD_SRC_SHIFT              0
1419#define DMAE_CMD_DST_MASK               0x3
1420#define DMAE_CMD_DST_SHIFT              1
1421#define DMAE_CMD_C_DST_MASK             0x1
1422#define DMAE_CMD_C_DST_SHIFT            3
1423#define DMAE_CMD_CRC_RESET_MASK         0x1
1424#define DMAE_CMD_CRC_RESET_SHIFT        4
1425#define DMAE_CMD_SRC_ADDR_RESET_MASK    0x1
1426#define DMAE_CMD_SRC_ADDR_RESET_SHIFT   5
1427#define DMAE_CMD_DST_ADDR_RESET_MASK    0x1
1428#define DMAE_CMD_DST_ADDR_RESET_SHIFT   6
1429#define DMAE_CMD_COMP_FUNC_MASK         0x1
1430#define DMAE_CMD_COMP_FUNC_SHIFT        7
1431#define DMAE_CMD_COMP_WORD_EN_MASK      0x1
1432#define DMAE_CMD_COMP_WORD_EN_SHIFT     8
1433#define DMAE_CMD_COMP_CRC_EN_MASK       0x1
1434#define DMAE_CMD_COMP_CRC_EN_SHIFT      9
1435#define DMAE_CMD_COMP_CRC_OFFSET_MASK   0x7
1436#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
1437#define DMAE_CMD_RESERVED1_MASK         0x1
1438#define DMAE_CMD_RESERVED1_SHIFT        13
1439#define DMAE_CMD_ENDIANITY_MODE_MASK    0x3
1440#define DMAE_CMD_ENDIANITY_MODE_SHIFT   14
1441#define DMAE_CMD_ERR_HANDLING_MASK      0x3
1442#define DMAE_CMD_ERR_HANDLING_SHIFT     16
1443#define DMAE_CMD_PORT_ID_MASK           0x3
1444#define DMAE_CMD_PORT_ID_SHIFT          18
1445#define DMAE_CMD_SRC_PF_ID_MASK         0xF
1446#define DMAE_CMD_SRC_PF_ID_SHIFT        20
1447#define DMAE_CMD_DST_PF_ID_MASK         0xF
1448#define DMAE_CMD_DST_PF_ID_SHIFT        24
1449#define DMAE_CMD_SRC_VF_ID_VALID_MASK   0x1
1450#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
1451#define DMAE_CMD_DST_VF_ID_VALID_MASK   0x1
1452#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
1453#define DMAE_CMD_RESERVED2_MASK         0x3
1454#define DMAE_CMD_RESERVED2_SHIFT        30
1455        __le32 src_addr_lo;
1456        __le32 src_addr_hi;
1457        __le32 dst_addr_lo;
1458        __le32 dst_addr_hi;
1459        __le16 length_dw;
1460        __le16 opcode_b;
1461#define DMAE_CMD_SRC_VF_ID_MASK         0xFF
1462#define DMAE_CMD_SRC_VF_ID_SHIFT        0
1463#define DMAE_CMD_DST_VF_ID_MASK         0xFF
1464#define DMAE_CMD_DST_VF_ID_SHIFT        8
1465        __le32 comp_addr_lo;
1466        __le32 comp_addr_hi;
1467        __le32 comp_val;
1468        __le32 crc32;
1469        __le32 crc_32_c;
1470        __le16 crc16;
1471        __le16 crc16_c;
1472        __le16 crc10;
1473        __le16 error_bit_reserved;
1474#define DMAE_CMD_ERROR_BIT_MASK        0x1
1475#define DMAE_CMD_ERROR_BIT_SHIFT       0
1476#define DMAE_CMD_RESERVED_MASK         0x7FFF
1477#define DMAE_CMD_RESERVED_SHIFT        1
1478        __le16 xsum16;
1479        __le16 xsum8;
1480};
1481
1482enum dmae_cmd_comp_crc_en_enum {
1483        dmae_cmd_comp_crc_disabled,
1484        dmae_cmd_comp_crc_enabled,
1485        MAX_DMAE_CMD_COMP_CRC_EN_ENUM
1486};
1487
1488enum dmae_cmd_comp_func_enum {
1489        dmae_cmd_comp_func_to_src,
1490        dmae_cmd_comp_func_to_dst,
1491        MAX_DMAE_CMD_COMP_FUNC_ENUM
1492};
1493
1494enum dmae_cmd_comp_word_en_enum {
1495        dmae_cmd_comp_word_disabled,
1496        dmae_cmd_comp_word_enabled,
1497        MAX_DMAE_CMD_COMP_WORD_EN_ENUM
1498};
1499
1500enum dmae_cmd_c_dst_enum {
1501        dmae_cmd_c_dst_pcie,
1502        dmae_cmd_c_dst_grc,
1503        MAX_DMAE_CMD_C_DST_ENUM
1504};
1505
1506enum dmae_cmd_dst_enum {
1507        dmae_cmd_dst_none_0,
1508        dmae_cmd_dst_pcie,
1509        dmae_cmd_dst_grc,
1510        dmae_cmd_dst_none_3,
1511        MAX_DMAE_CMD_DST_ENUM
1512};
1513
1514enum dmae_cmd_error_handling_enum {
1515        dmae_cmd_error_handling_send_regular_comp,
1516        dmae_cmd_error_handling_send_comp_with_err,
1517        dmae_cmd_error_handling_dont_send_comp,
1518        MAX_DMAE_CMD_ERROR_HANDLING_ENUM
1519};
1520
1521enum dmae_cmd_src_enum {
1522        dmae_cmd_src_pcie,
1523        dmae_cmd_src_grc,
1524        MAX_DMAE_CMD_SRC_ENUM
1525};
1526
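/* Illustrative sketch (not part of the HSI): the DMAE opcode bit fields and
 * the enums above are typically composed with the SET_FIELD() helper from
 * common_hsi.h and then byte-swapped into the little-endian command, e.g.
 * (cmd is a hypothetical struct dmae_cmd pointer):
 *
 *	u32 opcode = 0;
 *
 *	SET_FIELD(opcode, DMAE_CMD_SRC, dmae_cmd_src_pcie);
 *	SET_FIELD(opcode, DMAE_CMD_DST, dmae_cmd_dst_grc);
 *	SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, dmae_cmd_comp_func_to_src);
 *	SET_FIELD(opcode, DMAE_CMD_ERR_HANDLING,
 *		  dmae_cmd_error_handling_send_regular_comp);
 *	cmd->opcode = cpu_to_le32(opcode);
 */
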
1527struct e4_mstorm_core_conn_ag_ctx {
1528        u8 byte0;
1529        u8 byte1;
1530        u8 flags0;
1531#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK    0x1
1532#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT   0
1533#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK    0x1
1534#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT   1
1535#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK     0x3
1536#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT    2
1537#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK     0x3
1538#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT    4
1539#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK     0x3
1540#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT    6
1541        u8 flags1;
1542#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK           0x1
1543#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT          0
1544#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK           0x1
1545#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT          1
1546#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK           0x1
1547#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT          2
1548#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK         0x1
1549#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT        3
1550#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK         0x1
1551#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT        4
1552#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK         0x1
1553#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT        5
1554#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK         0x1
1555#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT        6
1556#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK         0x1
1557#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT        7
1558        __le16 word0;
1559        __le16 word1;
1560        __le32 reg0;
1561        __le32 reg1;
1562};
1563
1564struct e4_ystorm_core_conn_ag_ctx {
1565        u8 byte0;
1566        u8 byte1;
1567        u8 flags0;
1568#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK    0x1
1569#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT   0
1570#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK    0x1
1571#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT   1
1572#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK     0x3
1573#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT    2
1574#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK     0x3
1575#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT    4
1576#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK     0x3
1577#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT    6
1578        u8 flags1;
1579#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK           0x1
1580#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT          0
1581#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK           0x1
1582#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT          1
1583#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK           0x1
1584#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT          2
1585#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK         0x1
1586#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT        3
1587#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK         0x1
1588#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT        4
1589#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK         0x1
1590#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT        5
1591#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK         0x1
1592#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT        6
1593#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK         0x1
1594#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT        7
1595        u8 byte2;
1596        u8 byte3;
1597        __le16 word0;
1598        __le32 reg0;
1599        __le32 reg1;
1600        __le16 word1;
1601        __le16 word2;
1602        __le16 word3;
1603        __le16 word4;
1604        __le32 reg2;
1605        __le32 reg3;
1606};
1607
1608/* DMAE parameters */
1609struct qed_dmae_params {
1610        u32 flags;
1611/* If QED_DMAE_PARAMS_RW_REPL_SRC flag is set and the
1612 * source is a block of length DMAE_MAX_RW_SIZE and the
1613 * destination is larger, the source block will be duplicated as
1614 * many times as required to fill the destination block. This is
1615 * used mostly to write a zeroed buffer to a destination address
1616 * using DMA.
1617 */
1618#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK        0x1
1619#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT       0
1620#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK       0x1
1621#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT      1
1622#define QED_DMAE_PARAMS_DST_VF_VALID_MASK       0x1
1623#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT      2
1624#define QED_DMAE_PARAMS_COMPLETION_DST_MASK     0x1
1625#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT    3
1626#define QED_DMAE_PARAMS_PORT_VALID_MASK         0x1
1627#define QED_DMAE_PARAMS_PORT_VALID_SHIFT        4
1628#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK       0x1
1629#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT      5
1630#define QED_DMAE_PARAMS_DST_PF_VALID_MASK       0x1
1631#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT      6
1632#define QED_DMAE_PARAMS_RESERVED_MASK           0x1FFFFFF
1633#define QED_DMAE_PARAMS_RESERVED_SHIFT          7
1634        u8 src_vfid;
1635        u8 dst_vfid;
1636        u8 port_id;
1637        u8 src_pfid;
1638        u8 dst_pfid;
1639        u8 reserved1;
1640        __le16 reserved2;
1641};
1642
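/* Illustrative sketch (not part of the HSI): a caller that wants the source
 * block replicated across a larger destination (e.g. zeroing a GRC region by
 * DMA, as described above) could set the flags with SET_FIELD() from
 * common_hsi.h before handing the params to one of the driver's DMAE helpers
 * (e.g. qed_dmae_host2grc(), declared outside this file). port_id below is a
 * hypothetical local variable:
 *
 *	struct qed_dmae_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
 *	SET_FIELD(params.flags, QED_DMAE_PARAMS_PORT_VALID, 0x1);
 *	params.port_id = port_id;
 */
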
1643/* IGU cleanup command */
1644struct igu_cleanup {
1645        __le32 sb_id_and_flags;
1646#define IGU_CLEANUP_RESERVED0_MASK      0x7FFFFFF
1647#define IGU_CLEANUP_RESERVED0_SHIFT     0
1648#define IGU_CLEANUP_CLEANUP_SET_MASK    0x1
1649#define IGU_CLEANUP_CLEANUP_SET_SHIFT   27
1650#define IGU_CLEANUP_CLEANUP_TYPE_MASK   0x7
1651#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT  28
1652#define IGU_CLEANUP_COMMAND_TYPE_MASK   0x1
1653#define IGU_CLEANUP_COMMAND_TYPE_SHIFT  31
1654        __le32 reserved1;
1655};
1656
1657/* IGU firmware driver command */
1658union igu_command {
1659        struct igu_prod_cons_update prod_cons_update;
1660        struct igu_cleanup cleanup;
1661};
1662
1663/* IGU firmware driver command */
1664struct igu_command_reg_ctrl {
1665        __le16 opaque_fid;
1666        __le16 igu_command_reg_ctrl_fields;
1667#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK  0xFFF
1668#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
1669#define IGU_COMMAND_REG_CTRL_RESERVED_MASK      0x7
1670#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT     12
1671#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK  0x1
1672#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
1673};
1674
1675/* IGU mapping line structure */
1676struct igu_mapping_line {
1677        __le32 igu_mapping_line_fields;
1678#define IGU_MAPPING_LINE_VALID_MASK             0x1
1679#define IGU_MAPPING_LINE_VALID_SHIFT            0
1680#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK     0xFF
1681#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT    1
1682#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK   0xFF
1683#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT  9
1684#define IGU_MAPPING_LINE_PF_VALID_MASK          0x1
1685#define IGU_MAPPING_LINE_PF_VALID_SHIFT         17
1686#define IGU_MAPPING_LINE_IPS_GROUP_MASK         0x3F
1687#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT        18
1688#define IGU_MAPPING_LINE_RESERVED_MASK          0xFF
1689#define IGU_MAPPING_LINE_RESERVED_SHIFT         24
1690};
1691
1692/* IGU MSIX line structure */
1693struct igu_msix_vector {
1694        struct regpair address;
1695        __le32 data;
1696        __le32 msix_vector_fields;
1697#define IGU_MSIX_VECTOR_MASK_BIT_MASK           0x1
1698#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT          0
1699#define IGU_MSIX_VECTOR_RESERVED0_MASK          0x7FFF
1700#define IGU_MSIX_VECTOR_RESERVED0_SHIFT         1
1701#define IGU_MSIX_VECTOR_STEERING_TAG_MASK       0xFF
1702#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT      16
1703#define IGU_MSIX_VECTOR_RESERVED1_MASK          0xFF
1704#define IGU_MSIX_VECTOR_RESERVED1_SHIFT         24
1705};

1706/* per encapsulation type enabling flags */
1707struct prs_reg_encapsulation_type_en {
1708        u8 flags;
1709#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK          0x1
1710#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT         0
1711#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK           0x1
1712#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT          1
1713#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK                 0x1
1714#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT                2
1715#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK                 0x1
1716#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT                3
1717#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK       0x1
1718#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT      4
1719#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK        0x1
1720#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT       5
1721#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK                     0x3
1722#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT                    6
1723};
1724
1725enum pxp_tph_st_hint {
1726        TPH_ST_HINT_BIDIR,
1727        TPH_ST_HINT_REQUESTER,
1728        TPH_ST_HINT_TARGET,
1729        TPH_ST_HINT_TARGET_PRIO,
1730        MAX_PXP_TPH_ST_HINT
1731};
1732
1733/* QM hardware structure of enable bypass credit mask */
1734struct qm_rf_bypass_mask {
1735        u8 flags;
1736#define QM_RF_BYPASS_MASK_LINEVOQ_MASK          0x1
1737#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT         0
1738#define QM_RF_BYPASS_MASK_RESERVED0_MASK        0x1
1739#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT       1
1740#define QM_RF_BYPASS_MASK_PFWFQ_MASK            0x1
1741#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT           2
1742#define QM_RF_BYPASS_MASK_VPWFQ_MASK            0x1
1743#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT           3
1744#define QM_RF_BYPASS_MASK_PFRL_MASK             0x1
1745#define QM_RF_BYPASS_MASK_PFRL_SHIFT            4
1746#define QM_RF_BYPASS_MASK_VPQCNRL_MASK          0x1
1747#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT         5
1748#define QM_RF_BYPASS_MASK_FWPAUSE_MASK          0x1
1749#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT         6
1750#define QM_RF_BYPASS_MASK_RESERVED1_MASK        0x1
1751#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT       7
1752};
1753
1754/* QM hardware structure of opportunistic credit mask */
1755struct qm_rf_opportunistic_mask {
1756        __le16 flags;
1757#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK           0x1
1758#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT          0
1759#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK           0x1
1760#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT          1
1761#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK             0x1
1762#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT            2
1763#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK             0x1
1764#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT            3
1765#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK              0x1
1766#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT             4
1767#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK           0x1
1768#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT          5
1769#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK           0x1
1770#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT          6
1771#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK         0x1
1772#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT        7
1773#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK        0x1
1774#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT       8
1775#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK         0x7F
1776#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT        9
1777};
1778
1779/* QM hardware structure of QM map memory */
1780struct qm_rf_pq_map_e4 {
1781        __le32 reg;
1782#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK           0x1
1783#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT          0
1784#define QM_RF_PQ_MAP_E4_RL_ID_MASK              0xFF
1785#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT             1
1786#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK           0x1FF
1787#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT          9
1788#define QM_RF_PQ_MAP_E4_VOQ_MASK                0x1F
1789#define QM_RF_PQ_MAP_E4_VOQ_SHIFT               18
1790#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK   0x3
1791#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT  23
1792#define QM_RF_PQ_MAP_E4_RL_VALID_MASK           0x1
1793#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT          25
1794#define QM_RF_PQ_MAP_E4_RESERVED_MASK           0x3F
1795#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT          26
1796};
1797
1798/* Completion params for aggregated interrupt completion */
1799struct sdm_agg_int_comp_params {
1800        __le16 params;
1801#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK      0x3F
1802#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT     0
1803#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK  0x1
1804#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
1805#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK     0x1FF
1806#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT    7
1807};
1808
1809/* SDM operation gen command (generate aggregative interrupt) */
1810struct sdm_op_gen {
1811        __le32 command;
1812#define SDM_OP_GEN_COMP_PARAM_MASK      0xFFFF
1813#define SDM_OP_GEN_COMP_PARAM_SHIFT     0
1814#define SDM_OP_GEN_COMP_TYPE_MASK       0xF
1815#define SDM_OP_GEN_COMP_TYPE_SHIFT      16
1816#define SDM_OP_GEN_RESERVED_MASK        0xFFF
1817#define SDM_OP_GEN_RESERVED_SHIFT       20
1818};
1819
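/* Illustrative sketch (not part of the HSI): the completion params above are
 * packed into the low 16 bits of the op_gen command with SET_FIELD() from
 * common_hsi.h. agg_int_index and comp_type below are hypothetical values:
 *
 *	u16 params = 0;
 *	u32 command = 0;
 *
 *	SET_FIELD(params, SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX, agg_int_index);
 *	SET_FIELD(params, SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE, 1);
 *	SET_FIELD(command, SDM_OP_GEN_COMP_PARAM, params);
 *	SET_FIELD(command, SDM_OP_GEN_COMP_TYPE, comp_type);
 */
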
1820/* Physical memory descriptor */
1821struct phys_mem_desc {
1822        dma_addr_t phys_addr;
1823        void *virt_addr;
1824        u32 size;               /* In bytes */
1825};
1826
1827/* Virtual memory descriptor */
1828struct virt_mem_desc {
1829        void *ptr;
1830        u32 size;               /* In bytes */
1831};
1832
1833/****************************************/
1834/* Debug Tools HSI constants and macros */
1835/****************************************/
1836
1837enum block_id {
1838        BLOCK_GRC,
1839        BLOCK_MISCS,
1840        BLOCK_MISC,
1841        BLOCK_DBU,
1842        BLOCK_PGLUE_B,
1843        BLOCK_CNIG,
1844        BLOCK_CPMU,
1845        BLOCK_NCSI,
1846        BLOCK_OPTE,
1847        BLOCK_BMB,
1848        BLOCK_PCIE,
1849        BLOCK_MCP,
1850        BLOCK_MCP2,
1851        BLOCK_PSWHST,
1852        BLOCK_PSWHST2,
1853        BLOCK_PSWRD,
1854        BLOCK_PSWRD2,
1855        BLOCK_PSWWR,
1856        BLOCK_PSWWR2,
1857        BLOCK_PSWRQ,
1858        BLOCK_PSWRQ2,
1859        BLOCK_PGLCS,
1860        BLOCK_DMAE,
1861        BLOCK_PTU,
1862        BLOCK_TCM,
1863        BLOCK_MCM,
1864        BLOCK_UCM,
1865        BLOCK_XCM,
1866        BLOCK_YCM,
1867        BLOCK_PCM,
1868        BLOCK_QM,
1869        BLOCK_TM,
1870        BLOCK_DORQ,
1871        BLOCK_BRB,
1872        BLOCK_SRC,
1873        BLOCK_PRS,
1874        BLOCK_TSDM,
1875        BLOCK_MSDM,
1876        BLOCK_USDM,
1877        BLOCK_XSDM,
1878        BLOCK_YSDM,
1879        BLOCK_PSDM,
1880        BLOCK_TSEM,
1881        BLOCK_MSEM,
1882        BLOCK_USEM,
1883        BLOCK_XSEM,
1884        BLOCK_YSEM,
1885        BLOCK_PSEM,
1886        BLOCK_RSS,
1887        BLOCK_TMLD,
1888        BLOCK_MULD,
1889        BLOCK_YULD,
1890        BLOCK_XYLD,
1891        BLOCK_PRM,
1892        BLOCK_PBF_PB1,
1893        BLOCK_PBF_PB2,
1894        BLOCK_RPB,
1895        BLOCK_BTB,
1896        BLOCK_PBF,
1897        BLOCK_RDIF,
1898        BLOCK_TDIF,
1899        BLOCK_CDU,
1900        BLOCK_CCFC,
1901        BLOCK_TCFC,
1902        BLOCK_IGU,
1903        BLOCK_CAU,
1904        BLOCK_UMAC,
1905        BLOCK_XMAC,
1906        BLOCK_MSTAT,
1907        BLOCK_DBG,
1908        BLOCK_NIG,
1909        BLOCK_WOL,
1910        BLOCK_BMBN,
1911        BLOCK_IPC,
1912        BLOCK_NWM,
1913        BLOCK_NWS,
1914        BLOCK_MS,
1915        BLOCK_PHY_PCIE,
1916        BLOCK_LED,
1917        BLOCK_AVS_WRAP,
1918        BLOCK_PXPREQBUS,
1919        BLOCK_BAR0_MAP,
1920        BLOCK_MCP_FIO,
1921        BLOCK_LAST_INIT,
1922        BLOCK_PRS_FC,
1923        BLOCK_PBF_FC,
1924        BLOCK_NIG_LB_FC,
1925        BLOCK_NIG_LB_FC_PLLH,
1926        BLOCK_NIG_TX_FC_PLLH,
1927        BLOCK_NIG_TX_FC,
1928        BLOCK_NIG_RX_FC_PLLH,
1929        BLOCK_NIG_RX_FC,
1930        MAX_BLOCK_ID
1931};
1932
1933/* binary debug buffer types */
1934enum bin_dbg_buffer_type {
1935        BIN_BUF_DBG_MODE_TREE,
1936        BIN_BUF_DBG_DUMP_REG,
1937        BIN_BUF_DBG_DUMP_MEM,
1938        BIN_BUF_DBG_IDLE_CHK_REGS,
1939        BIN_BUF_DBG_IDLE_CHK_IMMS,
1940        BIN_BUF_DBG_IDLE_CHK_RULES,
1941        BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
1942        BIN_BUF_DBG_ATTN_BLOCKS,
1943        BIN_BUF_DBG_ATTN_REGS,
1944        BIN_BUF_DBG_ATTN_INDEXES,
1945        BIN_BUF_DBG_ATTN_NAME_OFFSETS,
1946        BIN_BUF_DBG_BLOCKS,
1947        BIN_BUF_DBG_BLOCKS_CHIP_DATA,
1948        BIN_BUF_DBG_BUS_LINES,
1949        BIN_BUF_DBG_BLOCKS_USER_DATA,
1950        BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
1951        BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
1952        BIN_BUF_DBG_RESET_REGS,
1953        BIN_BUF_DBG_PARSING_STRINGS,
1954        MAX_BIN_DBG_BUFFER_TYPE
1955};
1956
1958/* Attention bit mapping */
1959struct dbg_attn_bit_mapping {
1960        u16 data;
1961#define DBG_ATTN_BIT_MAPPING_VAL_MASK                   0x7FFF
1962#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT                  0
1963#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK     0x1
1964#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT    15
1965};
1966
1967/* Attention block per-type data */
1968struct dbg_attn_block_type_data {
1969        u16 names_offset;
1970        u16 reserved1;
1971        u8 num_regs;
1972        u8 reserved2;
1973        u16 regs_offset;
1975};
1976
1977/* Block attentions */
1978struct dbg_attn_block {
1979        struct dbg_attn_block_type_data per_type_data[2];
1980};
1981
1982/* Attention register result */
1983struct dbg_attn_reg_result {
1984        u32 data;
1985#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK    0xFFFFFF
1986#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT   0
1987#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK   0xFF
1988#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT  24
1989        u16 block_attn_offset;
1990        u16 reserved;
1991        u32 sts_val;
1992        u32 mask_val;
1993};
1994
1995/* Attention block result */
1996struct dbg_attn_block_result {
1997        u8 block_id;
1998        u8 data;
1999#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK    0x3
2000#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT   0
2001#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK     0x3F
2002#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT    2
2003        u16 names_offset;
2004        struct dbg_attn_reg_result reg_results[15];
2005};
2006
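/* Illustrative sketch (not part of the HSI): a parser walking the attention
 * results above would typically extract the packed fields with GET_FIELD()
 * from common_hsi.h, e.g. (res is a hypothetical
 * struct dbg_attn_block_result pointer):
 *
 *	u8 attn_type = GET_FIELD(res->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
 *	u8 num_regs = GET_FIELD(res->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
 *	u32 sts_addr = GET_FIELD(res->reg_results[0].data,
 *				 DBG_ATTN_REG_RESULT_STS_ADDRESS);
 */
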
2007/* Mode header */
2008struct dbg_mode_hdr {
2009        u16 data;
2010#define DBG_MODE_HDR_EVAL_MODE_MASK             0x1
2011#define DBG_MODE_HDR_EVAL_MODE_SHIFT            0
2012#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK      0x7FFF
2013#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT     1
2014};
2015
2016/* Attention register */
2017struct dbg_attn_reg {
2018        struct dbg_mode_hdr mode;
2019        u16 block_attn_offset;
2020        u32 data;
2021#define DBG_ATTN_REG_STS_ADDRESS_MASK   0xFFFFFF
2022#define DBG_ATTN_REG_STS_ADDRESS_SHIFT  0
2023#define DBG_ATTN_REG_NUM_REG_ATTN_MASK  0xFF
2024#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
2025        u32 sts_clr_address;
2026        u32 mask_address;
2027};
2028
2029/* Attention types */
2030enum dbg_attn_type {
2031        ATTN_TYPE_INTERRUPT,
2032        ATTN_TYPE_PARITY,
2033        MAX_DBG_ATTN_TYPE
2034};
2035
2036/* Block debug data */
2037struct dbg_block {
2038        u8 name[15];
2039        u8 associated_storm_letter;
2040};
2041
2042/* Chip-specific block debug data */
2043struct dbg_block_chip {
2044        u8 flags;
2045#define DBG_BLOCK_CHIP_IS_REMOVED_MASK           0x1
2046#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT          0
2047#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK        0x1
2048#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT       1
2049#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK  0x1
2050#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
2051#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK          0x1
2052#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT         3
2053#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK   0x1
2054#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT  4
2055#define DBG_BLOCK_CHIP_RESERVED0_MASK            0x7
2056#define DBG_BLOCK_CHIP_RESERVED0_SHIFT           5
2057        u8 dbg_client_id;
2058        u8 reset_reg_id;
2059        u8 reset_reg_bit_offset;
2060        struct dbg_mode_hdr dbg_bus_mode;
2061        u16 reserved1;
2062        u8 reserved2;
2063        u8 num_of_dbg_bus_lines;
2064        u16 dbg_bus_lines_offset;
2065        u32 dbg_select_reg_addr;
2066        u32 dbg_dword_enable_reg_addr;
2067        u32 dbg_shift_reg_addr;
2068        u32 dbg_force_valid_reg_addr;
2069        u32 dbg_force_frame_reg_addr;
2070};
2071
2072/* Chip-specific block user debug data */
2073struct dbg_block_chip_user {
2074        u8 num_of_dbg_bus_lines;
2075        u8 has_latency_events;
2076        u16 names_offset;
2077};
2078
2079/* Block user debug data */
2080struct dbg_block_user {
2081        u8 name[16];
2082};
2083
2084/* Block Debug line data */
2085struct dbg_bus_line {
2086        u8 data;
2087#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK         0xF
2088#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT        0
2089#define DBG_BUS_LINE_IS_256B_MASK               0x1
2090#define DBG_BUS_LINE_IS_256B_SHIFT              4
2091#define DBG_BUS_LINE_RESERVED_MASK              0x7
2092#define DBG_BUS_LINE_RESERVED_SHIFT             5
2093        u8 group_sizes;
2094};
2095
2096/* Condition header for registers dump */
2097struct dbg_dump_cond_hdr {
2098        struct dbg_mode_hdr mode; /* Mode header */
2099        u8 block_id; /* block ID */
2100        u8 data_size; /* size in dwords of the data following this header */
2101};
2102
2103/* Memory data for registers dump */
2104struct dbg_dump_mem {
2105        u32 dword0;
2106#define DBG_DUMP_MEM_ADDRESS_MASK       0xFFFFFF
2107#define DBG_DUMP_MEM_ADDRESS_SHIFT      0
2108#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK  0xFF
2109#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
2110        u32 dword1;
2111#define DBG_DUMP_MEM_LENGTH_MASK        0xFFFFFF
2112#define DBG_DUMP_MEM_LENGTH_SHIFT       0
2113#define DBG_DUMP_MEM_WIDE_BUS_MASK      0x1
2114#define DBG_DUMP_MEM_WIDE_BUS_SHIFT     24
2115#define DBG_DUMP_MEM_RESERVED_MASK      0x7F
2116#define DBG_DUMP_MEM_RESERVED_SHIFT     25
2117};
2118
2119/* Register data for registers dump */
2120struct dbg_dump_reg {
2121        u32 data;
2122#define DBG_DUMP_REG_ADDRESS_MASK       0x7FFFFF
2123#define DBG_DUMP_REG_ADDRESS_SHIFT      0
2124#define DBG_DUMP_REG_WIDE_BUS_MASK      0x1
2125#define DBG_DUMP_REG_WIDE_BUS_SHIFT     23
2126#define DBG_DUMP_REG_LENGTH_MASK        0xFF
2127#define DBG_DUMP_REG_LENGTH_SHIFT       24
2128};
2129
2130/* Split header for registers dump */
2131struct dbg_dump_split_hdr {
2132        u32 hdr;
2133#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK       0xFFFFFF
2134#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT      0
2135#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK   0xFF
2136#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT  24
2137};
2138
2139/* Condition header for idle check */
2140struct dbg_idle_chk_cond_hdr {
2141        struct dbg_mode_hdr mode; /* Mode header */
2142        u16 data_size; /* size in dwords of the data following this header */
2143};
2144
2145/* Idle Check condition register */
2146struct dbg_idle_chk_cond_reg {
2147        u32 data;
2148#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK      0x7FFFFF
2149#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT     0
2150#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK     0x1
2151#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT    23
2152#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK     0xFF
2153#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT    24
2154        u16 num_entries;
2155        u8 entry_size;
2156        u8 start_entry;
2157};
2158
2159/* Idle Check info register */
2160struct dbg_idle_chk_info_reg {
2161        u32 data;
2162#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK      0x7FFFFF
2163#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT     0
2164#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK     0x1
2165#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT    23
2166#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK     0xFF
2167#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT    24
2168        u16 size; /* register size in dwords */
2169        struct dbg_mode_hdr mode; /* Mode header */
2170};
2171
2172/* Idle Check register */
2173union dbg_idle_chk_reg {
2174        struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
2175        struct dbg_idle_chk_info_reg info_reg; /* info register */
2176};
2177
2178/* Idle Check result header */
2179struct dbg_idle_chk_result_hdr {
2180        u16 rule_id; /* Failing rule index */
2181        u16 mem_entry_id; /* Failing memory entry index */
2182        u8 num_dumped_cond_regs; /* number of dumped condition registers */
2183        u8 num_dumped_info_regs; /* number of dumped info registers */
2184        u8 severity; /* from dbg_idle_chk_severity_types enum */
2185        u8 reserved;
2186};
2187
2188/* Idle Check result register header */
2189struct dbg_idle_chk_result_reg_hdr {
2190        u8 data;
2191#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK  0x1
2192#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
2193#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK  0x7F
2194#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
2195        u8 start_entry; /* index of the first checked entry */
2196        u16 size; /* register size in dwords */
2197};
2198
2199/* Idle Check rule */
2200struct dbg_idle_chk_rule {
2201        u16 rule_id; /* Idle Check rule ID */
2202        u8 severity; /* value from dbg_idle_chk_severity_types enum */
2203        u8 cond_id; /* Condition ID */
2204        u8 num_cond_regs; /* number of condition registers */
2205        u8 num_info_regs; /* number of info registers */
2206        u8 num_imms; /* number of immediates in the condition */
2207        u8 reserved1;
2208        u16 reg_offset; /* offset of this rule's registers in the idle check
2209                         * register array (in dbg_idle_chk_reg units).
2210                         */
2211        u16 imm_offset; /* offset of this rule's immediate values in the
2212                         * immediate values array (in dwords).
2213                         */
2214};
2215
2216/* Idle Check rule parsing data */
2217struct dbg_idle_chk_rule_parsing_data {
2218        u32 data;
2219#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK  0x1
2220#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
2221#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK  0x7FFFFFFF
2222#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
2223};
2224
2225/* Idle check severity types */
2226enum dbg_idle_chk_severity_types {
2227        /* idle check failure should cause an error */
2228        IDLE_CHK_SEVERITY_ERROR,
2229        /* idle check failure should cause an error only if there's no traffic */
2230        IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
2231        /* idle check failure should cause a warning */
2232        IDLE_CHK_SEVERITY_WARNING,
2233        MAX_DBG_IDLE_CHK_SEVERITY_TYPES
2234};
2235
2236/* Reset register */
2237struct dbg_reset_reg {
2238        u32 data;
2239#define DBG_RESET_REG_ADDR_MASK        0xFFFFFF
2240#define DBG_RESET_REG_ADDR_SHIFT       0
2241#define DBG_RESET_REG_IS_REMOVED_MASK  0x1
2242#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
2243#define DBG_RESET_REG_RESERVED_MASK    0x7F
2244#define DBG_RESET_REG_RESERVED_SHIFT   25
2245};
2246
2247/* Debug Bus block data */
2248struct dbg_bus_block_data {
2249        u8 enable_mask;
2250        u8 right_shift;
2251        u8 force_valid_mask;
2252        u8 force_frame_mask;
2253        u8 dword_mask;
2254        u8 line_num;
2255        u8 hw_id;
2256        u8 flags;
2257#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK  0x1
2258#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
2259#define DBG_BUS_BLOCK_DATA_RESERVED_MASK      0x7F
2260#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT     1
2261};
2262
2263enum dbg_bus_clients {
2264        DBG_BUS_CLIENT_RBCN,
2265        DBG_BUS_CLIENT_RBCP,
2266        DBG_BUS_CLIENT_RBCR,
2267        DBG_BUS_CLIENT_RBCT,
2268        DBG_BUS_CLIENT_RBCU,
2269        DBG_BUS_CLIENT_RBCF,
2270        DBG_BUS_CLIENT_RBCX,
2271        DBG_BUS_CLIENT_RBCS,
2272        DBG_BUS_CLIENT_RBCH,
2273        DBG_BUS_CLIENT_RBCZ,
2274        DBG_BUS_CLIENT_OTHER_ENGINE,
2275        DBG_BUS_CLIENT_TIMESTAMP,
2276        DBG_BUS_CLIENT_CPU,
2277        DBG_BUS_CLIENT_RBCY,
2278        DBG_BUS_CLIENT_RBCQ,
2279        DBG_BUS_CLIENT_RBCM,
2280        DBG_BUS_CLIENT_RBCB,
2281        DBG_BUS_CLIENT_RBCW,
2282        DBG_BUS_CLIENT_RBCV,
2283        MAX_DBG_BUS_CLIENTS
2284};
2285
2286/* Debug Bus constraint operation types */
2287enum dbg_bus_constraint_ops {
2288        DBG_BUS_CONSTRAINT_OP_EQ,
2289        DBG_BUS_CONSTRAINT_OP_NE,
2290        DBG_BUS_CONSTRAINT_OP_LT,
2291        DBG_BUS_CONSTRAINT_OP_LTC,
2292        DBG_BUS_CONSTRAINT_OP_LE,
2293        DBG_BUS_CONSTRAINT_OP_LEC,
2294        DBG_BUS_CONSTRAINT_OP_GT,
2295        DBG_BUS_CONSTRAINT_OP_GTC,
2296        DBG_BUS_CONSTRAINT_OP_GE,
2297        DBG_BUS_CONSTRAINT_OP_GEC,
2298        MAX_DBG_BUS_CONSTRAINT_OPS
2299};
2300
2301/* Debug Bus trigger state data */
2302struct dbg_bus_trigger_state_data {
2303        u8 msg_len;
2304        u8 constraint_dword_mask;
2305        u8 storm_id;
2306        u8 reserved;
2307};
2308
2309/* Debug Bus memory address */
2310struct dbg_bus_mem_addr {
2311        u32 lo;
2312        u32 hi;
2313};
2314
2315/* Debug Bus PCI buffer data */
2316struct dbg_bus_pci_buf_data {
2317        struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
2318        struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
2319        u32 size; /* PCI buffer size in bytes */
2320};
2321
2322/* Debug Bus Storm EID range filter params */
2323struct dbg_bus_storm_eid_range_params {
2324        u8 min; /* Minimal event ID to filter on */
2325        u8 max; /* Maximal event ID to filter on */
2326};
2327
2328/* Debug Bus Storm EID mask filter params */
2329struct dbg_bus_storm_eid_mask_params {
2330        u8 val; /* Event ID value */
2331        u8 mask; /* Event ID mask. 1s in the mask = don't care bits. */
2332};
2333
2334/* Debug Bus Storm EID filter params */
2335union dbg_bus_storm_eid_params {
2336        struct dbg_bus_storm_eid_range_params range;
2337        struct dbg_bus_storm_eid_mask_params mask;
2338};
2339
2340/* Debug Bus Storm data */
2341struct dbg_bus_storm_data {
2342        u8 enabled;
2343        u8 mode;
2344        u8 hw_id;
2345        u8 eid_filter_en;
2346        u8 eid_range_not_mask;
2347        u8 cid_filter_en;
2348        union dbg_bus_storm_eid_params eid_filter_params;
2349        u32 cid;
2350};
2351
2352/* Debug Bus data */
2353struct dbg_bus_data {
2354        u32 app_version;
2355        u8 state;
2356        u8 mode_256b_en;
2357        u8 num_enabled_blocks;
2358        u8 num_enabled_storms;
2359        u8 target;
2360        u8 one_shot_en;
2361        u8 grc_input_en;
2362        u8 timestamp_input_en;
2363        u8 filter_en;
2364        u8 adding_filter;
2365        u8 filter_pre_trigger;
2366        u8 filter_post_trigger;
2367        u8 trigger_en;
2368        u8 filter_constraint_dword_mask;
2369        u8 next_trigger_state;
2370        u8 next_constraint_id;
2371        struct dbg_bus_trigger_state_data trigger_states[3];
2372        u8 filter_msg_len;
2373        u8 rcv_from_other_engine;
2374        u8 blocks_dword_mask;
2375        u8 blocks_dword_overlap;
2376        u32 hw_id_mask;
2377        struct dbg_bus_pci_buf_data pci_buf;
2378        struct dbg_bus_block_data blocks[132];
2379        struct dbg_bus_storm_data storms[6];
2380};
2381
2382/* Debug bus states */
2383enum dbg_bus_states {
2384        DBG_BUS_STATE_IDLE,
2385        DBG_BUS_STATE_READY,
2386        DBG_BUS_STATE_RECORDING,
2387        DBG_BUS_STATE_STOPPED,
2388        MAX_DBG_BUS_STATES
2389};
2390
2391/* Debug Bus Storm modes */
2392enum dbg_bus_storm_modes {
2393        DBG_BUS_STORM_MODE_PRINTF,
2394        DBG_BUS_STORM_MODE_PRAM_ADDR,
2395        DBG_BUS_STORM_MODE_DRA_RW,
2396        DBG_BUS_STORM_MODE_DRA_W,
2397        DBG_BUS_STORM_MODE_LD_ST_ADDR,
2398        DBG_BUS_STORM_MODE_DRA_FSM,
2399        DBG_BUS_STORM_MODE_FAST_DBGMUX,
2400        DBG_BUS_STORM_MODE_RH,
2401        DBG_BUS_STORM_MODE_RH_WITH_STORE,
2402        DBG_BUS_STORM_MODE_FOC,
2403        DBG_BUS_STORM_MODE_EXT_STORE,
2404        MAX_DBG_BUS_STORM_MODES
2405};
2406
2407/* Debug bus target IDs */
2408enum dbg_bus_targets {
2409        DBG_BUS_TARGET_ID_INT_BUF,
2410        DBG_BUS_TARGET_ID_NIG,
2411        DBG_BUS_TARGET_ID_PCI,
2412        MAX_DBG_BUS_TARGETS
2413};
2414
2415/* GRC Dump data */
2416struct dbg_grc_data {
2417        u8 params_initialized;
2418        u8 reserved1;
2419        u16 reserved2;
2420        u32 param_val[48];
2421};
2422
2423/* Debug GRC params */
2424enum dbg_grc_params {
2425        DBG_GRC_PARAM_DUMP_TSTORM,
2426        DBG_GRC_PARAM_DUMP_MSTORM,
2427        DBG_GRC_PARAM_DUMP_USTORM,
2428        DBG_GRC_PARAM_DUMP_XSTORM,
2429        DBG_GRC_PARAM_DUMP_YSTORM,
2430        DBG_GRC_PARAM_DUMP_PSTORM,
2431        DBG_GRC_PARAM_DUMP_REGS,
2432        DBG_GRC_PARAM_DUMP_RAM,
2433        DBG_GRC_PARAM_DUMP_PBUF,
2434        DBG_GRC_PARAM_DUMP_IOR,
2435        DBG_GRC_PARAM_DUMP_VFC,
2436        DBG_GRC_PARAM_DUMP_CM_CTX,
2437        DBG_GRC_PARAM_DUMP_PXP,
2438        DBG_GRC_PARAM_DUMP_RSS,
2439        DBG_GRC_PARAM_DUMP_CAU,
2440        DBG_GRC_PARAM_DUMP_QM,
2441        DBG_GRC_PARAM_DUMP_MCP,
2442        DBG_GRC_PARAM_DUMP_DORQ,
2443        DBG_GRC_PARAM_DUMP_CFC,
2444        DBG_GRC_PARAM_DUMP_IGU,
2445        DBG_GRC_PARAM_DUMP_BRB,
2446        DBG_GRC_PARAM_DUMP_BTB,
2447        DBG_GRC_PARAM_DUMP_BMB,
2448        DBG_GRC_PARAM_RESERVD1,
2449        DBG_GRC_PARAM_DUMP_MULD,
2450        DBG_GRC_PARAM_DUMP_PRS,
2451        DBG_GRC_PARAM_DUMP_DMAE,
2452        DBG_GRC_PARAM_DUMP_TM,
2453        DBG_GRC_PARAM_DUMP_SDM,
2454        DBG_GRC_PARAM_DUMP_DIF,
2455        DBG_GRC_PARAM_DUMP_STATIC,
2456        DBG_GRC_PARAM_UNSTALL,
2457        DBG_GRC_PARAM_RESERVED2,
2458        DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
2459        DBG_GRC_PARAM_EXCLUDE_ALL,
2460        DBG_GRC_PARAM_CRASH,
2461        DBG_GRC_PARAM_PARITY_SAFE,
2462        DBG_GRC_PARAM_DUMP_CM,
2463        DBG_GRC_PARAM_DUMP_PHY,
2464        DBG_GRC_PARAM_NO_MCP,
2465        DBG_GRC_PARAM_NO_FW_VER,
2466        DBG_GRC_PARAM_RESERVED3,
2467        DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
2468        DBG_GRC_PARAM_DUMP_ILT_CDUC,
2469        DBG_GRC_PARAM_DUMP_ILT_CDUT,
2470        DBG_GRC_PARAM_DUMP_CAU_EXT,
2471        MAX_DBG_GRC_PARAMS
2472};
2473
2474/* Debug status codes */
2475enum dbg_status {
2476        DBG_STATUS_OK,
2477        DBG_STATUS_APP_VERSION_NOT_SET,
2478        DBG_STATUS_UNSUPPORTED_APP_VERSION,
2479        DBG_STATUS_DBG_BLOCK_NOT_RESET,
2480        DBG_STATUS_INVALID_ARGS,
2481        DBG_STATUS_OUTPUT_ALREADY_SET,
2482        DBG_STATUS_INVALID_PCI_BUF_SIZE,
2483        DBG_STATUS_PCI_BUF_ALLOC_FAILED,
2484        DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
2485        DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
2486        DBG_STATUS_NO_MATCHING_FRAMING_MODE,
2487        DBG_STATUS_VFC_READ_ERROR,
2488        DBG_STATUS_STORM_ALREADY_ENABLED,
2489        DBG_STATUS_STORM_NOT_ENABLED,
2490        DBG_STATUS_BLOCK_ALREADY_ENABLED,
2491        DBG_STATUS_BLOCK_NOT_ENABLED,
2492        DBG_STATUS_NO_INPUT_ENABLED,
2493        DBG_STATUS_NO_FILTER_TRIGGER_256B,
2494        DBG_STATUS_FILTER_ALREADY_ENABLED,
2495        DBG_STATUS_TRIGGER_ALREADY_ENABLED,
2496        DBG_STATUS_TRIGGER_NOT_ENABLED,
2497        DBG_STATUS_CANT_ADD_CONSTRAINT,
2498        DBG_STATUS_TOO_MANY_TRIGGER_STATES,
2499        DBG_STATUS_TOO_MANY_CONSTRAINTS,
2500        DBG_STATUS_RECORDING_NOT_STARTED,
2501        DBG_STATUS_DATA_DIDNT_TRIGGER,
2502        DBG_STATUS_NO_DATA_RECORDED,
2503        DBG_STATUS_DUMP_BUF_TOO_SMALL,
2504        DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
2505        DBG_STATUS_UNKNOWN_CHIP,
2506        DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
2507        DBG_STATUS_BLOCK_IN_RESET,
2508        DBG_STATUS_INVALID_TRACE_SIGNATURE,
2509        DBG_STATUS_INVALID_NVRAM_BUNDLE,
2510        DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
2511        DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
2512        DBG_STATUS_NVRAM_READ_FAILED,
2513        DBG_STATUS_IDLE_CHK_PARSE_FAILED,
2514        DBG_STATUS_MCP_TRACE_BAD_DATA,
2515        DBG_STATUS_MCP_TRACE_NO_META,
2516        DBG_STATUS_MCP_COULD_NOT_HALT,
2517        DBG_STATUS_MCP_COULD_NOT_RESUME,
2518        DBG_STATUS_RESERVED0,
2519        DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
2520        DBG_STATUS_IGU_FIFO_BAD_DATA,
2521        DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
2522        DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
2523        DBG_STATUS_REG_FIFO_BAD_DATA,
2524        DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
2525        DBG_STATUS_DBG_ARRAY_NOT_SET,
2526        DBG_STATUS_RESERVED1,
2527        DBG_STATUS_NON_MATCHING_LINES,
2528        DBG_STATUS_INSUFFICIENT_HW_IDS,
2529        DBG_STATUS_DBG_BUS_IN_USE,
2530        DBG_STATUS_INVALID_STORM_DBG_MODE,
2531        DBG_STATUS_OTHER_ENGINE_BB_ONLY,
2532        DBG_STATUS_FILTER_SINGLE_HW_ID,
2533        DBG_STATUS_TRIGGER_SINGLE_HW_ID,
2534        DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
2535        MAX_DBG_STATUS
2536};
2537
2538/* Debug Storms IDs */
2539enum dbg_storms {
2540        DBG_TSTORM_ID,
2541        DBG_MSTORM_ID,
2542        DBG_USTORM_ID,
2543        DBG_XSTORM_ID,
2544        DBG_YSTORM_ID,
2545        DBG_PSTORM_ID,
2546        MAX_DBG_STORMS
2547};
2548
2549/* Idle Check data */
2550struct idle_chk_data {
2551        u32 buf_size;
2552        u8 buf_size_set;
2553        u8 reserved1;
2554        u16 reserved2;
2555};
2556
2557struct pretend_params {
2558        u8 split_type;
2559        u8 reserved;
2560        u16 split_id;
2561};
2562
2563/* Debug Tools data (per HW function) */
2565struct dbg_tools_data {
2566        struct dbg_grc_data grc;
2567        struct dbg_bus_data bus;
2568        struct idle_chk_data idle_chk;
2569        u8 mode_enable[40];
2570        u8 block_in_reset[132];
2571        u8 chip_id;
2572        u8 hw_type;
2573        u8 num_ports;
2574        u8 num_pfs_per_port;
2575        u8 num_vfs;
2576        u8 initialized;
2577        u8 use_dmae;
2578        u8 reserved;
2579        struct pretend_params pretend;
2580        u32 num_regs_read;
2581};
2582
2583/* ILT Clients */
2584enum ilt_clients {
2585        ILT_CLI_CDUC,
2586        ILT_CLI_CDUT,
2587        ILT_CLI_QM,
2588        ILT_CLI_TM,
2589        ILT_CLI_SRC,
2590        ILT_CLI_TSDM,
2591        ILT_CLI_RGFS,
2592        ILT_CLI_TGFS,
2593        MAX_ILT_CLIENTS
2594};
2595
2596/********************************/
2597/* HSI Init Functions constants */
2598/********************************/
2599
2600/* Number of VLAN priorities */
2601#define NUM_OF_VLAN_PRIORITIES  8
2602
2603/* BRB RAM init requirements */
2604struct init_brb_ram_req {
2605        u32 guranteed_per_tc;
2606        u32 headroom_per_tc;
2607        u32 min_pkt_size;
2608        u32 max_ports_per_engine;
2609        u8 num_active_tcs[MAX_NUM_PORTS];
2610};
2611
2612/* ETS per-TC init requirements */
2613struct init_ets_tc_req {
2614        u8 use_sp;
2615        u8 use_wfq;
2616        u16 weight;
2617};
2618
2619/* ETS init requirements */
2620struct init_ets_req {
2621        u32 mtu;
2622        struct init_ets_tc_req tc_req[NUM_OF_TCS];
2623};
2624
2625/* NIG LB RL init requirements */
2626struct init_nig_lb_rl_req {
2627        u16 lb_mac_rate;
2628        u16 lb_rate;
2629        u32 mtu;
2630        u16 tc_rate[NUM_OF_PHYS_TCS];
2631};
2632
2633/* NIG TC mapping for each priority */
2634struct init_nig_pri_tc_map_entry {
2635        u8 tc_id;
2636        u8 valid;
2637};
2638
2639/* NIG priority to TC map init requirements */
2640struct init_nig_pri_tc_map_req {
2641        struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
2642};
2643
2644/* QM per global RL init parameters */
2645struct init_qm_global_rl_params {
2646        u32 rate_limit;
2647};
2648
2649/* QM per-port init parameters */
2650struct init_qm_port_params {
2651        u16 active_phys_tcs;
2652        u16 num_pbf_cmd_lines;
2653        u16 num_btb_blocks;
2654        u8 active;
2655        u8 reserved;
2656};
2657
2658/* QM per-PQ init parameters */
2659struct init_qm_pq_params {
2660        u8 vport_id;
2661        u8 tc_id;
2662        u8 wrr_group;
2663        u8 rl_valid;
2664        u16 rl_id;
2665        u8 port_id;
2666        u8 reserved;
2667};
2668
2669/* QM per-vport init parameters */
2670struct init_qm_vport_params {
2671        u16 wfq;
2672        u16 first_tx_pq_id[NUM_OF_TCS];
2673};
2674
2675/**************************************/
2676/* Init Tool HSI constants and macros */
2677/**************************************/
2678
2679/* Width of GRC address in bits (addresses are specified in dwords) */
2680#define GRC_ADDR_BITS   23
2681#define MAX_GRC_ADDR    (BIT(GRC_ADDR_BITS) - 1)
2682
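/* Since GRC addresses are expressed in dwords, a register at byte offset
 * 0x2000 in the GRC space is addressed here as 0x2000 / 4 = 0x800.
 */
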
2683/* indicates an init that should be applied to any phase ID */
2684#define ANY_PHASE_ID    0xffff
2685
2686/* Max size in dwords of a zipped array */
2687#define MAX_ZIPPED_SIZE 8192
2688enum chip_ids {
2689        CHIP_BB,
2690        CHIP_K2,
2691        MAX_CHIP_IDS
2692};
2693
2694struct fw_asserts_ram_section {
2695        __le16 section_ram_line_offset;
2696        __le16 section_ram_line_size;
2697        u8 list_dword_offset;
2698        u8 list_element_dword_size;
2699        u8 list_num_elements;
2700        u8 list_next_index_dword_offset;
2701};
2702
2703struct fw_ver_num {
2704        u8 major;
2705        u8 minor;
2706        u8 rev;
2707        u8 eng;
2708};
2709
2710struct fw_ver_info {
2711        __le16 tools_ver;
2712        u8 image_id;
2713        u8 reserved1;
2714        struct fw_ver_num num;
2715        __le32 timestamp;
2716        __le32 reserved2;
2717};
2718
2719struct fw_info {
2720        struct fw_ver_info ver;
2721        struct fw_asserts_ram_section fw_asserts_section;
2722};
2723
2724struct fw_info_location {
2725        __le32 grc_addr;
2726        __le32 size;
2727};
2728
2729enum init_modes {
2730        MODE_RESERVED,
2731        MODE_BB,
2732        MODE_K2,
2733        MODE_ASIC,
2734        MODE_RESERVED2,
2735        MODE_RESERVED3,
2736        MODE_RESERVED4,
2737        MODE_RESERVED5,
2738        MODE_SF,
2739        MODE_MF_SD,
2740        MODE_MF_SI,
2741        MODE_PORTS_PER_ENG_1,
2742        MODE_PORTS_PER_ENG_2,
2743        MODE_PORTS_PER_ENG_4,
2744        MODE_100G,
2745        MODE_RESERVED6,
2746        MODE_RESERVED7,
2747        MAX_INIT_MODES
2748};
2749
2750enum init_phases {
2751        PHASE_ENGINE,
2752        PHASE_PORT,
2753        PHASE_PF,
2754        PHASE_VF,
2755        PHASE_QM_PF,
2756        MAX_INIT_PHASES
2757};
2758
2759enum init_split_types {
2760        SPLIT_TYPE_NONE,
2761        SPLIT_TYPE_PORT,
2762        SPLIT_TYPE_PF,
2763        SPLIT_TYPE_PORT_PF,
2764        SPLIT_TYPE_VF,
2765        MAX_INIT_SPLIT_TYPES
2766};
2767
2768/* Binary buffer header */
2769struct bin_buffer_hdr {
2770        u32 offset;
2771        u32 length;
2772};
2773
2774/* Binary init buffer types */
2775enum bin_init_buffer_type {
2776        BIN_BUF_INIT_FW_VER_INFO,
2777        BIN_BUF_INIT_CMD,
2778        BIN_BUF_INIT_VAL,
2779        BIN_BUF_INIT_MODE_TREE,
2780        BIN_BUF_INIT_IRO,
2781        BIN_BUF_INIT_OVERLAYS,
2782        MAX_BIN_INIT_BUFFER_TYPE
2783};
2784
2785/* FW overlay buffer header */
2786struct fw_overlay_buf_hdr {
2787        u32 data;
2788#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK  0xFF
2789#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
2790#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK  0xFFFFFF
2791#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
2792};
2793
2794/* init array header: raw */
2795struct init_array_raw_hdr {
2796        __le32                                          data;
2797#define INIT_ARRAY_RAW_HDR_TYPE_MASK                    0xF
2798#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT                   0
2799#define INIT_ARRAY_RAW_HDR_PARAMS_MASK                  0xFFFFFFF
2800#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT                 4
2801};
2802
2803/* init array header: standard */
2804struct init_array_standard_hdr {
2805        __le32                                          data;
2806#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK               0xF
2807#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT              0
2808#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK               0xFFFFFFF
2809#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT              4
2810};
2811
2812/* init array header: zipped */
2813struct init_array_zipped_hdr {
2814        __le32                                          data;
2815#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK                 0xF
2816#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT                0
2817#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK          0xFFFFFFF
2818#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT         4
2819};
2820
2821/* init array header: pattern */
2822struct init_array_pattern_hdr {
2823        __le32                                          data;
2824#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK                0xF
2825#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT               0
2826#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK        0xF
2827#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT       4
2828#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK         0xFFFFFF
2829#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT        8
2830};
2831
2832/* init array header union */
2833union init_array_hdr {
2834        struct init_array_raw_hdr                       raw;
2835        struct init_array_standard_hdr                  standard;
2836        struct init_array_zipped_hdr                    zipped;
2837        struct init_array_pattern_hdr                   pattern;
2838};
2839
2840/* init array types */
2841enum init_array_types {
2842        INIT_ARR_STANDARD,
2843        INIT_ARR_ZIPPED,
2844        INIT_ARR_PATTERN,
2845        MAX_INIT_ARRAY_TYPES
2846};
2847
2848/* init operation: callback */
2849struct init_callback_op {
2850        __le32                                          op_data;
2851#define INIT_CALLBACK_OP_OP_MASK                        0xF
2852#define INIT_CALLBACK_OP_OP_SHIFT                       0
2853#define INIT_CALLBACK_OP_RESERVED_MASK                  0xFFFFFFF
2854#define INIT_CALLBACK_OP_RESERVED_SHIFT                 4
2855        __le16                                          callback_id;
2856        __le16                                          block_id;
2857};
2858
2859/* init operation: delay */
2860struct init_delay_op {
2861        __le32                                          op_data;
2862#define INIT_DELAY_OP_OP_MASK                           0xF
2863#define INIT_DELAY_OP_OP_SHIFT                          0
2864#define INIT_DELAY_OP_RESERVED_MASK                     0xFFFFFFF
2865#define INIT_DELAY_OP_RESERVED_SHIFT                    4
2866        __le32                                          delay;
2867};
2868
2869/* init operation: if_mode */
2870struct init_if_mode_op {
2871        __le32                                          op_data;
2872#define INIT_IF_MODE_OP_OP_MASK                         0xF
2873#define INIT_IF_MODE_OP_OP_SHIFT                        0
2874#define INIT_IF_MODE_OP_RESERVED1_MASK                  0xFFF
2875#define INIT_IF_MODE_OP_RESERVED1_SHIFT                 4
2876#define INIT_IF_MODE_OP_CMD_OFFSET_MASK                 0xFFFF
2877#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT                16
2878        __le16                                          reserved2;
2879        __le16                                          modes_buf_offset;
2880};
2881
2882/* init operation: if_phase */
2883struct init_if_phase_op {
2884        __le32                                          op_data;
2885#define INIT_IF_PHASE_OP_OP_MASK                        0xF
2886#define INIT_IF_PHASE_OP_OP_SHIFT                       0
2887#define INIT_IF_PHASE_OP_RESERVED1_MASK                 0xFFF
2888#define INIT_IF_PHASE_OP_RESERVED1_SHIFT                4
2889#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK                0xFFFF
2890#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT               16
2891        __le32                                          phase_data;
2892#define INIT_IF_PHASE_OP_PHASE_MASK                     0xFF
2893#define INIT_IF_PHASE_OP_PHASE_SHIFT                    0
2894#define INIT_IF_PHASE_OP_RESERVED2_MASK                 0xFF
2895#define INIT_IF_PHASE_OP_RESERVED2_SHIFT                8
2896#define INIT_IF_PHASE_OP_PHASE_ID_MASK                  0xFFFF
2897#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT                 16
2898};
2899
2900/* init mode operators */
2901enum init_mode_ops {
2902        INIT_MODE_OP_NOT,
2903        INIT_MODE_OP_OR,
2904        INIT_MODE_OP_AND,
2905        MAX_INIT_MODE_OPS
2906};
2907
2908/* init operation: raw */
2909struct init_raw_op {
2910        __le32                                          op_data;
2911#define INIT_RAW_OP_OP_MASK                             0xF
2912#define INIT_RAW_OP_OP_SHIFT                            0
2913#define INIT_RAW_OP_PARAM1_MASK                         0xFFFFFFF
2914#define INIT_RAW_OP_PARAM1_SHIFT                        4
2915        __le32                                          param2;
2916};
2917
2918/* init array params */
2919struct init_op_array_params {
2920        __le16                                          size;
2921        __le16                                          offset;
2922};
2923
2924/* Write init operation arguments */
2925union init_write_args {
2926        __le32                                          inline_val;
2927        __le32                                          zeros_count;
2928        __le32                                          array_offset;
2929        struct init_op_array_params                     runtime;
2930};
2931
2932/* init operation: write */
2933struct init_write_op {
2934        __le32                                          data;
2935#define INIT_WRITE_OP_OP_MASK                           0xF
2936#define INIT_WRITE_OP_OP_SHIFT                          0
2937#define INIT_WRITE_OP_SOURCE_MASK                       0x7
2938#define INIT_WRITE_OP_SOURCE_SHIFT                      4
2939#define INIT_WRITE_OP_RESERVED_MASK                     0x1
2940#define INIT_WRITE_OP_RESERVED_SHIFT                    7
2941#define INIT_WRITE_OP_WIDE_BUS_MASK                     0x1
2942#define INIT_WRITE_OP_WIDE_BUS_SHIFT                    8
2943#define INIT_WRITE_OP_ADDRESS_MASK                      0x7FFFFF
2944#define INIT_WRITE_OP_ADDRESS_SHIFT                     9
2945        union init_write_args                           args;
2946};
2947
2948/* init operation: read */
2949struct init_read_op {
2950        __le32                                          op_data;
2951#define INIT_READ_OP_OP_MASK                            0xF
2952#define INIT_READ_OP_OP_SHIFT                           0
2953#define INIT_READ_OP_POLL_TYPE_MASK                     0xF
2954#define INIT_READ_OP_POLL_TYPE_SHIFT                    4
2955#define INIT_READ_OP_RESERVED_MASK                      0x1
2956#define INIT_READ_OP_RESERVED_SHIFT                     8
2957#define INIT_READ_OP_ADDRESS_MASK                       0x7FFFFF
2958#define INIT_READ_OP_ADDRESS_SHIFT                      9
2959        __le32                                          expected_val;
2960};
2961
2962/* Init operations union */
2963union init_op {
2964        struct init_raw_op                              raw;
2965        struct init_write_op                            write;
2966        struct init_read_op                             read;
2967        struct init_if_mode_op                          if_mode;
2968        struct init_if_phase_op                         if_phase;
2969        struct init_callback_op                         callback;
2970        struct init_delay_op                            delay;
2971};
2972
2973/* Init command operation types */
2974enum init_op_types {
2975        INIT_OP_READ,
2976        INIT_OP_WRITE,
2977        INIT_OP_IF_MODE,
2978        INIT_OP_IF_PHASE,
2979        INIT_OP_DELAY,
2980        INIT_OP_CALLBACK,
2981        MAX_INIT_OP_TYPES
2982};
2983
2984/* init polling types */
2985enum init_poll_types {
2986        INIT_POLL_NONE,
2987        INIT_POLL_EQ,
2988        INIT_POLL_OR,
2989        INIT_POLL_AND,
2990        MAX_INIT_POLL_TYPES
2991};
2992
2993/* init source types */
2994enum init_source_types {
2995        INIT_SRC_INLINE,
2996        INIT_SRC_ZEROS,
2997        INIT_SRC_ARRAY,
2998        INIT_SRC_RUNTIME,
2999        MAX_INIT_SOURCE_TYPES
3000};
3001
3002/* Internal RAM Offsets macro data */
3003struct iro {
3004        u32 base;
3005        u16 m1;
3006        u16 m2;
3007        u16 m3;
3008        u16 size;
3009};
3010
3011/***************************** Public Functions *******************************/
3012
3013/**
3014 * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
3015 *      arrays.
3016 *
3017 * @param p_hwfn -          HW device data
3018 * @param bin_ptr - a pointer to the binary data with debug arrays.
3019 */
3020enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
3021                                    const u8 * const bin_ptr);
3022
3023/**
3024 * @brief qed_read_regs - Reads registers into a buffer (using GRC).
3025 *
3026 * @param p_hwfn - HW device data
3027 * @param p_ptt - Ptt window used for reading the registers.
3028 * @param buf - Destination buffer.
3029 * @param addr - Source GRC address in dwords.
3030 * @param len - Number of registers to read.
3031 */
3032void qed_read_regs(struct qed_hwfn *p_hwfn,
3033                   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
3034
3035/**
3036 * @brief qed_read_fw_info - Reads FW info from the chip.
3037 *
3038 * The FW info contains FW-related information, such as the FW version,
3039 * FW image (main/L2B/kuku), FW timestamp, etc.
3040 * The FW info is read from the internal RAM of the first Storm that is not in
3041 * reset.
3042 *
3043 * @param p_hwfn -          HW device data
3044 * @param p_ptt -           Ptt window used for reading the registers.
3045 * @param fw_info -     Out: a pointer to write the FW info into.
3046 *
3047 * @return true if the FW info was read successfully from one of the Storms,
3048 * or false if all Storms are in reset.
3049 */
3050bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
3051                      struct qed_ptt *p_ptt, struct fw_info *fw_info);

3052/**
3053 * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
3054 *
3055 * @param p_hwfn -      HW device data
3056 * @param grc_param -   GRC parameter
3057 * @param val -         Value to set.
3058 *
3059 * @return error if one of the following holds:
3060 *      - the version wasn't set
3061 *      - grc_param is invalid
3062 *      - val is outside the allowed boundaries
3063 */
3064enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
3065                                   enum dbg_grc_params grc_param, u32 val);
3066
3067/**
3068 * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
3069 *      default value.
3070 *
3071 * @param p_hwfn                - HW device data
3072 */
3073void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
3074/**
3075 * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
3076 *      GRC Dump.
3077 *
3078 * @param p_hwfn - HW device data
3079 * @param p_ptt - Ptt window used for writing the registers.
3080 * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
3081 *      data.
3082 *
3083 * @return error if one of the following holds:
3084 *      - the version wasn't set
3085 * Otherwise, returns ok.
3086 */
3087enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
3088                                              struct qed_ptt *p_ptt,
3089                                              u32 *buf_size);
3090
3091/**
3092 * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
3093 *
3094 * @param p_hwfn - HW device data
3095 * @param p_ptt - Ptt window used for writing the registers.
3096 * @param dump_buf - Pointer to write the collected GRC data into.
3097 * @param buf_size_in_dwords - Size of the specified buffer in dwords.
3098 * @param num_dumped_dwords - OUT: number of dumped dwords.
3099 *
3100 * @return error if one of the following holds:
3101 *      - the version wasn't set
3102 *      - the specified dump buffer is too small
3103 * Otherwise, returns ok.
3104 */
3105enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
3106                                 struct qed_ptt *p_ptt,
3107                                 u32 *dump_buf,
3108                                 u32 buf_size_in_dwords,
3109                                 u32 *num_dumped_dwords);
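
/* Illustrative sketch (not part of the HSI): the typical two-step GRC dump
 * flow - query the required size, check that the caller's buffer is large
 * enough, then dump. The helper name is hypothetical; DBG_STATUS_OK and
 * DBG_STATUS_DUMP_BUF_TOO_SMALL are assumed to come from enum dbg_status.
 */
static inline enum dbg_status
qed_example_grc_dump(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u32 *buf, u32 buf_size_in_dwords, u32 *num_dumped_dwords)
{
        enum dbg_status rc;
        u32 needed_dwords;

        rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &needed_dwords);
        if (rc != DBG_STATUS_OK)
                return rc;

        if (needed_dwords > buf_size_in_dwords)
                return DBG_STATUS_DUMP_BUF_TOO_SMALL;

        return qed_dbg_grc_dump(p_hwfn, p_ptt, buf, buf_size_in_dwords,
                                num_dumped_dwords);
}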
3110
3111/**
3112 * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
3113 *      for idle check results.
3114 *
3115 * @param p_hwfn - HW device data
3116 * @param p_ptt - Ptt window used for writing the registers.
3117 * @param buf_size - OUT: required buffer size (in dwords) for the idle check
3118 *      data.
3119 *
3120 * @return error if one of the following holds:
3121 *      - the version wasn't set
3122 * Otherwise, returns ok.
3123 */
3124enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
3125                                                   struct qed_ptt *p_ptt,
3126                                                   u32 *buf_size);
3127
3128/**
3129 * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
3130 *      into the specified buffer.
3131 *
3132 * @param p_hwfn - HW device data
3133 * @param p_ptt - Ptt window used for writing the registers.
3134 * @param dump_buf - Pointer to write the idle check data into.
3135 * @param buf_size_in_dwords - Size of the specified buffer in dwords.
3136 * @param num_dumped_dwords - OUT: number of dumped dwords.
3137 *
3138 * @return error if one of the following holds:
3139 *      - the version wasn't set
3140 *      - the specified buffer is too small
3141 * Otherwise, returns ok.
3142 */
3143enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
3144                                      struct qed_ptt *p_ptt,
3145                                      u32 *dump_buf,
3146                                      u32 buf_size_in_dwords,
3147                                      u32 *num_dumped_dwords);
3148
3149/**
3150 * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
3151 *      for mcp trace results.
3152 *
3153 * @param p_hwfn - HW device data
3154 * @param p_ptt - Ptt window used for writing the registers.
3155 * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
3156 *
3157 * @return error if one of the following holds:
3158 *      - the version wasn't set
3159 *      - the trace data in MCP scratchpad contain an invalid signature
3160 *      - the bundle ID in NVRAM is invalid
3161 *      - the trace meta data cannot be found (in NVRAM or image file)
3162 * Otherwise, returns ok.
3163 */
3164enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
3165                                                    struct qed_ptt *p_ptt,
3166                                                    u32 *buf_size);
3167
3168/**
3169 * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
3170 *      into the specified buffer.
3171 *
3172 * @param p_hwfn - HW device data
3173 * @param p_ptt - Ptt window used for writing the registers.
3174 * @param dump_buf - Pointer to write the mcp trace data into.
3175 * @param buf_size_in_dwords - Size of the specified buffer in dwords.
3176 * @param num_dumped_dwords - OUT: number of dumped dwords.
3177 *
3178 * @return error if one of the following holds:
3179 *      - the version wasn't set
3180 *      - the specified buffer is too small
3181 *      - the trace data in MCP scratchpad contain an invalid signature
3182 *      - the bundle ID in NVRAM is invalid
3183 *      - the trace meta data cannot be found (in NVRAM or image file)
3184 *      - the trace meta data cannot be read (from NVRAM or image file)
3185 * Otherwise, returns ok.
3186 */
3187enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
3188                                       struct qed_ptt *p_ptt,
3189                                       u32 *dump_buf,
3190                                       u32 buf_size_in_dwords,
3191                                       u32 *num_dumped_dwords);
3192
3193/**
3194 * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
3195 *      for reg fifo results.
3196 *
3197 * @param p_hwfn - HW device data
3198 * @param p_ptt - Ptt window used for writing the registers.
3199 * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
3200 *
3201 * @return error if one of the following holds:
3202 *      - the version wasn't set
3203 * Otherwise, returns ok.
3204 */
3205enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
3206                                                   struct qed_ptt *p_ptt,
3207                                                   u32 *buf_size);
3208
3209/**
3210 * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
3211 *      the specified buffer.
3212 *
3213 * @param p_hwfn - HW device data
3214 * @param p_ptt - Ptt window used for writing the registers.
3215 * @param dump_buf - Pointer to write the reg fifo data into.
3216 * @param buf_size_in_dwords - Size of the specified buffer in dwords.
3217 * @param num_dumped_dwords - OUT: number of dumped dwords.
3218 *
3219 * @return error if one of the following holds:
3220 *      - the version wasn't set
3221 *      - the specified buffer is too small
3222 *      - DMAE transaction failed
3223 * Otherwise, returns ok.
3224 */
3225enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
3226                                      struct qed_ptt *p_ptt,
3227                                      u32 *dump_buf,
3228                                      u32 buf_size_in_dwords,
3229                                      u32 *num_dumped_dwords);
3230
3231/**
3232 * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
3233 *      for the IGU fifo results.
3234 *
3235 * @param p_hwfn - HW device data
3236 * @param p_ptt - Ptt window used for writing the registers.
3237 * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
3238 *      data.
3239 *
3240 * @return error if one of the following holds:
3241 *      - the version wasn't set
3242 * Otherwise, returns ok.
3243 */
3244enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
3245                                                   struct qed_ptt *p_ptt,
3246                                                   u32 *buf_size);
3247
3248/**
3249 * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
3250 *      the specified buffer.
3251 *
3252 * @param p_hwfn - HW device data
3253 * @param p_ptt - Ptt window used for writing the registers.
3254 * @param dump_buf - Pointer to write the IGU fifo data into.
3255 * @param buf_size_in_dwords - Size of the specified buffer in dwords.
3256 * @param num_dumped_dwords - OUT: number of dumped dwords.
3257 *
3258 * @return error if one of the following holds:
3259 *      - the version wasn't set
3260 *      - the specified buffer is too small
3261 *      - DMAE transaction failed
3262 * Otherwise, returns ok.
3263 */
3264enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
3265                                      struct qed_ptt *p_ptt,
3266                                      u32 *dump_buf,
3267                                      u32 buf_size_in_dwords,
3268                                      u32 *num_dumped_dwords);
3269
3270/**
3271 * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
3272 *      buffer size for protection override window results.
3273 *
3274 * @param p_hwfn - HW device data
3275 * @param p_ptt - Ptt window used for writing the registers.
3276 * @param buf_size - OUT: required buffer size (in dwords) for protection
3277 *      override data.
3278 *
3279 * @return error if one of the following holds:
3280 *      - the version wasn't set
3281 * Otherwise, returns ok.
3282 */
3283enum dbg_status
3284qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
3285                                              struct qed_ptt *p_ptt,
3286                                              u32 *buf_size);
3287/**
3288 * @brief qed_dbg_protection_override_dump - Reads protection override window
3289 *      entries and writes the results into the specified buffer.
3290 *
3291 * @param p_hwfn - HW device data
3292 * @param p_ptt - Ptt window used for writing the registers.
3293 * @param dump_buf - Pointer to write the protection override data into.
3294 * @param buf_size_in_dwords - Size of the specified buffer in dwords.
3295 * @param num_dumped_dwords - OUT: number of dumped dwords.
3296 *
3297 * @return error if one of the following holds:
3298 *      - the version wasn't set
3299 *      - the specified buffer is too small
3300 *      - DMAE transaction failed
3301 * Otherwise, returns ok.
3302 */
3303enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
3304                                                 struct qed_ptt *p_ptt,
3305                                                 u32 *dump_buf,
3306                                                 u32 buf_size_in_dwords,
3307                                                 u32 *num_dumped_dwords);
3308/**
3309 * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
3310 *      size for FW Asserts results.
3311 *
3312 * @param p_hwfn - HW device data
3313 * @param p_ptt - Ptt window used for writing the registers.
3314 * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
3315 *
3316 * @return error if one of the following holds:
3317 *      - the version wasn't set
3318 * Otherwise, returns ok.
3319 */
3320enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
3321                                                     struct qed_ptt *p_ptt,
3322                                                     u32 *buf_size);
3323/**
3324 * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
3325 *      into the specified buffer.
3326 *
3327 * @param p_hwfn - HW device data
3328 * @param p_ptt - Ptt window used for writing the registers.
3329 * @param dump_buf - Pointer to write the FW Asserts data into.
3330 * @param buf_size_in_dwords - Size of the specified buffer in dwords.
3331 * @param num_dumped_dwords - OUT: number of dumped dwords.
3332 *
3333 * @return error if one of the following holds:
3334 *      - the version wasn't set
3335 *      - the specified buffer is too small
3336 * Otherwise, returns ok.
3337 */
3338enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
3339                                        struct qed_ptt *p_ptt,
3340                                        u32 *dump_buf,
3341                                        u32 buf_size_in_dwords,
3342                                        u32 *num_dumped_dwords);
3343
3344/**
3345 * @brief qed_dbg_read_attn - Reads the attention registers of the specified
3346 * block and type, and writes the results into the specified buffer.
3347 *
3348 * @param p_hwfn -       HW device data
3349 * @param p_ptt -        Ptt window used for writing the registers.
3350 * @param block -        Block ID.
3351 * @param attn_type -    Attention type.
3352 * @param clear_status - Indicates if the attention status should be cleared.
3353 * @param results -      OUT: Pointer to write the read results into
3354 *
3355 * @return error if one of the following holds:
3356 *      - the version wasn't set
3357 * Otherwise, returns ok.
3358 */
3359enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
3360                                  struct qed_ptt *p_ptt,
3361                                  enum block_id block,
3362                                  enum dbg_attn_type attn_type,
3363                                  bool clear_status,
3364                                  struct dbg_attn_block_result *results);
3365
3366/**
3367 * @brief qed_dbg_print_attn - Prints attention register values in the
3368 *      specified results struct.
3369 *
3370 * @param p_hwfn - HW device data
3371 * @param results - Pointer to the attention read results
3372 *
3373 * @return error if one of the following holds:
3374 *      - the version wasn't set
3375 * Otherwise, returns ok.
3376 */
3377enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
3378                                   struct dbg_attn_block_result *results);
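
/* Illustrative sketch (not part of the HSI): read the attention registers of
 * a single block and print them, chaining qed_dbg_read_attn() and
 * qed_dbg_print_attn(). The helper name is hypothetical; DBG_STATUS_OK is
 * assumed to come from enum dbg_status.
 */
static inline enum dbg_status
qed_example_read_and_print_attn(struct qed_hwfn *p_hwfn,
                                struct qed_ptt *p_ptt,
                                enum block_id block,
                                enum dbg_attn_type attn_type,
                                struct dbg_attn_block_result *results)
{
        enum dbg_status rc;

        rc = qed_dbg_read_attn(p_hwfn, p_ptt, block, attn_type, false,
                               results);
        if (rc != DBG_STATUS_OK)
                return rc;

        return qed_dbg_print_attn(p_hwfn, results);
}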
3379
3380/******************************* Data Types **********************************/
3381
3382struct mcp_trace_format {
3383        u32 data;
3384#define MCP_TRACE_FORMAT_MODULE_MASK    0x0000ffff
3385#define MCP_TRACE_FORMAT_MODULE_OFFSET  0
3386#define MCP_TRACE_FORMAT_LEVEL_MASK     0x00030000
3387#define MCP_TRACE_FORMAT_LEVEL_OFFSET   16
3388#define MCP_TRACE_FORMAT_P1_SIZE_MASK   0x000c0000
3389#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
3390#define MCP_TRACE_FORMAT_P2_SIZE_MASK   0x00300000
3391#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
3392#define MCP_TRACE_FORMAT_P3_SIZE_MASK   0x00c00000
3393#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
3394#define MCP_TRACE_FORMAT_LEN_MASK       0xff000000
3395#define MCP_TRACE_FORMAT_LEN_OFFSET     24
3396
3397        char *format_str;
3398};
3399
3400/* MCP Trace Meta data structure */
3401struct mcp_trace_meta {
3402        u32 modules_num;
3403        char **modules;
3404        u32 formats_num;
3405        struct mcp_trace_format *formats;
3406        bool is_allocated;
3407};
3408
3409/* Debug Tools user data */
3410struct dbg_tools_user_data {
3411        struct mcp_trace_meta mcp_trace_meta;
3412        const u32 *mcp_trace_user_meta_buf;
3413};
3414
3415/******************************** Constants **********************************/
3416
3417#define MAX_NAME_LEN    16
3418
3419/***************************** Public Functions *******************************/
3420
3421/**
3422 * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
3423 *      debug arrays.
3424 *
3425 * @param p_hwfn - HW device data
3426 * @param bin_ptr - a pointer to the binary data with debug arrays.
3427 */
3428enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
3429                                         const u8 * const bin_ptr);
3430
3431/**
3432 * @brief qed_dbg_alloc_user_data - Allocates user debug data.
3433 *
3434 * @param p_hwfn -               HW device data
3435 * @param user_data_ptr - OUT: a pointer to the allocated memory.
3436 */
3437enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
3438                                        void **user_data_ptr);
3439
3440/**
3441 * @brief qed_dbg_get_status_str - Returns a string for the specified status.
3442 *
3443 * @param status - a debug status code.
3444 *
3445 * @return a string for the specified status
3446 */
3447const char *qed_dbg_get_status_str(enum dbg_status status);
3448
3449/**
3450 * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
3451 *      for idle check results (in bytes).
3452 *
3453 * @param p_hwfn - HW device data
3454 * @param dump_buf - idle check dump buffer.
3455 * @param num_dumped_dwords - number of dwords that were dumped.
3456 * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
3457 *      results.
3458 *
3459 * @return error if the parsing fails, ok otherwise.
3460 */
3461enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
3462                                                  u32 *dump_buf,
3463                                                  u32  num_dumped_dwords,
3464                                                  u32 *results_buf_size);
3465/**
3466 * @brief qed_print_idle_chk_results - Prints idle check results
3467 *
3468 * @param p_hwfn - HW device data
3469 * @param dump_buf - idle check dump buffer.
3470 * @param num_dumped_dwords - number of dwords that were dumped.
3471 * @param results_buf - buffer for printing the idle check results.
3472 * @param num_errors - OUT: number of errors found in idle check.
3473 * @param num_warnings - OUT: number of warnings found in idle check.
3474 *
3475 * @return error if the parsing fails, ok otherwise.
3476 */
3477enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
3478                                           u32 *dump_buf,
3479                                           u32 num_dumped_dwords,
3480                                           char *results_buf,
3481                                           u32 *num_errors,
3482                                           u32 *num_warnings);
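
/* Illustrative sketch (not part of the HSI): parse a completed idle check
 * dump into a caller-provided text buffer, sizing the buffer first. The
 * helper name is hypothetical; DBG_STATUS_OK and
 * DBG_STATUS_DUMP_BUF_TOO_SMALL are assumed to come from enum dbg_status.
 */
static inline enum dbg_status
qed_example_parse_idle_chk(struct qed_hwfn *p_hwfn,
                           u32 *dump_buf,
                           u32 num_dumped_dwords,
                           char *results_buf,
                           u32 results_buf_bytes,
                           u32 *num_errors, u32 *num_warnings)
{
        enum dbg_status rc;
        u32 needed_bytes;

        rc = qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
                                               num_dumped_dwords,
                                               &needed_bytes);
        if (rc != DBG_STATUS_OK)
                return rc;

        if (needed_bytes > results_buf_bytes)
                return DBG_STATUS_DUMP_BUF_TOO_SMALL;

        return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
                                          results_buf, num_errors,
                                          num_warnings);
}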
3483
3484/**
3485 * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
3486 *
3487 * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
3488 * no NVRAM access).
3489 *
3490 * @param p_hwfn - HW device data
3491 * @param meta_buf - pointer to the MCP Trace meta data (dword buffer)
3492 */
3493void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
3494                                     const u32 *meta_buf);
3495
3496/**
3497 * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
3498 *      for MCP Trace results (in bytes).
3499 *
3500 * @param p_hwfn - HW device data
3501 * @param dump_buf - MCP Trace dump buffer.
3502 * @param num_dumped_dwords - number of dwords that were dumped.
3503 * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
3504 *      results.
3505 *
3506 * @return error if the parsing fails, ok otherwise.
3507 */
3508enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
3509                                                   u32 *dump_buf,
3510                                                   u32 num_dumped_dwords,
3511                                                   u32 *results_buf_size);
3512
3513/**
3514 * @brief qed_print_mcp_trace_results - Prints MCP Trace results
3515 *
3516 * @param p_hwfn - HW device data
3517 * @param dump_buf - mcp trace dump buffer, starting from the header.
3518 * @param num_dumped_dwords - number of dwords that were dumped.
3519 * @param results_buf - buffer for printing the mcp trace results.
3520 *
3521 * @return error if the parsing fails, ok otherwise.
3522 */
3523enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
3524                                            u32 *dump_buf,
3525                                            u32 num_dumped_dwords,
3526                                            char *results_buf);
3527
3528/**
3529 * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
3530 * keeps the MCP trace meta data allocated, to support continuous MCP Trace
3531 * parsing. After the continuous parsing ends, qed_mcp_trace_free_meta_data
3532 * should be called to free the meta data.
3533 *
3534 * @param p_hwfn -            HW device data
3535 * @param dump_buf -          mcp trace dump buffer, starting from the header.
3536 * @param results_buf -       buffer for printing the mcp trace results.
3537 *
3538 * @return error if the parsing fails, ok otherwise.
3539 */
3540enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
3541                                                 u32 *dump_buf,
3542                                                 char *results_buf);
3543
3544/**
3545 * @brief qed_print_mcp_trace_line - Prints MCP Trace results for a single line
3546 *
3547 * @param p_hwfn -            HW device data
3548 * @param dump_buf -          mcp trace dump buffer, starting from the header.
3549 * @param num_dumped_bytes -  number of bytes that were dumped.
3550 * @param results_buf -       buffer for printing the mcp trace results.
3551 *
3552 * @return error if the parsing fails, ok otherwise.
3553 */
3554enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
3555                                         u8 *dump_buf,
3556                                         u32 num_dumped_bytes,
3557                                         char *results_buf);
3558
3559/**
3560 * @brief qed_mcp_trace_free_meta_data - Frees the MCP Trace meta data.
3561 * Should be called after continuous MCP Trace parsing.
3562 *
3563 * @param p_hwfn - HW device data
3564 */
3565void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
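
/* Illustrative sketch (not part of the HSI): continuous MCP Trace parsing -
 * parse successive dump buffers while the meta data stays allocated, then
 * release the meta data once parsing is done. The helper name and the
 * two-buffer scenario are hypothetical; DBG_STATUS_OK is assumed to come
 * from enum dbg_status.
 */
static inline enum dbg_status
qed_example_mcp_trace_cont(struct qed_hwfn *p_hwfn,
                           u32 *first_dump_buf, u32 *second_dump_buf,
                           char *results_buf)
{
        enum dbg_status rc;

        rc = qed_print_mcp_trace_results_cont(p_hwfn, first_dump_buf,
                                              results_buf);
        if (rc == DBG_STATUS_OK)
                rc = qed_print_mcp_trace_results_cont(p_hwfn, second_dump_buf,
                                                      results_buf);

        /* The meta data is kept across _cont calls; free it when done */
        qed_mcp_trace_free_meta_data(p_hwfn);

        return rc;
}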
3566
3567/**
3568 * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
3569 *      for reg_fifo results (in bytes).
3570 *
3571 * @param p_hwfn - HW device data
3572 * @param dump_buf - reg fifo dump buffer.
3573 * @param num_dumped_dwords - number of dwords that were dumped.
3574 * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
3575 *      results.
3576 *
3577 * @return error if the parsing fails, ok otherwise.
3578 */
3579enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
3580                                                  u32 *dump_buf,
3581                                                  u32 num_dumped_dwords,
3582                                                  u32 *results_buf_size);
3583
3584/**
3585 * @brief qed_print_reg_fifo_results - Prints reg fifo results
3586 *
3587 * @param p_hwfn - HW device data
3588 * @param dump_buf - reg fifo dump buffer, starting from the header.
3589 * @param num_dumped_dwords - number of dwords that were dumped.
3590 * @param results_buf - buffer for printing the reg fifo results.
3591 *
3592 * @return error if the parsing fails, ok otherwise.
3593 */
3594enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
3595                                           u32 *dump_buf,
3596                                           u32 num_dumped_dwords,
3597                                           char *results_buf);
3598
3599/**
3600 * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
3601 *      for igu_fifo results (in bytes).
3602 *
3603 * @param p_hwfn - HW device data
3604 * @param dump_buf - IGU fifo dump buffer.
3605 * @param num_dumped_dwords - number of dwords that were dumped.
3606 * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
3607 *      results.
3608 *
3609 * @return error if the parsing fails, ok otherwise.
3610 */
3611enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
3612                                                  u32 *dump_buf,
3613                                                  u32 num_dumped_dwords,
3614                                                  u32 *results_buf_size);
3615
3616/**
3617 * @brief qed_print_igu_fifo_results - Prints IGU fifo results
3618 *
3619 * @param p_hwfn - HW device data
3620 * @param dump_buf - IGU fifo dump buffer, starting from the header.
3621 * @param num_dumped_dwords - number of dwords that were dumped.
3622 * @param results_buf - buffer for printing the IGU fifo results.
3623 *
3624 * @return error if the parsing fails, ok otherwise.
3625 */
3626enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
3627                                           u32 *dump_buf,
3628                                           u32 num_dumped_dwords,
3629                                           char *results_buf);
3630
3631/**
3632 * @brief qed_get_protection_override_results_buf_size - Returns the required
3633 *      buffer size for protection override results (in bytes).
3634 *
3635 * @param p_hwfn - HW device data
3636 * @param dump_buf - protection override dump buffer.
3637 * @param num_dumped_dwords - number of dwords that were dumped.
3638 * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
3639 *      results.
3640 *
3641 * @return error if the parsing fails, ok otherwise.
3642 */
3643enum dbg_status
3644qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
3645                                             u32 *dump_buf,
3646                                             u32 num_dumped_dwords,
3647                                             u32 *results_buf_size);
3648
3649/**
3650 * @brief qed_print_protection_override_results - Prints protection override
3651 *      results.
3652 *
3653 * @param p_hwfn - HW device data
3654 * @param dump_buf - protection override dump buffer, starting from the header.
3655 * @param num_dumped_dwords - number of dwords that were dumped.
3656 * @param results_buf - buffer for printing the protection override results.
3657 *
3658 * @return error if the parsing fails, ok otherwise.
3659 */
3660enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
3661                                                      u32 *dump_buf,
3662                                                      u32 num_dumped_dwords,
3663                                                      char *results_buf);
3664
3665/**
3666 * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
3667 *      for FW Asserts results (in bytes).
3668 *
3669 * @param p_hwfn - HW device data
3670 * @param dump_buf - FW Asserts dump buffer.
3671 * @param num_dumped_dwords - number of dwords that were dumped.
3672 * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
3673 *      results.
3674 *
3675 * @return error if the parsing fails, ok otherwise.
3676 */
3677enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
3678                                                    u32 *dump_buf,
3679                                                    u32 num_dumped_dwords,
3680                                                    u32 *results_buf_size);
3681
3682/**
3683 * @brief qed_print_fw_asserts_results - Prints FW Asserts results
3684 *
3685 * @param p_hwfn - HW device data
3686 * @param dump_buf - FW Asserts dump buffer, starting from the header.
3687 * @param num_dumped_dwords - number of dwords that were dumped.
3688 * @param results_buf - buffer for printing the FW Asserts results.
3689 *
3690 * @return error if the parsing fails, ok otherwise.
3691 */
3692enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
3693                                             u32 *dump_buf,
3694                                             u32 num_dumped_dwords,
3695                                             char *results_buf);
3696
3697/**
3698 * @brief qed_dbg_parse_attn - Parses and prints attention register values in
3699 * the specified results struct.
3700 *
3701 * @param p_hwfn -  HW device data
3702 * @param results - Pointer to the attention read results
3703 *
3704 * @return error if one of the following holds:
3705 *      - the version wasn't set
3706 * Otherwise, returns ok.
3707 */
3708enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
3709                                   struct dbg_attn_block_result *results);
3710
3711/* Win 2 */
3712#define GTT_BAR0_MAP_REG_IGU_CMD        0x00f000UL
3713
3714/* Win 3 */
3715#define GTT_BAR0_MAP_REG_TSDM_RAM       0x010000UL
3716
3717/* Win 4 */
3718#define GTT_BAR0_MAP_REG_MSDM_RAM       0x011000UL
3719
3720/* Win 5 */
3721#define GTT_BAR0_MAP_REG_MSDM_RAM_1024  0x012000UL
3722
3723/* Win 6 */
3724#define GTT_BAR0_MAP_REG_MSDM_RAM_2048  0x013000UL
3725
3726/* Win 7 */
3727#define GTT_BAR0_MAP_REG_USDM_RAM       0x014000UL
3728
3729/* Win 8 */
3730#define GTT_BAR0_MAP_REG_USDM_RAM_1024  0x015000UL
3731
3732/* Win 9 */
3733#define GTT_BAR0_MAP_REG_USDM_RAM_2048  0x016000UL
3734
3735/* Win 10 */
3736#define GTT_BAR0_MAP_REG_XSDM_RAM       0x017000UL
3737
3738/* Win 11 */
3739#define GTT_BAR0_MAP_REG_XSDM_RAM_1024  0x018000UL
3740
3741/* Win 12 */
3742#define GTT_BAR0_MAP_REG_YSDM_RAM       0x019000UL
3743
3744/* Win 13 */
3745#define GTT_BAR0_MAP_REG_PSDM_RAM       0x01a000UL
3746
3747/**
3748 * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
3749 *
3750 * Returns the required host memory size in 4KB units.
3751 * Must be called before all QM init HSI functions.
3752 *
3753 * @param num_pf_cids - number of connections used by this PF
3754 * @param num_vf_cids - number of connections used by VFs of this PF
3755 * @param num_tids - number of tasks used by this PF
3756 * @param num_pf_pqs - number of PQs used by this PF
3757 * @param num_vf_pqs - number of PQs used by VFs of this PF
3758 *
3759 * @return The required host memory size in 4KB units.
3760 */
3761u32 qed_qm_pf_mem_size(u32 num_pf_cids,
3762                       u32 num_vf_cids,
3763                       u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
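
/* Illustrative sketch (not part of the HSI): qed_qm_pf_mem_size() returns the
 * size in 4KB units, so a hypothetical helper converting it to bytes could
 * look as follows.
 */
static inline u64 qed_example_qm_pf_mem_bytes(u32 num_pf_cids, u32 num_vf_cids,
                                              u32 num_tids, u16 num_pf_pqs,
                                              u16 num_vf_pqs)
{
        return (u64)qed_qm_pf_mem_size(num_pf_cids, num_vf_cids, num_tids,
                                       num_pf_pqs, num_vf_pqs) * 4096;
}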
3764
3765struct qed_qm_common_rt_init_params {
3766        u8 max_ports_per_engine;
3767        u8 max_phys_tcs_per_port;
3768        bool pf_rl_en;
3769        bool pf_wfq_en;
3770        bool global_rl_en;
3771        bool vport_wfq_en;
3772        struct init_qm_port_params *port_params;
3773};
3774
3775int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
3776                          struct qed_qm_common_rt_init_params *p_params);
3777
3778struct qed_qm_pf_rt_init_params {
3779        u8 port_id;
3780        u8 pf_id;
3781        u8 max_phys_tcs_per_port;
3782        bool is_pf_loading;
3783        u32 num_pf_cids;
3784        u32 num_vf_cids;
3785        u32 num_tids;
3786        u16 start_pq;
3787        u16 num_pf_pqs;
3788        u16 num_vf_pqs;
3789        u16 start_vport;
3790        u16 num_vports;
3791        u16 pf_wfq;
3792        u32 pf_rl;
3793        struct init_qm_pq_params *pq_params;
3794        struct init_qm_vport_params *vport_params;
3795};
3796
3797int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
3798        struct qed_ptt *p_ptt,
3799        struct qed_qm_pf_rt_init_params *p_params);
3800
3801/**
3802 * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
3803 *
3804 * @param p_hwfn
3805 * @param p_ptt - ptt window used for writing the registers
3806 * @param pf_id - PF ID
3807 * @param pf_wfq - WFQ weight. Must be non-zero.
3808 *
3809 * @return 0 on success, -1 on error.
3810 */
3811int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
3812                    struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
3813
3814/**
3815 * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
3816 *
3817 * @param p_hwfn
3818 * @param p_ptt - ptt window used for writing the registers
3819 * @param pf_id - PF ID
3820 * @param pf_rl - rate limit in Mb/sec units
3821 *
3822 * @return 0 on success, -1 on error.
3823 */
3824int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
3825                   struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
3826
3827/**
3828 * @brief qed_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
3829 *
3830 * @param p_hwfn
3831 * @param p_ptt - ptt window used for writing the registers
3832 * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
3833 *        with the VPORT for each TC. This array is filled by
3834 *        qed_qm_pf_rt_init
3835 * @param wfq - WFQ weight. Must be non-zero.
3836 *
3837 * @return 0 on success, -1 on error.
3838 */
3839int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
3840                       struct qed_ptt *p_ptt,
3841                       u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
3842
3843/**
3844 * @brief qed_init_global_rl - Initializes the rate limit of the specified
3845 * rate limiter
3846 *
3847 * @param p_hwfn
3848 * @param p_ptt - ptt window used for writing the registers
3849 * @param rl_id - RL ID
3850 * @param rate_limit - rate limit in Mb/sec units
3851 *
3852 * @return 0 on success, -1 on error.
3853 */
3854int qed_init_global_rl(struct qed_hwfn *p_hwfn,
3855                       struct qed_ptt *p_ptt,
3856                       u16 rl_id, u32 rate_limit);
3857
3858/**
3859 * @brief qed_send_qm_stop_cmd - Sends a stop command to the QM
3860 *
3861 * @param p_hwfn
3862 * @param p_ptt
3863 * @param is_release_cmd - true for release, false for stop.
3864 * @param is_tx_pq - true for Tx PQs, false for Other PQs.
3865 * @param start_pq - first PQ ID to stop
3866 * @param num_pqs - Number of PQs to stop, starting from start_pq.
3867 *
3868 * @return bool, true if successful, false if timeout occurred while waiting for
3869 *      QM command done.
3870 */
3871bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
3872                          struct qed_ptt *p_ptt,
3873                          bool is_release_cmd,
3874                          bool is_tx_pq, u16 start_pq, u16 num_pqs);
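
/* Illustrative sketch (not part of the HSI): one plausible stop sequence for a
 * range of Tx PQs - send the stop command first, then the release command for
 * the same range. The helper name and the ordering are hypothetical.
 */
static inline bool qed_example_stop_and_release_tx_pqs(struct qed_hwfn *p_hwfn,
                                                       struct qed_ptt *p_ptt,
                                                       u16 start_pq,
                                                       u16 num_pqs)
{
        if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, start_pq,
                                  num_pqs))
                return false;

        return qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq,
                                    num_pqs);
}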
3875
3876/**
3877 * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
3878 *
3879 * @param p_hwfn
3880 * @param p_ptt - ptt window used for writing the registers.
3881 * @param dest_port - vxlan destination udp port.
3882 */
3883void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
3884                             struct qed_ptt *p_ptt, u16 dest_port);
3885
3886/**
3887 * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
3888 *
3889 * @param p_hwfn
3890 * @param p_ptt - ptt window used for writing the registers.
3891 * @param vxlan_enable - vxlan enable flag.
3892 */
3893void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
3894                          struct qed_ptt *p_ptt, bool vxlan_enable);
3895
3896/**
3897 * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
3898 *
3899 * @param p_hwfn
3900 * @param p_ptt - ptt window used for writing the registers.
3901 * @param eth_gre_enable - eth GRE enable flag.
3902 * @param ip_gre_enable - IP GRE enable flag.
3903 */
3904void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
3905                        struct qed_ptt *p_ptt,
3906                        bool eth_gre_enable, bool ip_gre_enable);
3907
3908/**
3909 * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
3910 *
3911 * @param p_hwfn
3912 * @param p_ptt - ptt window used for writing the registers.
3913 * @param dest_port - geneve destination udp port.
3914 */
3915void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
3916                              struct qed_ptt *p_ptt, u16 dest_port);
3917
3918/**
3919 * @brief qed_set_geneve_enable - enable or disable GENEVE tunnel in HW
3920 *
3921 * @param p_ptt - ptt window used for writing the registers.
3922 * @param eth_geneve_enable - eth GENEVE enable flag.
3923 * @param ip_geneve_enable - IP GENEVE enable flag.
3924 */
3925void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
3926                           struct qed_ptt *p_ptt,
3927                           bool eth_geneve_enable, bool ip_geneve_enable);
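
/* Illustrative sketch (not part of the HSI): configure and enable the VXLAN
 * and GENEVE tunnel types with caller-supplied UDP destination ports. The
 * helper name is hypothetical.
 */
static inline void qed_example_enable_udp_tunnels(struct qed_hwfn *p_hwfn,
                                                  struct qed_ptt *p_ptt,
                                                  u16 vxlan_port,
                                                  u16 geneve_port)
{
        qed_set_vxlan_dest_port(p_hwfn, p_ptt, vxlan_port);
        qed_set_vxlan_enable(p_hwfn, p_ptt, true);

        qed_set_geneve_dest_port(p_hwfn, p_ptt, geneve_port);
        qed_set_geneve_enable(p_hwfn, p_ptt, true, true);
}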
3928
3929void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
3930                                struct qed_ptt *p_ptt, bool enable);
3931
3932/**
3933 * @brief qed_gft_disable - Disable GFT
3934 *
3935 * @param p_hwfn
3936 * @param p_ptt - ptt window used for writing the registers.
3937 * @param pf_id - pf on which to disable GFT.
3938 */
3939void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
3940
3941/**
3942 * @brief qed_gft_config - Enable and configure HW for GFT
3943 *
3944 * @param p_hwfn - HW device data
3945 * @param p_ptt - ptt window used for writing the registers.
3946 * @param pf_id - pf on which to enable GFT.
3947 * @param tcp - set profile to match tcp packets.
3948 * @param udp - set profile to match udp packets.
3949 * @param ipv4 - set profile to match ipv4 packets.
3950 * @param ipv6 - set profile to match ipv6 packets.
3951 * @param profile_type - defines which packet fields to match on (enum gft_profile_type).
3952 */
3953void qed_gft_config(struct qed_hwfn *p_hwfn,
3954                    struct qed_ptt *p_ptt,
3955                    u16 pf_id,
3956                    bool tcp,
3957                    bool udp,
3958                    bool ipv4, bool ipv6, enum gft_profile_type profile_type);
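
/* Illustrative sketch (not part of the HSI): enable GFT on a PF for TCP/IPv4
 * traffic with a caller-chosen profile type; qed_gft_disable() is the
 * matching teardown call. The helper name is hypothetical.
 */
static inline void qed_example_gft_enable_tcp_ipv4(struct qed_hwfn *p_hwfn,
                                                   struct qed_ptt *p_ptt,
                                                   u16 pf_id,
                                                   enum gft_profile_type profile_type)
{
        qed_gft_config(p_hwfn, p_ptt, pf_id, true, false, true, false,
                       profile_type);
}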
3959
3960/**
3961 * @brief qed_enable_context_validation - Enable and configure context
3962 *      validation.
3963 *
3964 * @param p_hwfn
3965 * @param p_ptt - ptt window used for writing the registers.
3966 */
3967void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
3968                                   struct qed_ptt *p_ptt);
3969
3970/**
3971 * @brief qed_calc_session_ctx_validation - Calculate validation byte for
3972 *      session context.
3973 *
3974 * @param p_ctx_mem - pointer to context memory.
3975 * @param ctx_size - context size.
3976 * @param ctx_type - context type.
3977 * @param cid - context cid.
3978 */
3979void qed_calc_session_ctx_validation(void *p_ctx_mem,
3980                                     u16 ctx_size, u8 ctx_type, u32 cid);
3981
3982/**
3983 * @brief qed_calc_task_ctx_validation - Calculate validation byte for task
3984 *      context.
3985 *
3986 * @param p_ctx_mem - pointer to context memory.
3987 * @param ctx_size - context size.
3988 * @param ctx_type - context type.
3989 * @param tid - context tid.
3990 */
3991void qed_calc_task_ctx_validation(void *p_ctx_mem,
3992                                  u16 ctx_size, u8 ctx_type, u32 tid);
3993
3994/**
3995 * @brief qed_memset_session_ctx - Memset session context to 0 while
3996 *      preserving validation bytes.
3997 *
3999 * @param p_ctx_mem - pointer to context memory.
4000 * @param ctx_size - size to initialize.
4001 * @param ctx_type - context type.
4002 */
4003void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
4004
4005/**
4006 * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
4007 *      validation bytes.
4008 *
4009 * @param p_ctx_mem - pointer to context memory.
4010 * @param ctx_size - size to initialize.
4011 * @param ctx_type - context type.
4012 */
4013void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
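
/* Illustrative sketch (not part of the HSI): one plausible flow for preparing
 * a session context - stamp its validation byte for the given cid, then zero
 * the context while the validation bytes are preserved. The helper name and
 * the ordering are hypothetical.
 */
static inline void qed_example_prepare_session_ctx(void *p_ctx_mem,
                                                   u16 ctx_size, u8 ctx_type,
                                                   u32 cid)
{
        qed_calc_session_ctx_validation(p_ctx_mem, ctx_size, ctx_type, cid);
        qed_memset_session_ctx(p_ctx_mem, ctx_size, ctx_type);
}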
4014
4015#define NUM_STORMS 6
4016
4017/**
4018 * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
4019 *                                   If the severity of an error exceeds
4020 *                                   this level, the FW will assert.
4021 * @param p_hwfn - HW device data
4022 * @param p_ptt - ptt window used for writing the registers
4023 * @param assert_level - An array of assert levels for each storm.
4024 *
4025 */
4026void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
4027                              struct qed_ptt *p_ptt,
4028                              u8 assert_level[NUM_STORMS]);
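
/* Illustrative sketch (not part of the HSI): set the same RDMA assert level
 * for all storms. The helper name is hypothetical.
 */
static inline void qed_example_set_rdma_error_level(struct qed_hwfn *p_hwfn,
                                                    struct qed_ptt *p_ptt,
                                                    u8 level)
{
        u8 assert_level[NUM_STORMS];
        u8 i;

        for (i = 0; i < NUM_STORMS; i++)
                assert_level[i] = level;

        qed_set_rdma_error_level(p_hwfn, p_ptt, assert_level);
}
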
4029/**
4030 * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
4031 *
4032 * @param p_hwfn - HW device data
4033 * @param fw_overlay_in_buf - the input FW overlay buffer.
4034 * @param buf_size_in_bytes - the size of the input FW overlay buffer in
4035 *                   bytes. Must be aligned to dwords.
4037 *
4038 * @return a pointer to the allocated overlays memory,
4039 * or NULL in case of failures.
4040 */
4041struct phys_mem_desc *
4042qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
4043                         const u32 * const fw_overlay_in_buf,
4044                         u32 buf_size_in_bytes);
4045
4046/**
4047 * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
4048 *
4049 * @param p_hwfn - HW device data.
4050 * @param p_ptt - ptt window used for writing the registers.
4051 * @param fw_overlay_mem - the allocated FW overlay memory.
4052 */
4053void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
4054                             struct qed_ptt *p_ptt,
4055                             struct phys_mem_desc *fw_overlay_mem);
4056
4057/**
4058 * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
4059 *
4060 * @param p_hwfn - HW device data.
4061 * @param fw_overlay_mem - the allocated FW overlay memory to free.
4062 */
4063void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
4064                             struct phys_mem_desc *fw_overlay_mem);
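
/* Illustrative sketch (not part of the HSI): the FW overlay lifecycle -
 * allocate and fill the overlay memory from the input buffer, program it into
 * the Storm RAMs, and free it on teardown. The helper name is hypothetical; a
 * real caller would keep fw_overlay_mem around for as long as the overlays
 * are in use.
 */
static inline bool qed_example_fw_overlay_cycle(struct qed_hwfn *p_hwfn,
                                                struct qed_ptt *p_ptt,
                                                const u32 * const overlay_buf,
                                                u32 buf_size_in_bytes)
{
        struct phys_mem_desc *fw_overlay_mem;

        fw_overlay_mem = qed_fw_overlay_mem_alloc(p_hwfn, overlay_buf,
                                                  buf_size_in_bytes);
        if (!fw_overlay_mem)
                return false;

        qed_fw_overlay_init_ram(p_hwfn, p_ptt, fw_overlay_mem);

        /* ... overlays are now in use ... */

        qed_fw_overlay_mem_free(p_hwfn, fw_overlay_mem);

        return true;
}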
4065
4066/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
4067#define YSTORM_FLOW_CONTROL_MODE_OFFSET                 (IRO[0].base)
4068#define YSTORM_FLOW_CONTROL_MODE_SIZE                   (IRO[0].size)
4069
4070/* Tstorm port statistics */
4071#define TSTORM_PORT_STAT_OFFSET(port_id) \
4072        (IRO[1].base + ((port_id) * IRO[1].m1))
4073#define TSTORM_PORT_STAT_SIZE                           (IRO[1].size)
4074
4075/* Tstorm ll2 port statistics */
4076#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
4077        (IRO[2].base + ((port_id) * IRO[2].m1))
4078#define TSTORM_LL2_PORT_STAT_SIZE                       (IRO[2].size)
4079
4080/* Ustorm VF-PF Channel ready flag */
4081#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
4082        (IRO[3].base + ((vf_id) * IRO[3].m1))
4083#define USTORM_VF_PF_CHANNEL_READY_SIZE                 (IRO[3].size)
4084
4085/* Ustorm Final flr cleanup ack */
4086#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
4087        (IRO[4].base + ((pf_id) * IRO[4].m1))
4088#define USTORM_FLR_FINAL_ACK_SIZE                       (IRO[4].size)
4089
4090/* Ustorm Event ring consumer */
4091#define USTORM_EQE_CONS_OFFSET(pf_id) \
4092        (IRO[5].base + ((pf_id) * IRO[5].m1))
4093#define USTORM_EQE_CONS_SIZE                            (IRO[5].size)
4094
4095/* Ustorm eth queue zone */
4096#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
4097        (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
4098#define USTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[6].size)
4099
4100/* Ustorm Common Queue ring consumer */
4101#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
4102        (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
4103#define USTORM_COMMON_QUEUE_CONS_SIZE                   (IRO[7].size)
4104
4105/* Xstorm common PQ info */
4106#define XSTORM_PQ_INFO_OFFSET(pq_id) \
4107        (IRO[8].base + ((pq_id) * IRO[8].m1))
4108#define XSTORM_PQ_INFO_SIZE                             (IRO[8].size)
4109
4110/* Xstorm Integration Test Data */
4111#define XSTORM_INTEG_TEST_DATA_OFFSET                   (IRO[9].base)
4112#define XSTORM_INTEG_TEST_DATA_SIZE                     (IRO[9].size)
4113
4114/* Ystorm Integration Test Data */
4115#define YSTORM_INTEG_TEST_DATA_OFFSET                   (IRO[10].base)
4116#define YSTORM_INTEG_TEST_DATA_SIZE                     (IRO[10].size)
4117
4118/* Pstorm Integration Test Data */
4119#define PSTORM_INTEG_TEST_DATA_OFFSET                   (IRO[11].base)
4120#define PSTORM_INTEG_TEST_DATA_SIZE                     (IRO[11].size)
4121
4122/* Tstorm Integration Test Data */
4123#define TSTORM_INTEG_TEST_DATA_OFFSET                   (IRO[12].base)
4124#define TSTORM_INTEG_TEST_DATA_SIZE                     (IRO[12].size)
4125
4126/* Mstorm Integration Test Data */
4127#define MSTORM_INTEG_TEST_DATA_OFFSET                   (IRO[13].base)
4128#define MSTORM_INTEG_TEST_DATA_SIZE                     (IRO[13].size)
4129
4130/* Ustorm Integration Test Data */
4131#define USTORM_INTEG_TEST_DATA_OFFSET                   (IRO[14].base)
4132#define USTORM_INTEG_TEST_DATA_SIZE                     (IRO[14].size)
4133
4134/* Xstorm overlay buffer host address */
4135#define XSTORM_OVERLAY_BUF_ADDR_OFFSET                  (IRO[15].base)
4136#define XSTORM_OVERLAY_BUF_ADDR_SIZE                    (IRO[15].size)
4137
4138/* Ystorm overlay buffer host address */
4139#define YSTORM_OVERLAY_BUF_ADDR_OFFSET                  (IRO[16].base)
4140#define YSTORM_OVERLAY_BUF_ADDR_SIZE                    (IRO[16].size)
4141
4142/* Pstorm overlay buffer host address */
4143#define PSTORM_OVERLAY_BUF_ADDR_OFFSET                  (IRO[17].base)
4144#define PSTORM_OVERLAY_BUF_ADDR_SIZE                    (IRO[17].size)
4145
4146/* Tstorm overlay buffer host address */
4147#define TSTORM_OVERLAY_BUF_ADDR_OFFSET                  (IRO[18].base)
4148#define TSTORM_OVERLAY_BUF_ADDR_SIZE                    (IRO[18].size)
4149
4150/* Mstorm overlay buffer host address */
4151#define MSTORM_OVERLAY_BUF_ADDR_OFFSET                  (IRO[19].base)
4152#define MSTORM_OVERLAY_BUF_ADDR_SIZE                    (IRO[19].size)
4153
4154/* Ustorm overlay buffer host address */
4155#define USTORM_OVERLAY_BUF_ADDR_OFFSET                  (IRO[20].base)
4156#define USTORM_OVERLAY_BUF_ADDR_SIZE                    (IRO[20].size)
4157
4158/* Tstorm producers */
4159#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
4160        (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
4161#define TSTORM_LL2_RX_PRODS_SIZE                        (IRO[21].size)
4162
4163/* Tstorm LightL2 queue statistics */
4164#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
4165        (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
4166#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE             (IRO[22].size)
4167
4168/* Ustorm LightL2 queue statistics */
4169#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
4170        (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
4171#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE             (IRO[23].size)
4172
4173/* Pstorm LightL2 queue statistics */
4174#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
4175        (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
4176#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE             (IRO[24].size)
4177
4178/* Mstorm queue statistics */
4179#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
4180        (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
4181#define MSTORM_QUEUE_STAT_SIZE                          (IRO[25].size)
4182
4183/* TPA aggregation timeout in us resolution (on ASIC) */
4184#define MSTORM_TPA_TIMEOUT_US_OFFSET                    (IRO[26].base)
4185#define MSTORM_TPA_TIMEOUT_US_SIZE                      (IRO[26].size)
4186
4187/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
4188 * mode
4189 */
4190#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
4191        (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
4192#define MSTORM_ETH_VF_PRODS_SIZE                        (IRO[27].size)
4193
4194/* Mstorm ETH PF queues producers */
4195#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
4196        (IRO[28].base + ((queue_id) * IRO[28].m1))
4197#define MSTORM_ETH_PF_PRODS_SIZE                        (IRO[28].size)
4198
4199/* Mstorm pf statistics */
4200#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
4201        (IRO[29].base + ((pf_id) * IRO[29].m1))
4202#define MSTORM_ETH_PF_STAT_SIZE                         (IRO[29].size)
4203
4204/* Ustorm queue statistics */
4205#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
4206        (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
4207#define USTORM_QUEUE_STAT_SIZE                          (IRO[30].size)
4208
4209/* Ustorm pf statistics */
4210#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
4211        (IRO[31].base + ((pf_id) * IRO[31].m1))
4212#define USTORM_ETH_PF_STAT_SIZE                         (IRO[31].size)
4213
4214/* Pstorm queue statistics */
4215#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id)       \
4216        (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
4217#define PSTORM_QUEUE_STAT_SIZE                          (IRO[32].size)
4218
4219/* Pstorm pf statistics */
4220#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
4221        (IRO[33].base + ((pf_id) * IRO[33].m1))
4222#define PSTORM_ETH_PF_STAT_SIZE                         (IRO[33].size)
4223
4224/* Control frame's EthType configuration for TX control frame security */
4225#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id)    \
4226        (IRO[34].base + ((eth_type_id) * IRO[34].m1))
4227#define PSTORM_CTL_FRAME_ETHTYPE_SIZE                   (IRO[34].size)
4228
4229/* Tstorm last parser message */
4230#define TSTORM_ETH_PRS_INPUT_OFFSET                     (IRO[35].base)
4231#define TSTORM_ETH_PRS_INPUT_SIZE                       (IRO[35].size)
4232
4233/* Tstorm Eth limit Rx rate */
4234#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
4235        (IRO[36].base + ((pf_id) * IRO[36].m1))
4236#define ETH_RX_RATE_LIMIT_SIZE                          (IRO[36].size)
4237
4238/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
4239 * Use eth_tstorm_rss_update_data for update
4240 */
4241#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
4242        (IRO[37].base + ((pf_id) * IRO[37].m1))
4243#define TSTORM_ETH_RSS_UPDATE_SIZE                      (IRO[37].size)
4244
4245/* Xstorm queue zone */
4246#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
4247        (IRO[38].base + ((queue_id) * IRO[38].m1))
4248#define XSTORM_ETH_QUEUE_ZONE_SIZE                      (IRO[38].size)
4249
4250/* Ystorm cqe producer */
4251#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
4252        (IRO[39].base + ((rss_id) * IRO[39].m1))
4253#define YSTORM_TOE_CQ_PROD_SIZE                         (IRO[39].size)
4254
4255/* Ustorm cqe producer */
4256#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
4257        (IRO[40].base + ((rss_id) * IRO[40].m1))
4258#define USTORM_TOE_CQ_PROD_SIZE                         (IRO[40].size)
4259
4260/* Ustorm grq producer */
4261#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
4262        (IRO[41].base + ((pf_id) * IRO[41].m1))
4263#define USTORM_TOE_GRQ_PROD_SIZE                        (IRO[41].size)
4264
4265/* Tstorm cmdq-cons of given command queue-id */
4266#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
4267        (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
4268#define TSTORM_SCSI_CMDQ_CONS_SIZE                      (IRO[42].size)
4269
4270/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
4271 * BDqueue-id
4272 */
4273#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
4274        (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
4275         ((bdq_id) * IRO[43].m2))
4276#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE                   (IRO[43].size)
4277
4278/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
4279#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
4280        (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
4281         ((bdq_id) * IRO[44].m2))
4282#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE                   (IRO[44].size)
4283
4284/* Tstorm iSCSI RX stats */
4285#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
4286        (IRO[45].base + ((storage_func_id) * IRO[45].m1))
4287#define TSTORM_ISCSI_RX_STATS_SIZE                      (IRO[45].size)
4288
4289/* Mstorm iSCSI RX stats */
4290#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
4291        (IRO[46].base + ((storage_func_id) * IRO[46].m1))
4292#define MSTORM_ISCSI_RX_STATS_SIZE                      (IRO[46].size)
4293
4294/* Ustorm iSCSI RX stats */
4295#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
4296        (IRO[47].base + ((storage_func_id) * IRO[47].m1))
4297#define USTORM_ISCSI_RX_STATS_SIZE                      (IRO[47].size)
4298
4299/* Xstorm iSCSI TX stats */
4300#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
4301        (IRO[48].base + ((storage_func_id) * IRO[48].m1))
4302#define XSTORM_ISCSI_TX_STATS_SIZE                      (IRO[48].size)
4303
4304/* Ystorm iSCSI TX stats */
4305#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
4306        (IRO[49].base + ((storage_func_id) * IRO[49].m1))
4307#define YSTORM_ISCSI_TX_STATS_SIZE                      (IRO[49].size)
4308
4309/* Pstorm iSCSI TX stats */
4310#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
4311        (IRO[50].base + ((storage_func_id) * IRO[50].m1))
4312#define PSTORM_ISCSI_TX_STATS_SIZE                      (IRO[50].size)
4313
4314/* Tstorm FCoE RX stats */
4315#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
4316        (IRO[51].base + ((pf_id) * IRO[51].m1))
4317#define TSTORM_FCOE_RX_STATS_SIZE                       (IRO[51].size)
4318
4319/* Pstorm FCoE TX stats */
4320#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
4321        (IRO[52].base + ((pf_id) * IRO[52].m1))
4322#define PSTORM_FCOE_TX_STATS_SIZE                       (IRO[52].size)
4323
4324/* Pstorm RDMA queue statistics */
4325#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
4326        (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
4327#define PSTORM_RDMA_QUEUE_STAT_SIZE                     (IRO[53].size)