#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
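
/* Memory groups */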
enum mem_groups {
	MEM_GROUP_PXP_MEM,
	MEM_GROUP_DMAE_MEM,
	MEM_GROUP_CM_MEM,
	MEM_GROUP_QM_MEM,
	MEM_GROUP_DORQ_MEM,
	MEM_GROUP_BRB_RAM,
	MEM_GROUP_BRB_MEM,
	MEM_GROUP_PRS_MEM,
	MEM_GROUP_SDM_MEM,
	MEM_GROUP_PBUF,
	MEM_GROUP_IOR,
	MEM_GROUP_RAM,
	MEM_GROUP_BTB_RAM,
	MEM_GROUP_RDIF_CTX,
	MEM_GROUP_TDIF_CTX,
	MEM_GROUP_CFC_MEM,
	MEM_GROUP_CONN_CFC_MEM,
	MEM_GROUP_CAU_PI,
	MEM_GROUP_CAU_MEM,
	MEM_GROUP_CAU_MEM_EXT,
	MEM_GROUP_PXP_ILT,
	MEM_GROUP_MULD_MEM,
	MEM_GROUP_BTB_MEM,
	MEM_GROUP_IGU_MEM,
	MEM_GROUP_IGU_MSIX,
	MEM_GROUP_CAU_SB,
	MEM_GROUP_BMB_RAM,
	MEM_GROUP_BMB_MEM,
	MEM_GROUP_TM_MEM,
	MEM_GROUP_TASK_CFC_MEM,
	MEM_GROUPS_NUM
};
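
/* Memory group names, arranged in the same order as enum mem_groups */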
static const char * const s_mem_group_names[] = {
	"PXP_MEM",
	"DMAE_MEM",
	"CM_MEM",
	"QM_MEM",
	"DORQ_MEM",
	"BRB_RAM",
	"BRB_MEM",
	"PRS_MEM",
	"SDM_MEM",
	"PBUF",
	"IOR",
	"RAM",
	"BTB_RAM",
	"RDIF_CTX",
	"TDIF_CTX",
	"CFC_MEM",
	"CONN_CFC_MEM",
	"CAU_PI",
	"CAU_MEM",
	"CAU_MEM_EXT",
	"PXP_ILT",
	"MULD_MEM",
	"BTB_MEM",
	"IGU_MEM",
	"IGU_MSIX",
	"CAU_SB",
	"BMB_RAM",
	"BMB_MEM",
	"TM_MEM",
	"TASK_CFC_MEM",
};
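
/* Idle check conditions. Each condition receives the register values that
 * were read (r) and the rule's immediate operands (imm), and returns non-zero
 * if the condition holds.
 */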
static u32 cond5(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
}

static u32 cond7(const u32 *r, const u32 *imm)
{
	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}

static u32 cond6(const u32 *r, const u32 *imm)
{
	return (r[0] & imm[0]) != imm[1];
}

static u32 cond9(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) !=
	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}

static u32 cond10(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}

static u32 cond4(const u32 *r, const u32 *imm)
{
	return (r[0] & ~imm[0]) != imm[1];
}

static u32 cond0(const u32 *r, const u32 *imm)
{
	return (r[0] & ~r[1]) != imm[0];
}

static u32 cond1(const u32 *r, const u32 *imm)
{
	return r[0] != imm[0];
}

static u32 cond11(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] == imm[0];
}

static u32 cond12(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] > imm[0];
}

static u32 cond3(const u32 *r, const u32 *imm)
{
	return r[0] != r[1];
}

static u32 cond13(const u32 *r, const u32 *imm)
{
	return r[0] & imm[0];
}

static u32 cond8(const u32 *r, const u32 *imm)
{
	return r[0] < (r[1] - imm[0]);
}

static u32 cond2(const u32 *r, const u32 *imm)
{
	return r[0] > imm[0];
}
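
/* Condition callbacks, indexed by the condition ID encoded in the idle check
 * rules.
 */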
static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
	cond0,
	cond1,
	cond2,
	cond3,
	cond4,
	cond5,
	cond6,
	cond7,
	cond8,
	cond9,
	cond10,
	cond11,
	cond12,
	cond13,
};

#define NUM_PHYS_BLOCKS 84

#define NUM_DBG_RESET_REGS 8

/* HW types */
enum hw_types {
	HW_TYPE_ASIC,
	PLATFORM_RESERVED,
	PLATFORM_RESERVED2,
	PLATFORM_RESERVED3,
	PLATFORM_RESERVED4,
	MAX_HW_TYPES
};

/* CM context types */
enum cm_ctx_types {
	CM_CTX_CONN_AG,
	CM_CTX_CONN_ST,
	CM_CTX_TASK_AG,
	CM_CTX_TASK_ST,
	NUM_CM_CTX_TYPES
};

/* Debug bus frame modes */
enum dbg_bus_frame_modes {
	DBG_BUS_FRAME_MODE_4ST = 0,	/* 4 Storm dwords, no HW dwords */
	DBG_BUS_FRAME_MODE_2ST_2HW = 1,	/* 2 Storm dwords, 2 HW dwords */
	DBG_BUS_FRAME_MODE_1ST_3HW = 2,	/* 1 Storm dword, 3 HW dwords */
	DBG_BUS_FRAME_MODE_4HW = 3,	/* 4 HW dwords, no Storm dwords */
	DBG_BUS_FRAME_MODE_8HW = 4,	/* 8 HW dwords, no Storm dwords */
	DBG_BUS_NUM_FRAME_MODES
};

/* Chip constant definitions */
struct chip_defs {
	const char *name;
	u32 num_ilt_pages;
};

/* HW type constant definitions */
struct hw_type_defs {
	const char *name;
	u32 delay_factor;
	u32 dmae_thresh;
	u32 log_thresh;
};

/* RBC reset definitions */
struct rbc_reset_defs {
	u32 reset_reg_addr;
	u32 reset_val[MAX_CHIP_IDS];
};
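
/* Storm constant definitions */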
struct storm_defs {
	char letter;
	enum block_id sem_block_id;
	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
	bool has_vfc;
	u32 sem_fast_mem_addr;
	u32 sem_frame_mode_addr;
	u32 sem_slow_enable_addr;
	u32 sem_slow_mode_addr;
	u32 sem_slow_mode1_conf_addr;
	u32 sem_sync_dbg_empty_addr;
	u32 sem_gpre_vect_addr;
	u32 cm_ctx_wr_addr;
	u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
	u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
};

/* Debug bus constraint operation constant definitions */
struct dbg_bus_constraint_op_defs {
	u8 hw_op_val;
	bool is_cyclic;
};

/* Storm mode definitions */
struct storm_mode_defs {
	const char *name;
	bool is_fast_dbg;
	u8 id_in_hw;
	u32 src_disable_reg_addr;
	u32 src_enable_val;
	bool exists[MAX_CHIP_IDS];
};

/* GRC parameter definitions */
struct grc_param_defs {
	u32 default_val[MAX_CHIP_IDS];
	u32 min;
	u32 max;
	bool is_preset;
	bool is_persistent;
	u32 exclude_all_preset_val;
	u32 crash_preset_val[MAX_CHIP_IDS];
};

/* RSS memory definitions */
struct rss_mem_defs {
	const char *mem_name;
	const char *type_name;
	u32 addr;
	u32 entry_width;
	u32 num_entries[MAX_CHIP_IDS];
};

/* VFC RAM definitions */
struct vfc_ram_defs {
	const char *mem_name;
	const char *type_name;
	u32 base_row;
	u32 num_rows;
};

/* Big RAM definitions */
struct big_ram_defs {
	const char *instance_name;
	enum mem_groups mem_group_id;
	enum mem_groups ram_mem_group_id;
	enum dbg_grc_params grc_param;
	u32 addr_reg_addr;
	u32 data_reg_addr;
	u32 is_256b_reg_addr;
	u32 is_256b_bit_offset[MAX_CHIP_IDS];
	u32 ram_size[MAX_CHIP_IDS];
};

/* PHY definitions */
struct phy_defs {
	const char *phy_name;

	/* PHY base GRC address */
	u32 base_addr;

	/* Relative address of indirect TBUS address register (bits 0..7) */
	u32 tbus_addr_lo_addr;

	/* Relative address of indirect TBUS address register (bits 8..10) */
	u32 tbus_addr_hi_addr;

	/* Relative address of indirect TBUS data register (bits 0..7) */
	u32 tbus_data_lo_addr;

	/* Relative address of indirect TBUS data register (bits 8..11) */
	u32 tbus_data_hi_addr;
};

/* Split type definitions */
struct split_type_defs {
	const char *name;
};

#define BYTES_IN_DWORD sizeof(u32)

#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
	(int)(FIELD_BIT_OFFSET(type, field) / 32)
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
	 FIELD_DWORD_SHIFT(type, field))

#define SET_VAR_FIELD(var, type, field, val) \
	do { \
		var[FIELD_DWORD_OFFSET(type, field)] &= \
		(~FIELD_BIT_MASK(type, field)); \
		var[FIELD_DWORD_OFFSET(type, field)] |= \
		(val) << FIELD_DWORD_SHIFT(type, field); \
	} while (0)

#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
	do { \
		for (i = 0; i < (arr_size); i++) \
			qed_wr(dev, ptt, addr, (arr)[i]); \
	} while (0)

#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)

#define NUM_EXTRA_DBG_LINES(block) \
	(GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
#define NUM_DBG_LINES(block) \
	((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))

#define USE_DMAE true
#define PROTECT_WIDE_BUS true

#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))

#define REG_DUMP_LEN_SHIFT 24
#define MEM_DUMP_ENTRY_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))

#define IDLE_CHK_RULE_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))

#define IDLE_CHK_RESULT_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))

#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))

#define PAGE_MEM_DESC_SIZE_DWORDS \
	BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))

#define IDLE_CHK_MAX_ENTRIES_SIZE 32

#define VFC_CAM_CMD_STRUCT_SIZE 64
#define VFC_CAM_CMD_ROW_OFFSET 48
#define VFC_CAM_CMD_ROW_SIZE 9
#define VFC_CAM_ADDR_STRUCT_SIZE 16
#define VFC_CAM_ADDR_OP_OFFSET 0
#define VFC_CAM_ADDR_OP_SIZE 4
#define VFC_CAM_RESP_STRUCT_SIZE 256
#define VFC_RAM_ADDR_STRUCT_SIZE 16
#define VFC_RAM_ADDR_OP_OFFSET 0
#define VFC_RAM_ADDR_OP_SIZE 2
#define VFC_RAM_ADDR_ROW_OFFSET 2
#define VFC_RAM_ADDR_ROW_SIZE 10
#define VFC_RAM_RESP_STRUCT_SIZE 256

#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)

#define NUM_VFC_RAM_TYPES 4

#define VFC_CAM_NUM_ROWS 512

#define VFC_OPCODE_CAM_RD 14
#define VFC_OPCODE_RAM_RD 0

#define NUM_RSS_MEM_TYPES 5

#define NUM_BIG_RAM_TYPES 3
#define BIG_RAM_NAME_LEN 3

#define NUM_PHY_TBUS_ADDRESSES 2048
#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)

#define RESET_REG_UNRESET_OFFSET 4

#define STALL_DELAY_MS 500

#define STATIC_DEBUG_LINE_DWORDS 9

#define NUM_COMMON_GLOBAL_PARAMS 9

#define MAX_RECURSION_DEPTH 10

#define FW_IMG_MAIN 1

#define REG_FIFO_ELEMENT_DWORDS 2
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#define IGU_FIFO_ELEMENT_DWORDS 4
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)

#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
	(MCP_REG_SCRATCH + \
	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))

#define MAX_SW_PLTAFORM_STR_SIZE 64

#define EMPTY_FW_VERSION_STR "???_???_???_???"
#define EMPTY_FW_IMAGE_STR "???????????????"
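
/* Chip constant definitions array */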
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
	{"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
	{"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
};
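
/* Storm constant definitions array, indexed by enum dbg_storms */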
static struct storm_defs s_storm_defs[] = {
	/* Tstorm */
	{'T', BLOCK_TSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
	 true,
	 TSEM_REG_FAST_MEMORY,
	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
	 TCM_REG_CTX_RBC_ACCS,
	 {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
	  TCM_REG_SM_TASK_CTX},
	 {{4, 16, 2, 4}, {4, 16, 2, 4}}
	},

	/* Mstorm */
	{'M', BLOCK_MSEM,
	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
	 false,
	 MSEM_REG_FAST_MEMORY,
	 MSEM_REG_DBG_FRAME_MODE_BB_K2,
	 MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
	 MSEM_REG_SLOW_DBG_MODE_BB_K2,
	 MSEM_REG_DBG_MODE1_CFG_BB_K2,
	 MSEM_REG_SYNC_DBG_EMPTY,
	 MSEM_REG_DBG_GPRE_VECT,
	 MCM_REG_CTX_RBC_ACCS,
	 {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
	  MCM_REG_SM_TASK_CTX},
	 {{1, 10, 2, 7}, {1, 10, 2, 7}}
	},

	/* Ustorm */
	{'U', BLOCK_USEM,
	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
	 false,
	 USEM_REG_FAST_MEMORY,
	 USEM_REG_DBG_FRAME_MODE_BB_K2,
	 USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
	 USEM_REG_SLOW_DBG_MODE_BB_K2,
	 USEM_REG_DBG_MODE1_CFG_BB_K2,
	 USEM_REG_SYNC_DBG_EMPTY,
	 USEM_REG_DBG_GPRE_VECT,
	 UCM_REG_CTX_RBC_ACCS,
	 {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
	  UCM_REG_SM_TASK_CTX},
	 {{2, 13, 3, 3}, {2, 13, 3, 3}}
	},

	/* Xstorm */
	{'X', BLOCK_XSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
	 false,
	 XSEM_REG_FAST_MEMORY,
	 XSEM_REG_DBG_FRAME_MODE_BB_K2,
	 XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
	 XSEM_REG_SLOW_DBG_MODE_BB_K2,
	 XSEM_REG_DBG_MODE1_CFG_BB_K2,
	 XSEM_REG_SYNC_DBG_EMPTY,
	 XSEM_REG_DBG_GPRE_VECT,
	 XCM_REG_CTX_RBC_ACCS,
	 {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
	 {{9, 15, 0, 0}, {9, 15, 0, 0}}
	},

	/* Ystorm */
	{'Y', BLOCK_YSEM,
	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
	 false,
	 YSEM_REG_FAST_MEMORY,
	 YSEM_REG_DBG_FRAME_MODE_BB_K2,
	 YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
	 YSEM_REG_SLOW_DBG_MODE_BB_K2,
	 YSEM_REG_DBG_MODE1_CFG_BB_K2,
	 YSEM_REG_SYNC_DBG_EMPTY,
	 YSEM_REG_DBG_GPRE_VECT,
	 YCM_REG_CTX_RBC_ACCS,
	 {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
	  YCM_REG_SM_TASK_CTX},
	 {{2, 3, 2, 12}, {2, 3, 2, 12}}
	},

	/* Pstorm */
	{'P', BLOCK_PSEM,
	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
	 true,
	 PSEM_REG_FAST_MEMORY,
	 PSEM_REG_DBG_FRAME_MODE_BB_K2,
	 PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
	 PSEM_REG_SLOW_DBG_MODE_BB_K2,
	 PSEM_REG_DBG_MODE1_CFG_BB_K2,
	 PSEM_REG_SYNC_DBG_EMPTY,
	 PSEM_REG_DBG_GPRE_VECT,
	 PCM_REG_CTX_RBC_ACCS,
	 {0, PCM_REG_SM_CON_CTX, 0, 0},
	 {{0, 10, 0, 0}, {0, 10, 0, 0}}
	},
};

static struct hw_type_defs s_hw_type_defs[] = {
	/* HW_TYPE_ASIC */
	{"asic", 1, 256, 32768},
	{"reserved", 0, 0, 0},
	{"reserved2", 0, 0, 0},
	{"reserved3", 0, 0, 0}
};
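
/* GRC parameter definitions array, indexed by enum dbg_grc_params.
 * Each entry is: {default value per chip, min, max, is_preset, is_persistent,
 * exclude_all preset value, crash preset value per chip}.
 */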
static struct grc_param_defs s_grc_param_defs[] = {
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
	{{1, 1}, 0, 1, false, false, 1, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
	{{0, 0}, 0, 1, false, false, 0, {1, 1}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},
	{{0, 0}, 0, 1, true, false, 0, {0, 0}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{1, 1}, 0, 1, false, false, 0, {1, 1}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{0, 0}, 0, 1, false, false, 0, {0, 0}},
	{{0, 1}, 0, 1, false, false, 0, {0, 1}},
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},
	{{1, 1}, 0, 1, false, false, 0, {0, 0}},
	{{0, 0}, 0, 1, false, false, 0, {1, 1}}
};

static struct rss_mem_defs s_rss_mem_defs[] = {
	{"rss_mem_cid", "rss_cid", 0, 32,
	 {256, 320}},

	{"rss_mem_key_msb", "rss_key", 1024, 256,
	 {128, 208}},

	{"rss_mem_key_lsb", "rss_key", 2048, 64,
	 {128, 208}},

	{"rss_mem_info", "rss_info", 3072, 16,
	 {128, 208}},

	{"rss_mem_ind", "rss_ind", 4096, 16,
	 {16384, 26624}}
};

static struct vfc_ram_defs s_vfc_ram_defs[] = {
	{"vfc_ram_tt1", "vfc_ram", 0, 512},
	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
	{"vfc_ram_stt2", "vfc_ram", 640, 32},
	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
};

static struct big_ram_defs s_big_ram_defs[] = {
	{"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
	 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 0},
	 {153600, 180224}},

	{"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
	 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
	 MISC_REG_BLOCK_256B_EN, {0, 1},
	 {92160, 117760}},

	{"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
	 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
	 MISCS_REG_BLOCK_256B_EN, {0, 0},
	 {36864, 36864}}
};

static struct rbc_reset_defs s_rbc_reset_defs[] = {
	{MISCS_REG_RESET_PL_HV,
	 {0x0, 0x400}},
	{MISC_REG_RESET_PL_PDA_VMAIN_1,
	 {0x4404040, 0x4404040}},
	{MISC_REG_RESET_PL_PDA_VMAIN_2,
	 {0x7, 0x7c00007}},
	{MISC_REG_RESET_PL_PDA_VAUX,
	 {0x2, 0x2}},
};

static struct phy_defs s_phy_defs[] = {
	{"nw_phy", NWS_REG_NWS_CMU_K2,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
	{"sgmii_phy", MS_REG_MS_CMU_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
};

static struct split_type_defs s_split_type_defs[] = {
	/* SPLIT_TYPE_NONE */
	{"eng"},

	/* SPLIT_TYPE_PORT */
	{"port"},

	/* SPLIT_TYPE_PF */
	{"pf"},

	/* SPLIT_TYPE_PORT_PF */
	{"port"},

	/* SPLIT_TYPE_VF */
	{"vf"}
};
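
/* Reads and returns a single dword from the specified unaligned buffer */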
static u32 qed_read_unaligned_dword(u8 *buf)
{
	u32 dword;

	memcpy((u8 *)&dword, buf, sizeof(dword));
	return dword;
}

/* Sets the value of the specified GRC param */
static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
			      enum dbg_grc_params grc_param, u32 val)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	dev_data->grc.param_val[grc_param] = val;
}

/* Returns the value of the specified GRC param */
static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
			     enum dbg_grc_params grc_param)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	return dev_data->grc.param_val[grc_param];
}

/* Initializes the GRC parameters (if not initialized yet) */
static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	if (!dev_data->grc.params_initialized) {
		qed_dbg_grc_set_params_default(p_hwfn);
		dev_data->grc.params_initialized = 1;
	}
}

/* Sets pointer and size for the specified binary buffer type */
static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
				enum bin_dbg_buffer_type buf_type,
				const u32 *ptr, u32 size)
{
	struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];

	buf->ptr = (void *)ptr;
	buf->size = size;
}

/* Initializes debug data for the specified device */
static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 num_pfs = 0, max_pfs_per_port = 0;

	if (dev_data->initialized)
		return DBG_STATUS_OK;

	/* Set chip */
	if (QED_IS_K2(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_K2;
		dev_data->mode_enable[MODE_K2] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_K2;
		num_pfs = MAX_NUM_PFS_K2;
		max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
		dev_data->chip_id = CHIP_BB;
		dev_data->mode_enable[MODE_BB] = 1;
		dev_data->num_vfs = MAX_NUM_VFS_BB;
		num_pfs = MAX_NUM_PFS_BB;
		max_pfs_per_port = MAX_NUM_PFS_BB;
	} else {
		return DBG_STATUS_UNKNOWN_CHIP;
	}

	/* Set HW type */
	dev_data->hw_type = HW_TYPE_ASIC;
	dev_data->mode_enable[MODE_ASIC] = 1;

	/* Set port mode */
	switch (p_hwfn->cdev->num_ports_in_engine) {
	case 1:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
		break;
	case 2:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
		break;
	case 4:
		dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
		break;
	}

	/* Set 100G mode */
	if (QED_IS_CMT(p_hwfn->cdev))
		dev_data->mode_enable[MODE_100G] = 1;

	/* Set number of ports */
	if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
	    dev_data->mode_enable[MODE_100G])
		dev_data->num_ports = 1;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
		dev_data->num_ports = 2;
	else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
		dev_data->num_ports = 4;

	/* Set number of PFs per port */
	dev_data->num_pfs_per_port = min_t(u32,
					   num_pfs / dev_data->num_ports,
					   max_pfs_per_port);

	/* Initialize the GRC parameters */
	qed_dbg_grc_init_params(p_hwfn);

	dev_data->use_dmae = true;
	dev_data->initialized = 1;

	return DBG_STATUS_OK;
}

static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
					     enum block_id block_id)
{
	const struct dbg_block *dbg_block;

	dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
	return dbg_block + block_id;
}

static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
							       *p_hwfn,
							       enum block_id
							       block_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	return (const struct dbg_block_chip *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
	    block_id * MAX_CHIP_IDS + dev_data->chip_id;
}

static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
							 *p_hwfn,
							 u8 reset_reg_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	return (const struct dbg_reset_reg *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
	    reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
}
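
/* Reads the FW info structure for the specified Storm from the chip,
 * and writes it to the specified fw_info pointer.
 */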
static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u8 storm_id, struct fw_info *fw_info)
{
	struct storm_defs *storm = &s_storm_defs[storm_id];
	struct fw_info_location fw_info_location;
	u32 addr, i, size, *dest;

	memset(&fw_info_location, 0, sizeof(fw_info_location));
	memset(fw_info, 0, sizeof(*fw_info));

	/* Read first the address that points to fw_info location.
	 * The address is located in the last line of the Storm RAM.
	 */
	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
	    DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
	    sizeof(fw_info_location);

	dest = (u32 *)&fw_info_location;
	size = BYTES_TO_DWORDS(sizeof(fw_info_location));

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	/* qed_rd() fetches data in CPU byteorder. Swap it back to
	 * the device's expected byteorder.
	 */
	cpu_to_le32_array(dest, size);

	/* Read FW version info from Storm RAM */
	size = le32_to_cpu(fw_info_location.size);
	if (!size || size > sizeof(*fw_info))
		return;

	addr = le32_to_cpu(fw_info_location.grc_addr);
	dest = (u32 *)fw_info;
	size = BYTES_TO_DWORDS(size);

	for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
		dest[i] = qed_rd(p_hwfn, p_ptt, addr);

	cpu_to_le32_array(dest, size);
}
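
/* Dumps the specified string to the specified buffer.
 * Returns the dumped size in bytes.
 */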
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
	if (dump)
		strcpy(dump_buf, str);

	return (u32)strlen(str) + 1;
}

/* Dumps zeros to align the specified buffer to a dword boundary.
 * Returns the dumped size in bytes.
 */
static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
{
	u8 offset_in_dword, align_size;

	offset_in_dword = (u8)(byte_offset & 0x3);
	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;

	if (dump && align_size)
		memset(dump_buf, 0, align_size);

	return align_size;
}

/* Writes the specified string param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_str_param(u32 *dump_buf,
			      bool dump,
			      const char *param_name, const char *param_val)
{
	char *char_buf = (char *)dump_buf;
	u32 offset = 0;

	/* Dump param name */
	offset += qed_dump_str(char_buf + offset, dump, param_name);

	/* Indicate a string param value */
	if (dump)
		*(char_buf + offset) = 1;
	offset++;

	/* Dump param value */
	offset += qed_dump_str(char_buf + offset, dump, param_val);

	/* Align buffer to next dword */
	offset += qed_dump_align(char_buf + offset, dump, offset);

	return BYTES_TO_DWORDS(offset);
}

/* Writes the specified numeric param to the specified buffer.
 * Returns the dumped size in dwords.
 */
static u32 qed_dump_num_param(u32 *dump_buf,
			      bool dump, const char *param_name, u32 param_val)
{
	char *char_buf = (char *)dump_buf;
	u32 offset = 0;

	/* Dump param name */
	offset += qed_dump_str(char_buf + offset, dump, param_name);

	/* Indicate a numeric param value */
	if (dump)
		*(char_buf + offset) = 0;
	offset++;

	/* Align buffer to next dword */
	offset += qed_dump_align(char_buf + offset, dump, offset);

	/* Dump param value (and change offset from bytes to dwords) */
	offset = BYTES_TO_DWORDS(offset);
	if (dump)
		*(dump_buf + offset) = param_val;
	offset++;

	return offset;
}
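
/* Reads the FW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 */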
static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
{
	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
	struct fw_info fw_info = { {0}, {0} };
	u32 offset = 0;

	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		/* Read FW info from chip */
		qed_read_fw_info(p_hwfn, p_ptt, &fw_info);

		/* Create FW version/image strings */
		if (snprintf(fw_ver_str, sizeof(fw_ver_str),
			     "%d_%d_%d_%d", fw_info.ver.num.major,
			     fw_info.ver.num.minor, fw_info.ver.num.rev,
			     fw_info.ver.num.eng) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid FW version string\n");
		switch (fw_info.ver.image_id) {
		case FW_IMG_MAIN:
			strcpy(fw_img_str, "main");
			break;
		default:
			strcpy(fw_img_str, "unknown");
			break;
		}
	}

	/* Dump FW version, image and timestamp */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-version", fw_ver_str);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "fw-image", fw_img_str);
	offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
				     le32_to_cpu(fw_info.ver.timestamp));

	return offset;
}
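
/* Reads the MFW version and writes it as a param to the specified buffer.
 * Returns the dumped size in dwords.
 */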
static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf, bool dump)
{
	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;

	if (dump &&
	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
		u32 global_section_offsize, global_section_addr, mfw_ver;
		u32 public_data_addr, global_section_offsize_addr;

		/* Find MCP public data GRC address (ORed with
		 * MCP_REG_SCRATCH).
		 */
		public_data_addr = qed_rd(p_hwfn,
					  p_ptt,
					  MISC_REG_SHARED_MEM_ADDR) |
				   MCP_REG_SCRATCH;

		/* Find MCP public global section offset */
		global_section_offsize_addr = public_data_addr +
					      offsetof(struct mcp_public_data,
						       sections) +
					      sizeof(offsize_t) * PUBLIC_GLOBAL;
		global_section_offsize = qed_rd(p_hwfn, p_ptt,
						global_section_offsize_addr);
		global_section_addr =
			MCP_REG_SCRATCH +
			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;

		/* Read MFW version from MCP public global section */
		mfw_ver = qed_rd(p_hwfn, p_ptt,
				 global_section_addr +
				 offsetof(struct public_global, mfw_ver));

		/* Dump MFW version param */
		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
			DP_NOTICE(p_hwfn,
				  "Unexpected debug error: invalid MFW version string\n");
	}

	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
}
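
/* Reads the chip revision from the chip and writes it as a param to the
 * specified buffer. Returns the dumped size in dwords.
 */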
static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 *dump_buf, bool dump)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	char param_str[3] = "??";

	if (dev_data->hw_type == HW_TYPE_ASIC) {
		u32 chip_rev, chip_metal;

		chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
		chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);

		param_str[0] = 'a' + (u8)chip_rev;
		param_str[1] = '0' + (u8)chip_metal;
	}

	return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
}
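
/* Writes a section header to the specified buffer.
 * Returns the dumped size in dwords.
 */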
static u32 qed_dump_section_hdr(u32 *dump_buf,
				bool dump, const char *name, u32 num_params)
{
	return qed_dump_num_param(dump_buf, dump, name, num_params);
}
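
/* Writes the common global params to the specified buffer.
 * Returns the dumped size in dwords.
 */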
static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *dump_buf,
					 bool dump,
					 u8 num_specific_global_params)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0;
	u8 num_params;

	/* Dump global params section header */
	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
		(dev_data->chip_id == CHIP_BB ? 1 : 0);
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "global_params", num_params);

	/* Store params */
	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
	offset += qed_dump_mfw_ver_param(p_hwfn,
					 p_ptt, dump_buf + offset, dump);
	offset += qed_dump_chip_revision_param(p_hwfn,
					       p_ptt, dump_buf + offset, dump);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "tools-version", TOOLS_VERSION);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "chip",
				     s_chip_defs[dev_data->chip_id].name);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump,
				     "platform",
				     s_hw_type_defs[dev_data->hw_type].name);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "pci-func", p_hwfn->abs_pf_id);
	if (dev_data->chip_id == CHIP_BB)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "path", QED_PATH_ID(p_hwfn));

	return offset;
}
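
/* Writes the "last" section (including CRC) to the specified buffer.
 * Returns the dumped size in dwords.
 */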
static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
{
	u32 start_offset = offset;

	/* Dump CRC section header */
	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);

	/* Calculate CRC32 and add it to the dword after the "last" section */
	if (dump)
		*(dump_buf + offset) = ~crc32(0xffffffff,
					      (u8 *)dump_buf,
					      DWORDS_TO_BYTES(offset));

	offset++;

	return offset - start_offset;
}
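
/* Update blocks reset state */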
static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
	u8 rst_reg_id;
	u32 blk_id;

	/* Read reset registers */
	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
		const struct dbg_reset_reg *rst_reg;
		bool rst_reg_removed;
		u32 rst_reg_addr;

		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
		rst_reg_removed = GET_FIELD(rst_reg->data,
					    DBG_RESET_REG_IS_REMOVED);
		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
							 DBG_RESET_REG_ADDR));

		if (!rst_reg_removed)
			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
						     rst_reg_addr);
	}

	/* Check if blocks are in reset */
	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
		const struct dbg_block_chip *blk;
		bool has_rst_reg;
		bool is_removed;

		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
		has_rst_reg = GET_FIELD(blk->flags,
					DBG_BLOCK_CHIP_HAS_RESET_REG);

		if (!is_removed && has_rst_reg)
			dev_data->block_in_reset[blk_id] =
			    !(reg_val[blk->reset_reg_id] &
			      BIT(blk->reset_reg_bit_offset));
	}
}
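
/* Recursively evaluates the mode expression tree at modes_buf_offset.
 * Returns true if the mode expression evaluates to true.
 */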
static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
				  u16 *modes_buf_offset, u8 rec_depth)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 *dbg_array;
	bool arg1, arg2;
	u8 tree_val;

	if (rec_depth > MAX_RECURSION_DEPTH) {
		DP_NOTICE(p_hwfn,
			  "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
		return false;
	}

	/* Get next element from modes tree buffer */
	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
	tree_val = dbg_array[(*modes_buf_offset)++];

	switch (tree_val) {
	case INIT_MODE_OP_NOT:
		return !qed_is_mode_match_rec(p_hwfn,
					      modes_buf_offset, rec_depth + 1);
	case INIT_MODE_OP_OR:
	case INIT_MODE_OP_AND:
		arg1 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		arg2 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
							arg2) : (arg1 && arg2);
	default:
		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
	}
}
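
/* Returns true if the mode (specified using modes_buf_offset) is enabled */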
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
{
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
}

/* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, bool enable)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
}

/* Resets the Debug block */
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
	const struct dbg_reset_reg *reset_reg;
	const struct dbg_block_chip *block;

	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
	reset_reg_addr =
	    DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));

	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
	new_reset_reg_val =
	    old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);

	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
}

/* Enable / disable Debug Bus clients according to the specified mask
 * (1 = enable, 0 = disable).
 */
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
{
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
}

static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum block_id block_id,
				    u8 line_id,
				    u8 enable_mask,
				    u8 right_shift,
				    u8 force_valid_mask, u8 force_frame_mask)
{
	const struct dbg_block_chip *block =
		qed_get_dbg_block_per_chip(p_hwfn, block_id);

	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
	       line_id);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
	       enable_mask);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
	       right_shift);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
	       force_valid_mask);
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
	       force_frame_mask);
}

/* Disable debug bus in all blocks */
static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id;

	/* Iterate over all blocks */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn,
					       (enum block_id)block_id);

		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_IS_REMOVED) ||
		    dev_data->block_in_reset[block_id])
			continue;

		/* Disable debug bus */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
			u32 dbg_en_addr =
				block_per_chip->dbg_dword_enable_reg_addr;
			u16 modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			bool eval_mode =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;

			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_wr(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(dbg_en_addr),
				       0);
		}
	}
}
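
/* Returns true if the specified entity (indicated by GRC param) should be
 * included in the dump, false otherwise.
 */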
static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
				enum dbg_grc_params grc_param)
{
	return qed_grc_get_param(p_hwfn, grc_param) > 0;
}

/* Returns the storm_id that matches the specified storm letter,
 * or MAX_DBG_STORMS if no match is found.
 */
static enum dbg_storms qed_get_id_from_letter(char storm_letter)
{
	u8 storm_id;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
		if (s_storm_defs[storm_id].letter == storm_letter)
			return (enum dbg_storms)storm_id;

	return MAX_DBG_STORMS;
}

/* Returns true if the specified Storm should be included in the dump, false
 * otherwise.
 */
static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
				      enum dbg_storms storm)
{
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
}

/* Returns true if the specified memory should be included in the dump, false
 * otherwise.
 */
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
{
	const struct dbg_block *block;
	u8 i;

	block = get_dbg_block(p_hwfn, block_id);

	/* If the block is associated with a Storm, check Storm match */
	if (block->associated_storm_letter) {
		enum dbg_storms associated_storm_id =
		    qed_get_id_from_letter(block->associated_storm_letter);

		if (associated_storm_id == MAX_DBG_STORMS ||
		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
			return false;
	}

	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id ||
		    mem_group_id == big_ram->ram_mem_group_id)
			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
	}

	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
	case MEM_GROUP_RAM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_CAU_MEM_EXT:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
	case MEM_GROUP_QM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
		       qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
	case MEM_GROUP_DORQ_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
	case MEM_GROUP_IOR:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
	default:
		return true;
	}
}

/* Stalls all Storms */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, bool stall)
{
	u32 reg_addr;
	u8 storm_id;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		if (!qed_grc_is_storm_included(p_hwfn,
					       (enum dbg_storms)storm_id))
			continue;

		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
		    SEM_FAST_REG_STALL_0_BB_K2;
		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
	}

	msleep(STALL_DELAY_MS);
}
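
/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
 * taken out of reset.
 */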
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, bool rbc_only)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 chip_id = dev_data->chip_id;
	u32 i;

	/* Take RBCs out of reset */
	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
			qed_wr(p_hwfn,
			       p_ptt,
			       s_rbc_reset_defs[i].reset_reg_addr +
			       RESET_REG_UNRESET_OFFSET,
			       s_rbc_reset_defs[i].reset_val[chip_id]);

	if (!rbc_only) {
		u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
		u8 reset_reg_id;
		u32 block_id;

		/* Fill reset regs values */
		for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
			bool is_removed, has_reset_reg, unreset_before_dump;
			const struct dbg_block_chip *block;

			block = qed_get_dbg_block_per_chip(p_hwfn,
							   (enum block_id)
							   block_id);
			is_removed =
			    GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
			has_reset_reg =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_HAS_RESET_REG);
			unreset_before_dump =
			    GET_FIELD(block->flags,
				      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);

			if (!is_removed && has_reset_reg && unreset_before_dump)
				reg_val[block->reset_reg_id] |=
				    BIT(block->reset_reg_bit_offset);
		}

		/* Write reset registers */
		for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		     reset_reg_id++) {
			const struct dbg_reset_reg *reset_reg;
			u32 reset_reg_addr;

			reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

			if (GET_FIELD
			    (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
				continue;

			if (reg_val[reset_reg_id]) {
				reset_reg_addr =
				    GET_FIELD(reset_reg->data,
					      DBG_RESET_REG_ADDR);
				qed_wr(p_hwfn,
				       p_ptt,
				       DWORDS_TO_BYTES(reset_reg_addr) +
				       RESET_REG_UNRESET_OFFSET,
				       reg_val[reset_reg_id]);
			}
		}
	}
}

/* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type)
{
	const struct dbg_attn_block *base_attn_block_arr =
	    (const struct dbg_attn_block *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;

	return &base_attn_block_arr[block_id].per_type_data[attn_type];
}

/* Returns the attention registers of the specified block */
static const struct dbg_attn_reg *
qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type,
			u8 *num_attn_regs)
{
	const struct dbg_attn_block_type_data *block_type_data =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);

	*num_attn_regs = block_type_data->num_regs;

	return (const struct dbg_attn_reg *)
		p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
		block_type_data->regs_offset;
}
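
/* For each block, clear the status of all parities */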
static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 reg_idx, num_attn_regs;
	u32 block_id;

	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		if (dev_data->block_in_reset[block_id])
			continue;

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,
						       ATTN_TYPE_PARITY,
						       &num_attn_regs);

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
				&attn_reg_arr[reg_idx];
			u16 modes_buf_offset;
			bool eval_mode;

			/* Check mode */
			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			modes_buf_offset =
				GET_FIELD(reg_data->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);

			/* If mode matches, read the parity status
			 * read-clear register.
			 */
			if (!eval_mode ||
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_rd(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(reg_data->sts_clr_address));
		}
	}
}
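
/* Dumps GRC registers section header. Returns the dumped size in dwords.
 * The following parameters are dumped:
 * - count: no. of dumped entries
 * - split_type: split type
 * - split_id: split ID (dumped only if split_type != SPLIT_TYPE_NONE)
 * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
 */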
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
				 bool dump,
				 u32 num_reg_entries,
				 enum init_split_types split_type,
				 u8 split_id, const char *reg_type_name)
{
	u8 num_params = 2 +
	    (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
	u32 offset = 0;

	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_regs", num_params);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "count", num_reg_entries);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "split",
				     s_split_type_defs[split_type].name);
	if (split_type != SPLIT_TYPE_NONE)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "id", split_id);
	if (reg_type_name)
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "type", reg_type_name);

	return offset;
}
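
/* Reads the specified registers into the specified buffer.
 * The addr and len arguments are specified in dwords.
 */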
void qed_read_regs(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
{
	u32 i;

	for (i = 0; i < len; i++)
		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
}
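
/* Dumps the GRC registers in the specified address range.
 * Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */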
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf,
				   bool dump, u32 addr, u32 len, bool wide_bus,
				   enum init_split_types split_type,
				   u8 split_id)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
	bool read_using_dmae = false;
	u32 thresh;

	if (!dump)
		return len;

	switch (split_type) {
	case SPLIT_TYPE_PORT:
		port_id = split_id;
		break;
	case SPLIT_TYPE_PF:
		pf_id = split_id;
		break;
	case SPLIT_TYPE_PORT_PF:
		port_id = split_id / dev_data->num_pfs_per_port;
		pf_id = port_id + dev_data->num_ports *
		    (split_id % dev_data->num_pfs_per_port);
		break;
	case SPLIT_TYPE_VF:
		vf_id = split_id;
		break;
	default:
		break;
	}

	/* Try reading using DMAE */
	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
	     (PROTECT_WIDE_BUS && wide_bus))) {
		struct qed_dmae_params dmae_params;

		/* Set DMAE params */
		memset(&dmae_params, 0, sizeof(dmae_params));
		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			dmae_params.port_id = port_id;
			break;
		case SPLIT_TYPE_PF:
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.src_pfid = pf_id;
			break;
		case SPLIT_TYPE_PORT_PF:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
				  1);
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.port_id = port_id;
			dmae_params.src_pfid = pf_id;
			break;
		default:
			break;
		}

		/* Execute DMAE command */
		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
						     p_ptt,
						     DWORDS_TO_BYTES(addr),
						     (u64)(uintptr_t)(dump_buf),
						     len, &dmae_params);
		if (!read_using_dmae) {
			dev_data->use_dmae = 0;
			DP_VERBOSE(p_hwfn,
				   QED_MSG_DEBUG,
				   "Failed reading from chip using DMAE, using GRC instead\n");
		}
	}

	if (read_using_dmae)
		goto print_log;

	/* If not read using DMAE, read using GRC */

	/* Set pretend */
	if (split_type != dev_data->pretend.split_type ||
	    split_id != dev_data->pretend.split_id) {
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			qed_port_pretend(p_hwfn, p_ptt, port_id);
			break;
		case SPLIT_TYPE_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		case SPLIT_TYPE_PORT_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					  pf_id);
			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
			break;
		case SPLIT_TYPE_VF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
			    | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
					  vf_id);
			qed_fid_pretend(p_hwfn, p_ptt, fid);
			break;
		default:
			break;
		}

		dev_data->pretend.split_type = (u8)split_type;
		dev_data->pretend.split_id = split_id;
	}

	/* Read registers using GRC */
	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);

print_log:
	/* Print log */
	dev_data->num_regs_read += len;
	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
	if ((dev_data->num_regs_read / thresh) >
	    ((dev_data->num_regs_read - len) / thresh))
		DP_VERBOSE(p_hwfn,
			   QED_MSG_DEBUG,
			   "Dumped %d registers...\n", dev_data->num_regs_read);

	return len;
}
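
/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */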
static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
				      bool dump, u32 addr, u32 len)
{
	if (dump)
		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);

	return 1;
}
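
/* Dumps GRC registers sequence. Returns the dumped size in dwords.
 * The addr and len arguments are specified in dwords.
 */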
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump, u32 addr, u32 len, bool wide_bus,
				  enum init_split_types split_type, u8 split_id)
{
	u32 offset = 0;

	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
	offset += qed_grc_dump_addr_range(p_hwfn,
					  p_ptt,
					  dump_buf + offset,
					  dump, addr, len, wide_bus,
					  split_type, split_id);

	return offset;
}
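
/* Dumps GRC registers sequence with skip cycle.
 * Returns the dumped size in dwords.
 * - addr:	start GRC address in dwords
 * - total_len:	total no. of dwords to dump
 * - read_len:	no. of consecutive dwords to read in each cycle
 * - skip_len:	no. of dwords to skip (and fill with zeros) in each cycle
 */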
static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 *dump_buf,
				       bool dump,
				       u32 addr,
				       u32 total_len,
				       u32 read_len, u32 skip_len)
{
	u32 offset = 0, reg_offset = 0;

	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);

	if (!dump)
		return offset + total_len;

	while (reg_offset < total_len) {
		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);

		offset += qed_grc_dump_addr_range(p_hwfn,
						  p_ptt,
						  dump_buf + offset,
						  dump, addr, curr_len, false,
						  SPLIT_TYPE_NONE, 0);
		reg_offset += curr_len;
		addr += curr_len;

		if (reg_offset < total_len) {
			curr_len = min_t(u32, skip_len, total_len - skip_len);
			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
			offset += curr_len;
			reg_offset += curr_len;
			addr += curr_len;
		}
	}

	return offset;
}
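
/* Dumps GRC registers entries. Returns the dumped size in dwords. */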
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct virt_mem_desc input_regs_arr,
				     u32 *dump_buf,
				     bool dump,
				     enum init_split_types split_type,
				     u8 split_id,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
{
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    input_regs_arr.ptr + input_offset++;
		u16 modes_buf_offset;
		bool eval_mode;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		if (eval_mode) {
			modes_buf_offset =
				GET_FIELD(cond_hdr->mode.data,
					  DBG_MODE_HDR_MODES_BUF_OFFSET);
			mode_match = qed_is_mode_match(p_hwfn,
						       &modes_buf_offset);
		}

		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;
			continue;
		}

		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg =
			    (const struct dbg_dump_reg *)
			    input_regs_arr.ptr + input_offset;
			u32 addr, len;
			bool wide_bus;

			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 p_ptt,
							 dump_buf + offset,
							 dump,
							 addr,
							 len,
							 wide_bus,
							 split_type, split_id);
			(*num_dumped_reg_entries)++;
		}
	}

	return offset;
}
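
/* Dumps GRC registers entries for the specified split type/ID.
 * Returns the dumped size in dwords.
 */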
static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct virt_mem_desc input_regs_arr,
				   u32 *dump_buf,
				   bool dump,
				   bool block_enable[MAX_BLOCK_ID],
				   enum init_split_types split_type,
				   u8 split_id, const char *reg_type_name)
{
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum init_split_types hdr_split_type = split_type;
	u32 num_dumped_reg_entries, offset;
	u8 hdr_split_id = split_id;

	/* In PORT_PF split type, print a port split header */
	if (split_type == SPLIT_TYPE_PORT_PF) {
		hdr_split_type = SPLIT_TYPE_PORT;
		hdr_split_id = split_id / dev_data->num_pfs_per_port;
	}

	/* Calculate register dump header size (and skip it for now) */
	offset = qed_grc_dump_regs_hdr(dump_buf,
				       false,
				       0,
				       hdr_split_type,
				       hdr_split_id, reg_type_name);

	/* Dump registers */
	offset += qed_grc_dump_regs_entries(p_hwfn,
					    p_ptt,
					    input_regs_arr,
					    dump_buf + offset,
					    dump,
					    split_type,
					    split_id,
					    block_enable,
					    &num_dumped_reg_entries);

	/* Write register dump header */
	if (dump && num_dumped_reg_entries > 0)
		qed_grc_dump_regs_hdr(dump_buf,
				      dump,
				      num_dumped_reg_entries,
				      hdr_split_type,
				      hdr_split_id, reg_type_name);

	return num_dumped_reg_entries > 0 ? offset : 0;
}
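
/* Dumps registers according to the input registers array.
 * Returns the dumped size in dwords.
 */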
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  u32 *dump_buf,
				  bool dump,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *reg_type_name)
{
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;

	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_regs_arr;
		enum init_split_types split_type;
		u16 split_count = 0;
		u32 split_data_size;
		u8 split_id;

		split_hdr =
		    (const struct dbg_dump_split_hdr *)
		    dbg_buf->ptr + input_offset++;
		split_type =
		    GET_FIELD(split_hdr->hdr,
			      DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
		    input_offset;
		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);

		switch (split_type) {
		case SPLIT_TYPE_NONE:
			split_count = 1;
			break;
		case SPLIT_TYPE_PORT:
			split_count = dev_data->num_ports;
			break;
		case SPLIT_TYPE_PF:
		case SPLIT_TYPE_PORT_PF:
			split_count = dev_data->num_ports *
			    dev_data->num_pfs_per_port;
			break;
		case SPLIT_TYPE_VF:
			split_count = dev_data->num_vfs;
			break;
		default:
			return 0;
		}

		for (split_id = 0; split_id < split_count; split_id++)
			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
							  curr_input_regs_arr,
							  dump_buf + offset,
							  dump, block_enable,
							  split_type,
							  split_id,
							  reg_type_name);

		input_offset += split_data_size;
	}

	/* Cancel pretends (pretend to original PF) */
	if (dump) {
		qed_fid_pretend(p_hwfn, p_ptt,
				FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
					    p_hwfn->rel_pf_id));
		dev_data->pretend.split_type = SPLIT_TYPE_NONE;
		dev_data->pretend.split_id = 0;
	}

	return offset;
}
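
/* Dump reset registers. Returns the dumped size in dwords. */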
static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf, bool dump)
{
	u32 offset = 0, num_regs = 0;
	u8 reset_reg_id;

	/* Calculate header size (and skip it for now) */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false,
					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");

	/* Write reset registers */
	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
	     reset_reg_id++) {
		const struct dbg_reset_reg *reset_reg;
		u32 reset_reg_addr;

		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
			continue;

		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 p_ptt,
						 dump_buf + offset,
						 dump,
						 reset_reg_addr,
						 1, false, SPLIT_TYPE_NONE, 0);
		num_regs++;
	}

	/* Write header */
	if (dump)
		qed_grc_dump_regs_hdr(dump_buf,
				      true, num_regs, SPLIT_TYPE_NONE,
				      0, "RESET_REGS");

	return offset;
}
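
/* Dump registers that are modified during GRC Dump and therefore must be
 * dumped first. Returns the dumped size in dwords.
 */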
2240static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2241 struct qed_ptt *p_ptt,
2242 u32 *dump_buf, bool dump)
2243{
2244 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2245 u32 block_id, offset = 0, stall_regs_offset;
2246 const struct dbg_attn_reg *attn_reg_arr;
2247 u8 storm_id, reg_idx, num_attn_regs;
2248 u32 num_reg_entries = 0;
2249
2250
2251 offset += qed_grc_dump_regs_hdr(dump_buf,
2252 false,
2253 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2254
2255
2256 for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
2257 if (dev_data->block_in_reset[block_id] && dump)
2258 continue;
2259
2260 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
2261 (enum block_id)block_id,
2262 ATTN_TYPE_PARITY,
2263 &num_attn_regs);
2264
2265 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2266 const struct dbg_attn_reg *reg_data =
2267 &attn_reg_arr[reg_idx];
2268 u16 modes_buf_offset;
2269 bool eval_mode;
2270 u32 addr;
2271
2272
2273 eval_mode = GET_FIELD(reg_data->mode.data,
2274 DBG_MODE_HDR_EVAL_MODE) > 0;
2275 modes_buf_offset =
2276 GET_FIELD(reg_data->mode.data,
2277 DBG_MODE_HDR_MODES_BUF_OFFSET);
2278 if (eval_mode &&
2279 !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2280 continue;
2281
2282
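   /* Dump the attention mask register, then the attention status register */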
2283 addr = reg_data->mask_address;
2284 offset += qed_grc_dump_reg_entry(p_hwfn,
2285 p_ptt,
2286 dump_buf + offset,
2287 dump,
2288 addr,
2289 1, false,
2290 SPLIT_TYPE_NONE, 0);
2291 addr = GET_FIELD(reg_data->data,
2292 DBG_ATTN_REG_STS_ADDRESS);
2293 offset += qed_grc_dump_reg_entry(p_hwfn,
2294 p_ptt,
2295 dump_buf + offset,
2296 dump,
2297 addr,
2298 1, false,
2299 SPLIT_TYPE_NONE, 0);
2300 num_reg_entries += 2;
2301 }
2302 }
2303
2304
2305 if (dump)
2306 qed_grc_dump_regs_hdr(dump_buf,
2307 true,
2308 num_reg_entries,
2309 SPLIT_TYPE_NONE, 0, "ATTN_REGS");
2310
2311
2312 stall_regs_offset = offset;
2313 offset += qed_grc_dump_regs_hdr(dump_buf,
2314 false, 0, SPLIT_TYPE_NONE, 0, "REGS");
2315
2316
2317 for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
2318 storm_id++) {
2319 struct storm_defs *storm = &s_storm_defs[storm_id];
2320 u32 addr;
2321
2322 if (dev_data->block_in_reset[storm->sem_block_id] && dump)
2323 continue;
2324
2325 addr =
2326 BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
2327 SEM_FAST_REG_STALLED);
2328 offset += qed_grc_dump_reg_entry(p_hwfn,
2329 p_ptt,
2330 dump_buf + offset,
2331 dump,
2332 addr,
2333 1,
2334 false, SPLIT_TYPE_NONE, 0);
2335 num_reg_entries++;
2336 }
2337
2338
2339 if (dump)
2340 qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
2341 true,
2342 num_reg_entries,
2343 SPLIT_TYPE_NONE, 0, "REGS");
2344
2345 return offset;
2346}
2347
2348
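/* Dumps registers that can't be dumped through the regular flow (read with
 * a skip pattern). Returns the dumped size in dwords.
 */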
2349static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2350 struct qed_ptt *p_ptt,
2351 u32 *dump_buf, bool dump)
2352{
2353 u32 offset = 0, addr;
2354
2355 offset += qed_grc_dump_regs_hdr(dump_buf,
2356 dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
2357
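 /* Dump RDIF/TDIF debug error info (the trailing arguments presumably
  * request 7 dwords read and 1 dword skipped per cycle)
  */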
2361 addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2362 offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2363 p_ptt,
2364 dump_buf + offset,
2365 dump,
2366 addr,
2367 RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2368 7,
2369 1);
2370 addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2371 offset +=
2372 qed_grc_dump_reg_entry_skip(p_hwfn,
2373 p_ptt,
2374 dump_buf + offset,
2375 dump,
2376 addr,
2377 TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2378 7,
2379 1);
2380
2381 return offset;
2382}
2383
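/* Dumps a GRC memory header (a "grc_mem" section). Returns the dumped size
 * in dwords. The following parameters are dumped:
 * - name: dumped only if it's not NULL, prefixed with the Storm letter when
 *   storm_letter is set.
 * - addr: dumped in bytes (the addr argument is in dwords), only when name
 *   is NULL.
 * - len: in dwords, always dumped.
 * - width: dumped only if bit_width is non-zero.
 * - packed: dumped only if packed is true.
 * - type: the mem_group, always dumped, prefixed with the Storm letter when
 *   storm_letter is set.
 */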
2396static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2397 u32 *dump_buf,
2398 bool dump,
2399 const char *name,
2400 u32 addr,
2401 u32 len,
2402 u32 bit_width,
2403 bool packed,
2404 const char *mem_group, char storm_letter)
2405{
2406 u8 num_params = 3;
2407 u32 offset = 0;
2408 char buf[64];
2409
2410 if (!len)
2411 DP_NOTICE(p_hwfn,
2412 "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2413
2414 if (bit_width)
2415 num_params++;
2416 if (packed)
2417 num_params++;
2418
2419
2420 offset += qed_dump_section_hdr(dump_buf + offset,
2421 dump, "grc_mem", num_params);
2422
2423 if (name) {
2424
2425 if (storm_letter) {
2426 strcpy(buf, "?STORM_");
2427 buf[0] = storm_letter;
2428 strcpy(buf + strlen(buf), name);
2429 } else {
2430 strcpy(buf, name);
2431 }
2432
2433 offset += qed_dump_str_param(dump_buf + offset,
2434 dump, "name", buf);
2435 } else {
2436
2437 u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2438
2439 offset += qed_dump_num_param(dump_buf + offset,
2440 dump, "addr", addr_in_bytes);
2441 }
2442
2443
2444 offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2445
2446
2447 if (bit_width)
2448 offset += qed_dump_num_param(dump_buf + offset,
2449 dump, "width", bit_width);
2450
2451
2452 if (packed)
2453 offset += qed_dump_num_param(dump_buf + offset,
2454 dump, "packed", 1);
2455
2456
2457 if (storm_letter) {
2458 strcpy(buf, "?STORM_");
2459 buf[0] = storm_letter;
2460 strcpy(buf + strlen(buf), mem_group);
2461 } else {
2462 strcpy(buf, mem_group);
2463 }
2464
2465 offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2466
2467 return offset;
2468}
2469
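/* Dumps a single GRC memory: a header followed by the memory data. Returns
 * the dumped size in dwords.
 */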
2474static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
2475 struct qed_ptt *p_ptt,
2476 u32 *dump_buf,
2477 bool dump,
2478 const char *name,
2479 u32 addr,
2480 u32 len,
2481 bool wide_bus,
2482 u32 bit_width,
2483 bool packed,
2484 const char *mem_group, char storm_letter)
2485{
2486 u32 offset = 0;
2487
2488 offset += qed_grc_dump_mem_hdr(p_hwfn,
2489 dump_buf + offset,
2490 dump,
2491 name,
2492 addr,
2493 len,
2494 bit_width,
2495 packed, mem_group, storm_letter);
2496 offset += qed_grc_dump_addr_range(p_hwfn,
2497 p_ptt,
2498 dump_buf + offset,
2499 dump, addr, len, wide_bus,
2500 SPLIT_TYPE_NONE, 0);
2501
2502 return offset;
2503}
2504
2505
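/* Dumps GRC memory entries according to the input entries array. Returns the
 * dumped size in dwords.
 */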
2506static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
2507 struct qed_ptt *p_ptt,
2508 struct virt_mem_desc input_mems_arr,
2509 u32 *dump_buf, bool dump)
2510{
2511 u32 i, offset = 0, input_offset = 0;
2512 bool mode_match = true;
2513
2514 while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
2515 const struct dbg_dump_cond_hdr *cond_hdr;
2516 u16 modes_buf_offset;
2517 u32 num_entries;
2518 bool eval_mode;
2519
2520 cond_hdr =
2521 (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
2522 input_offset++;
2523 num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
2524
2525
2526 eval_mode = GET_FIELD(cond_hdr->mode.data,
2527 DBG_MODE_HDR_EVAL_MODE) > 0;
2528 if (eval_mode) {
2529 modes_buf_offset =
2530 GET_FIELD(cond_hdr->mode.data,
2531 DBG_MODE_HDR_MODES_BUF_OFFSET);
2532 mode_match = qed_is_mode_match(p_hwfn,
2533 &modes_buf_offset);
2534 }
2535
2536 if (!mode_match) {
2537 input_offset += cond_hdr->data_size;
2538 continue;
2539 }
2540
2541 for (i = 0; i < num_entries;
2542 i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
2543 const struct dbg_dump_mem *mem =
2544 (const struct dbg_dump_mem *)((u32 *)
2545 input_mems_arr.ptr
2546 + input_offset);
2547 const struct dbg_block *block;
2548 char storm_letter = 0;
2549 u32 mem_addr, mem_len;
2550 bool mem_wide_bus;
2551 u8 mem_group_id;
2552
2553 mem_group_id = GET_FIELD(mem->dword0,
2554 DBG_DUMP_MEM_MEM_GROUP_ID);
2555 if (mem_group_id >= MEM_GROUPS_NUM) {
2556 DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
2557 return 0;
2558 }
2559
2560 if (!qed_grc_is_mem_included(p_hwfn,
2561 (enum block_id)
2562 cond_hdr->block_id,
2563 mem_group_id))
2564 continue;
2565
2566 mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
2567 mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
2568 mem_wide_bus = GET_FIELD(mem->dword1,
2569 DBG_DUMP_MEM_WIDE_BUS);
2570
2571 block = get_dbg_block(p_hwfn,
2572 cond_hdr->block_id);
2573
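   /* If the memory is associated with a Storm, dump it with the Storm's
    * letter
    */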
2577 if (block->associated_storm_letter)
2578 storm_letter = block->associated_storm_letter;
2579
2580
2581 offset += qed_grc_dump_mem(p_hwfn,
2582 p_ptt,
2583 dump_buf + offset,
2584 dump,
2585 NULL,
2586 mem_addr,
2587 mem_len,
2588 mem_wide_bus,
2589 0,
2590 false,
2591 s_mem_group_names[mem_group_id],
2592 storm_letter);
2593 }
2594 }
2595
2596 return offset;
2597}
2598
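/* Dumps GRC memories. Returns the dumped size in dwords. */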
2602static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
2603 struct qed_ptt *p_ptt,
2604 u32 *dump_buf, bool dump)
2605{
2606 struct virt_mem_desc *dbg_buf =
2607 &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
2608 u32 offset = 0, input_offset = 0;
2609
2610 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
2611 const struct dbg_dump_split_hdr *split_hdr;
2612 struct virt_mem_desc curr_input_mems_arr;
2613 enum init_split_types split_type;
2614 u32 split_data_size;
2615
2616 split_hdr =
2617 (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
2618 input_offset++;
2619 split_type = GET_FIELD(split_hdr->hdr,
2620 DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2621 split_data_size = GET_FIELD(split_hdr->hdr,
2622 DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2623 curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
2624 curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
2625
2626 if (split_type == SPLIT_TYPE_NONE)
2627 offset += qed_grc_dump_mem_entries(p_hwfn,
2628 p_ptt,
2629 curr_input_mems_arr,
2630 dump_buf + offset,
2631 dump);
2632 else
2633 DP_NOTICE(p_hwfn,
2634 "Dumping split memories is currently not supported\n");
2635
2636 input_offset += split_data_size;
2637 }
2638
2639 return offset;
2640}
2641
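/* Dumps a Storm's CM context data of the specified type. Returns the dumped
 * size in dwords.
 */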
2646static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
2647 struct qed_ptt *p_ptt,
2648 u32 *dump_buf,
2649 bool dump,
2650 const char *name,
2651 u32 num_lids,
2652 enum cm_ctx_types ctx_type, u8 storm_id)
2653{
2654 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2655 struct storm_defs *storm = &s_storm_defs[storm_id];
2656 u32 i, lid, lid_size, total_size;
2657 u32 rd_reg_addr, offset = 0;
2658
2659
2660 lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
2661
2662 if (!lid_size)
2663 return 0;
2664
2665 total_size = num_lids * lid_size;
2666
2667 offset += qed_grc_dump_mem_hdr(p_hwfn,
2668 dump_buf + offset,
2669 dump,
2670 name,
2671 0,
2672 total_size,
2673 lid_size * 32,
2674 false, name, storm->letter);
2675
2676 if (!dump)
2677 return offset + total_size;
2678
2679 rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
2680
2681
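 /* For each LID, write a read command per dword and read back the result */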
2682 for (lid = 0; lid < num_lids; lid++) {
2683 for (i = 0; i < lid_size; i++) {
2684 qed_wr(p_hwfn,
2685 p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
2686 offset += qed_grc_dump_addr_range(p_hwfn,
2687 p_ptt,
2688 dump_buf + offset,
2689 dump,
2690 rd_reg_addr,
2691 1,
2692 false,
2693 SPLIT_TYPE_NONE, 0);
2694 }
2695 }
2696
2697 return offset;
2698}
2699
2700
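/* Dumps the CM contexts of all Storms. Returns the dumped size in dwords. */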
2701static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2702 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2703{
2704 u32 offset = 0;
2705 u8 storm_id;
2706
2707 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2708 if (!qed_grc_is_storm_included(p_hwfn,
2709 (enum dbg_storms)storm_id))
2710 continue;
2711
2712
2713 offset += qed_grc_dump_ctx_data(p_hwfn,
2714 p_ptt,
2715 dump_buf + offset,
2716 dump,
2717 "CONN_AG_CTX",
2718 NUM_OF_LCIDS,
2719 CM_CTX_CONN_AG, storm_id);
2720
2721
2722 offset += qed_grc_dump_ctx_data(p_hwfn,
2723 p_ptt,
2724 dump_buf + offset,
2725 dump,
2726 "CONN_ST_CTX",
2727 NUM_OF_LCIDS,
2728 CM_CTX_CONN_ST, storm_id);
2729
2730
2731 offset += qed_grc_dump_ctx_data(p_hwfn,
2732 p_ptt,
2733 dump_buf + offset,
2734 dump,
2735 "TASK_AG_CTX",
2736 NUM_OF_LTIDS,
2737 CM_CTX_TASK_AG, storm_id);
2738
2739
2740 offset += qed_grc_dump_ctx_data(p_hwfn,
2741 p_ptt,
2742 dump_buf + offset,
2743 dump,
2744 "TASK_ST_CTX",
2745 NUM_OF_LTIDS,
2746 CM_CTX_TASK_ST, storm_id);
2747 }
2748
2749 return offset;
2750}
2751
2752#define VFC_STATUS_RESP_READY_BIT 0
2753#define VFC_STATUS_BUSY_BIT 1
2754#define VFC_STATUS_SENDING_CMD_BIT 2
2755
2756#define VFC_POLLING_DELAY_MS 1
2757#define VFC_POLLING_COUNT 20
2758
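/* Reads data from the VFC of the specified Storm. Returns the number of
 * dwords read, or 0 if the response polling timed out.
 */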
2762static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
2763 struct qed_ptt *p_ptt,
2764 struct storm_defs *storm,
2765 u32 *cmd_data,
2766 u32 cmd_size,
2767 u32 *addr_data,
2768 u32 addr_size,
2769 u32 resp_size, u32 *dump_buf)
2770{
2771 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2772 u32 vfc_status, polling_ms, polling_count = 0, i;
2773 u32 reg_addr, sem_base;
2774 bool is_ready = false;
2775
2776 sem_base = storm->sem_fast_mem_addr;
2777 polling_ms = VFC_POLLING_DELAY_MS *
2778 s_hw_type_defs[dev_data->hw_type].delay_factor;
2779
2780
2781 ARR_REG_WR(p_hwfn,
2782 p_ptt,
2783 sem_base + SEM_FAST_REG_VFC_DATA_WR,
2784 cmd_data, cmd_size);
2785
2786
2787 ARR_REG_WR(p_hwfn,
2788 p_ptt,
2789 sem_base + SEM_FAST_REG_VFC_ADDR,
2790 addr_data, addr_size);
2791
2792
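 /* Read the response one dword at a time, polling the VFC status register
  * until the response is ready
  */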
2793 for (i = 0; i < resp_size; i++) {
2794
2795 do {
2796 reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
2797 qed_grc_dump_addr_range(p_hwfn,
2798 p_ptt,
2799 &vfc_status,
2800 true,
2801 BYTES_TO_DWORDS(reg_addr),
2802 1,
2803 false, SPLIT_TYPE_NONE, 0);
2804 is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
2805
2806 if (!is_ready) {
2807 if (polling_count++ == VFC_POLLING_COUNT)
2808 return 0;
2809
2810 msleep(polling_ms);
2811 }
2812 } while (!is_ready);
2813
2814 reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
2815 qed_grc_dump_addr_range(p_hwfn,
2816 p_ptt,
2817 dump_buf + i,
2818 true,
2819 BYTES_TO_DWORDS(reg_addr),
2820 1, false, SPLIT_TYPE_NONE, 0);
2821 }
2822
2823 return resp_size;
2824}
2825
2826
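/* Dumps a Storm's VFC CAM. Returns the dumped size in dwords. */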
2827static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
2828 struct qed_ptt *p_ptt,
2829 u32 *dump_buf, bool dump, u8 storm_id)
2830{
2831 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
2832 struct storm_defs *storm = &s_storm_defs[storm_id];
2833 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
2834 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
2835 u32 row, offset = 0;
2836
2837 offset += qed_grc_dump_mem_hdr(p_hwfn,
2838 dump_buf + offset,
2839 dump,
2840 "vfc_cam",
2841 0,
2842 total_size,
2843 256,
2844 false, "vfc_cam", storm->letter);
2845
2846 if (!dump)
2847 return offset + total_size;
2848
2849
2850 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
2851
2852
2853 for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
2854 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
2855 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2856 p_ptt,
2857 storm,
2858 cam_cmd,
2859 VFC_CAM_CMD_DWORDS,
2860 cam_addr,
2861 VFC_CAM_ADDR_DWORDS,
2862 VFC_CAM_RESP_DWORDS,
2863 dump_buf + offset);
2864 }
2865
2866 return offset;
2867}
2868
2869
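/* Dumps a Storm's VFC RAM. Returns the dumped size in dwords. */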
2870static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
2871 struct qed_ptt *p_ptt,
2872 u32 *dump_buf,
2873 bool dump,
2874 u8 storm_id, struct vfc_ram_defs *ram_defs)
2875{
2876 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
2877 struct storm_defs *storm = &s_storm_defs[storm_id];
2878 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
2879 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
2880 u32 row, offset = 0;
2881
2882 offset += qed_grc_dump_mem_hdr(p_hwfn,
2883 dump_buf + offset,
2884 dump,
2885 ram_defs->mem_name,
2886 0,
2887 total_size,
2888 256,
2889 false,
2890 ram_defs->type_name,
2891 storm->letter);
2892
2893 if (!dump)
2894 return offset + total_size;
2895
2896
2897 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
2898
2899
2900 for (row = ram_defs->base_row;
2901 row < ram_defs->base_row + ram_defs->num_rows; row++) {
2902 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
2903 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2904 p_ptt,
2905 storm,
2906 ram_cmd,
2907 VFC_RAM_CMD_DWORDS,
2908 ram_addr,
2909 VFC_RAM_ADDR_DWORDS,
2910 VFC_RAM_RESP_DWORDS,
2911 dump_buf + offset);
2912 }
2913
2914 return offset;
2915}
2916
2917
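/* Dumps the VFC CAM and RAMs of all Storms that have a VFC. Returns the
 * dumped size in dwords.
 */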
2918static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
2919 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2920{
2921 u8 storm_id, i;
2922 u32 offset = 0;
2923
2924 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2925 if (!qed_grc_is_storm_included(p_hwfn,
2926 (enum dbg_storms)storm_id) ||
2927 !s_storm_defs[storm_id].has_vfc)
2928 continue;
2929
2930
2931 offset += qed_grc_dump_vfc_cam(p_hwfn,
2932 p_ptt,
2933 dump_buf + offset,
2934 dump, storm_id);
2935
2936
2937 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2938 offset += qed_grc_dump_vfc_ram(p_hwfn,
2939 p_ptt,
2940 dump_buf + offset,
2941 dump,
2942 storm_id,
2943 &s_vfc_ram_defs[i]);
2944 }
2945
2946 return offset;
2947}
2948
2949
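/* Dumps the RSS memories. Returns the dumped size in dwords. */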
2950static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
2951 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2952{
2953 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2954 u32 offset = 0;
2955 u8 rss_mem_id;
2956
2957 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
2958 u32 rss_addr, num_entries, total_dwords;
2959 struct rss_mem_defs *rss_defs;
2960 u32 addr, num_dwords_to_read;
2961 bool packed;
2962
2963 rss_defs = &s_rss_mem_defs[rss_mem_id];
2964 rss_addr = rss_defs->addr;
2965 num_entries = rss_defs->num_entries[dev_data->chip_id];
2966 total_dwords = (num_entries * rss_defs->entry_width) / 32;
2967 packed = (rss_defs->entry_width == 16);
2968
2969 offset += qed_grc_dump_mem_hdr(p_hwfn,
2970 dump_buf + offset,
2971 dump,
2972 rss_defs->mem_name,
2973 0,
2974 total_dwords,
2975 rss_defs->entry_width,
2976 packed,
2977 rss_defs->type_name, 0);
2978
2979
2980 if (!dump) {
2981 offset += total_dwords;
2982 continue;
2983 }
2984
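  /* Write the RSS RAM address and read the data through the RAM data
   * register, one window at a time
   */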
2985 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
2986 while (total_dwords) {
2987 num_dwords_to_read = min_t(u32,
2988 RSS_REG_RSS_RAM_DATA_SIZE,
2989 total_dwords);
2990 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
2991 offset += qed_grc_dump_addr_range(p_hwfn,
2992 p_ptt,
2993 dump_buf + offset,
2994 dump,
2995 addr,
2996 num_dwords_to_read,
2997 false,
2998 SPLIT_TYPE_NONE, 0);
2999 total_dwords -= num_dwords_to_read;
3000 rss_addr++;
3001 }
3002 }
3003
3004 return offset;
3005}
3006
3007
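/* Dumps the specified Big RAM. Returns the dumped size in dwords. */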
3008static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3009 struct qed_ptt *p_ptt,
3010 u32 *dump_buf, bool dump, u8 big_ram_id)
3011{
3012 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3013 u32 block_size, ram_size, offset = 0, reg_val, i;
3014 char mem_name[12] = "???_BIG_RAM";
3015 char type_name[8] = "???_RAM";
3016 struct big_ram_defs *big_ram;
3017
3018 big_ram = &s_big_ram_defs[big_ram_id];
3019 ram_size = big_ram->ram_size[dev_data->chip_id];
3020
3021 reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3022 block_size = reg_val &
3023 BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
3024 : 128;
3025
3026 strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3027 strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3028
3029
3030 offset += qed_grc_dump_mem_hdr(p_hwfn,
3031 dump_buf + offset,
3032 dump,
3033 mem_name,
3034 0,
3035 ram_size,
3036 block_size * 8,
3037 false, type_name, 0);
3038
3039
3040 if (!dump)
3041 return offset + ram_size;
3042
3043
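 /* Read and dump Big RAM data, one data-register window per iteration */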
3044 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3045 i++) {
3046 u32 addr, len;
3047
3048 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3049 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3050 len = BRB_REG_BIG_RAM_DATA_SIZE;
3051 offset += qed_grc_dump_addr_range(p_hwfn,
3052 p_ptt,
3053 dump_buf + offset,
3054 dump,
3055 addr,
3056 len,
3057 false, SPLIT_TYPE_NONE, 0);
3058 }
3059
3060 return offset;
3061}
3062
3063
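/* Dumps MCP-related memories and registers, halting the MCP if possible.
 * Returns the dumped size in dwords.
 */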
3064static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3065 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3066{
3067 bool block_enable[MAX_BLOCK_ID] = { 0 };
3068 u32 offset = 0, addr;
3069 bool halted = false;
3070
3071
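 /* Halt the MCP while dumping, unless the NO_MCP GRC parameter is set */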
3072 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3073 halted = !qed_mcp_halt(p_hwfn, p_ptt);
3074 if (!halted)
3075 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3076 }
3077
3078
3079 offset += qed_grc_dump_mem(p_hwfn,
3080 p_ptt,
3081 dump_buf + offset,
3082 dump,
3083 NULL,
3084 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3085 MCP_REG_SCRATCH_SIZE,
3086 false, 0, false, "MCP", 0);
3087
3088
3089 offset += qed_grc_dump_mem(p_hwfn,
3090 p_ptt,
3091 dump_buf + offset,
3092 dump,
3093 NULL,
3094 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3095 MCP_REG_CPU_REG_FILE_SIZE,
3096 false, 0, false, "MCP", 0);
3097
3098
3099 block_enable[BLOCK_MCP] = true;
3100 offset += qed_grc_dump_registers(p_hwfn,
3101 p_ptt,
3102 dump_buf + offset,
3103 dump, block_enable, "MCP");
3104
3105
3106 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3107 dump, 1, SPLIT_TYPE_NONE, 0,
3108 "MCP");
3109 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3110 offset += qed_grc_dump_reg_entry(p_hwfn,
3111 p_ptt,
3112 dump_buf + offset,
3113 dump,
3114 addr,
3115 1,
3116 false, SPLIT_TYPE_NONE, 0);
3117
3118
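 /* Resume the MCP (only if it was successfully halted above) */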
3119 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3120 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3121
3122 return offset;
3123}
3124
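/* Dumps the tbus indirect memory of all PHYs. Returns the dumped size in
 * dwords.
 */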
3128static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3129 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3130{
3131 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3132 char mem_name[32];
3133 u8 phy_id;
3134
3135 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3136 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3137 struct phy_defs *phy_defs;
3138 u8 *bytes_buf;
3139
3140 phy_defs = &s_phy_defs[phy_id];
3141 addr_lo_addr = phy_defs->base_addr +
3142 phy_defs->tbus_addr_lo_addr;
3143 addr_hi_addr = phy_defs->base_addr +
3144 phy_defs->tbus_addr_hi_addr;
3145 data_lo_addr = phy_defs->base_addr +
3146 phy_defs->tbus_data_lo_addr;
3147 data_hi_addr = phy_defs->base_addr +
3148 phy_defs->tbus_data_hi_addr;
3149
3150 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3151 phy_defs->phy_name) < 0)
3152 DP_NOTICE(p_hwfn,
3153 "Unexpected debug error: invalid PHY memory name\n");
3154
3155 offset += qed_grc_dump_mem_hdr(p_hwfn,
3156 dump_buf + offset,
3157 dump,
3158 mem_name,
3159 0,
3160 PHY_DUMP_SIZE_DWORDS,
3161 16, true, mem_name, 0);
3162
3163 if (!dump) {
3164 offset += PHY_DUMP_SIZE_DWORDS;
3165 continue;
3166 }
3167
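  /* For each tbus hi address, sweep all 256 lo addresses and read the
   * lo/hi data bytes
   */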
3168 bytes_buf = (u8 *)(dump_buf + offset);
3169 for (tbus_hi_offset = 0;
3170 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3171 tbus_hi_offset++) {
3172 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3173 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3174 tbus_lo_offset++) {
3175 qed_wr(p_hwfn,
3176 p_ptt, addr_lo_addr, tbus_lo_offset);
3177 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3178 p_ptt,
3179 data_lo_addr);
3180 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3181 p_ptt,
3182 data_hi_addr);
3183 }
3184 }
3185
3186 offset += PHY_DUMP_SIZE_DWORDS;
3187 }
3188
3189 return offset;
3190}
3191
3192static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3193 struct qed_ptt *p_ptt,
3194 u32 image_type,
3195 u32 *nvram_offset_bytes,
3196 u32 *nvram_size_bytes);
3197
3198static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3199 struct qed_ptt *p_ptt,
3200 u32 nvram_offset_bytes,
3201 u32 nvram_size_bytes, u32 *ret_buf);
3202
3203
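/* Dumps the MCP HW dump image from NVRAM. Returns the dumped size in
 * dwords.
 */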
3204static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3205 struct qed_ptt *p_ptt,
3206 u32 *dump_buf, bool dump)
3207{
3208 u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3209 u32 hw_dump_size_dwords = 0, offset = 0;
3210 enum dbg_status status;
3211
3212
3213 status = qed_find_nvram_image(p_hwfn,
3214 p_ptt,
3215 NVM_TYPE_HW_DUMP_OUT,
3216 &hw_dump_offset_bytes,
3217 &hw_dump_size_bytes);
3218 if (status != DBG_STATUS_OK)
3219 return 0;
3220
3221 hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3222
3223
3224 offset += qed_dump_section_hdr(dump_buf + offset,
3225 dump, "mcp_hw_dump", 1);
3226 offset += qed_dump_num_param(dump_buf + offset,
3227 dump, "size", hw_dump_size_dwords);
3228
3229
3230 if (dump && hw_dump_size_dwords) {
3231 status = qed_nvram_read(p_hwfn,
3232 p_ptt,
3233 hw_dump_offset_bytes,
3234 hw_dump_size_bytes, dump_buf + offset);
3235 if (status != DBG_STATUS_OK) {
3236 DP_NOTICE(p_hwfn,
3237 "Failed to read MCP HW Dump image from NVRAM\n");
3238 return 0;
3239 }
3240 }
3241 offset += hw_dump_size_dwords;
3242
3243 return offset;
3244}
3245
3246
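/* Dumps the static debug data of all blocks. Returns the dumped size in
 * dwords.
 */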
3247static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3248 struct qed_ptt *p_ptt,
3249 u32 *dump_buf, bool dump)
3250{
3251 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3252 u32 block_id, line_id, offset = 0, addr, len;
3253
3254
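 /* Skip static debug if the debug block is already in use */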
3255 if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3256 return 0;
3257
3258 if (dump) {
3259
3260 qed_bus_disable_blocks(p_hwfn, p_ptt);
3261
3262 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3263 qed_wr(p_hwfn,
3264 p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
3265 qed_wr(p_hwfn,
3266 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3267 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3268 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3269 }
3270
3271
3272 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3273 const struct dbg_block_chip *block_per_chip;
3274 const struct dbg_block *block;
3275 bool is_removed, has_dbg_bus;
3276 u16 modes_buf_offset;
3277 u32 block_dwords;
3278
3279 block_per_chip =
3280 qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
3281 is_removed = GET_FIELD(block_per_chip->flags,
3282 DBG_BLOCK_CHIP_IS_REMOVED);
3283 has_dbg_bus = GET_FIELD(block_per_chip->flags,
3284 DBG_BLOCK_CHIP_HAS_DBG_BUS);
3285
3286
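  /* Skip the NWS block (its parity read+clear doesn't work properly) */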
3287 if (block_id == BLOCK_NWS)
3288 continue;
3289
3290 if (!is_removed && has_dbg_bus &&
3291 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3292 DBG_MODE_HDR_EVAL_MODE) > 0) {
3293 modes_buf_offset =
3294 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3295 DBG_MODE_HDR_MODES_BUF_OFFSET);
3296 if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
3297 has_dbg_bus = false;
3298 }
3299
3300 if (is_removed || !has_dbg_bus)
3301 continue;
3302
3303 block_dwords = NUM_DBG_LINES(block_per_chip) *
3304 STATIC_DEBUG_LINE_DWORDS;
3305
3306
3307 block = get_dbg_block(p_hwfn, (enum block_id)block_id);
3308 offset += qed_grc_dump_mem_hdr(p_hwfn,
3309 dump_buf + offset,
3310 dump,
3311 block->name,
3312 0,
3313 block_dwords,
3314 32, false, "STATIC", 0);
3315
3316 if (!dump) {
3317 offset += block_dwords;
3318 continue;
3319 }
3320
3321
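  /* If the block is in reset, dump zeros instead of reading it */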
3322 if (dev_data->block_in_reset[block_id]) {
3323 memset(dump_buf + offset, 0,
3324 DWORDS_TO_BYTES(block_dwords));
3325 offset += block_dwords;
3326 continue;
3327 }
3328
3329
3330 qed_bus_enable_clients(p_hwfn,
3331 p_ptt,
3332 BIT(block_per_chip->dbg_client_id));
3333
3334 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3335 len = STATIC_DEBUG_LINE_DWORDS;
3336 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
3337 line_id++) {
3338
3339 qed_bus_config_dbg_line(p_hwfn,
3340 p_ptt,
3341 (enum block_id)block_id,
3342 (u8)line_id, 0xf, 0, 0, 0);
3343
3344
3345 offset += qed_grc_dump_addr_range(p_hwfn,
3346 p_ptt,
3347 dump_buf + offset,
3348 dump,
3349 addr,
3350 len,
3351 true, SPLIT_TYPE_NONE,
3352 0);
3353 }
3354
3355
3356 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3357 qed_bus_config_dbg_line(p_hwfn, p_ptt,
3358 (enum block_id)block_id, 0, 0, 0, 0, 0);
3359 }
3360
3361 if (dump) {
3362 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3363 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3364 }
3365
3366 return offset;
3367}
3368
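/* Performs a GRC dump into the specified buffer. Returns a debug status code
 * and writes the dumped size, in dwords, to num_dumped_dwords.
 */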
3372static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3373 struct qed_ptt *p_ptt,
3374 u32 *dump_buf,
3375 bool dump, u32 *num_dumped_dwords)
3376{
3377 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3378 u32 dwords_read, offset = 0;
3379 bool parities_masked = false;
3380 u8 i;
3381
3382 *num_dumped_dwords = 0;
3383 dev_data->num_regs_read = 0;
3384
3385
3386 if (dump)
3387 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3388
3389
3390 offset += qed_dump_common_global_params(p_hwfn,
3391 p_ptt,
3392 dump_buf + offset, dump, 4);
3393 offset += qed_dump_str_param(dump_buf + offset,
3394 dump, "dump-type", "grc-dump");
3395 offset += qed_dump_num_param(dump_buf + offset,
3396 dump,
3397 "num-lcids",
3398 NUM_OF_LCIDS);
3399 offset += qed_dump_num_param(dump_buf + offset,
3400 dump,
3401 "num-ltids",
3402 NUM_OF_LTIDS);
3403 offset += qed_dump_num_param(dump_buf + offset,
3404 dump, "num-ports", dev_data->num_ports);
3405
3406
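 /* Dump reset registers (dumped before taking blocks out of reset) */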
3407 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3408 offset += qed_grc_dump_reset_regs(p_hwfn,
3409 p_ptt,
3410 dump_buf + offset, dump);
3411
3412
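 /* Take all blocks out of reset and update the reset state */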
3413 if (dump) {
3414 qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3415 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3416 }
3417
3418
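 /* Mask parities using the MFW, unless the NO_MCP GRC parameter is set */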
3419 if (dump &&
3420 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3421 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3422 if (!parities_masked) {
3423 DP_NOTICE(p_hwfn,
3424 "Failed to mask parities using MFW\n");
3425 if (qed_grc_get_param
3426 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3427 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3428 }
3429 }
3430
3431
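 /* Dump modified registers (dumped before anything modifies them) */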
3432 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3433 offset += qed_grc_dump_modified_regs(p_hwfn,
3434 p_ptt,
3435 dump_buf + offset, dump);
3436
3437
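 /* Stall the Storms before dumping IORs/VFC */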
3438 if (dump &&
3439 (qed_grc_is_included(p_hwfn,
3440 DBG_GRC_PARAM_DUMP_IOR) ||
3441 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3442 qed_grc_stall_storms(p_hwfn, p_ptt, true);
3443
3444
3445 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3446 bool block_enable[MAX_BLOCK_ID];
3447
3448
3449 for (i = 0; i < MAX_BLOCK_ID; i++)
3450 block_enable[i] = true;
3451 block_enable[BLOCK_MCP] = false;
3452 offset += qed_grc_dump_registers(p_hwfn,
3453 p_ptt,
3454 dump_buf +
3455 offset,
3456 dump,
3457 block_enable, NULL);
3458
3459
3460 offset += qed_grc_dump_special_regs(p_hwfn,
3461 p_ptt,
3462 dump_buf + offset, dump);
3463 }
3464
3465
3466 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3467
3468
3469 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3470 offset += qed_grc_dump_mcp(p_hwfn,
3471 p_ptt, dump_buf + offset, dump);
3472
3473
3474 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3475 offset += qed_grc_dump_ctx(p_hwfn,
3476 p_ptt, dump_buf + offset, dump);
3477
3478
3479 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3480 offset += qed_grc_dump_rss(p_hwfn,
3481 p_ptt, dump_buf + offset, dump);
3482
3483
3484 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3485 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3486 offset += qed_grc_dump_big_ram(p_hwfn,
3487 p_ptt,
3488 dump_buf + offset,
3489 dump, i);
3490
3491
3492 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3493 dwords_read = qed_grc_dump_vfc(p_hwfn,
3494 p_ptt, dump_buf + offset, dump);
3495 offset += dwords_read;
3496 if (!dwords_read)
3497 return DBG_STATUS_VFC_READ_ERROR;
3498 }
3499
3500
3501 if (qed_grc_is_included(p_hwfn,
3502 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3503 CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3504 offset += qed_grc_dump_phy(p_hwfn,
3505 p_ptt, dump_buf + offset, dump);
3506
3507
3508 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
3510 offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3511 p_ptt,
3512 dump_buf + offset, dump);
3513
3514
3515 if (qed_grc_is_included(p_hwfn,
3516 DBG_GRC_PARAM_DUMP_STATIC) &&
3517 (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3518 offset += qed_grc_dump_static_debug(p_hwfn,
3519 p_ptt,
3520 dump_buf + offset, dump);
3521
3522
3523 offset += qed_dump_last_section(dump_buf, offset, dump);
3524
3525 if (dump) {
3526
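  /* Unstall the Storms (if the UNSTALL GRC parameter is set) */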
3527 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3528 qed_grc_stall_storms(p_hwfn, p_ptt, false);
3529
3530
3531 qed_grc_clear_all_prty(p_hwfn, p_ptt);
3532
3533
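  /* Re-enable parities if they were masked above */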
3534 if (parities_masked)
3535 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3536 }
3537
3538 *num_dumped_dwords = offset;
3539
3540 return DBG_STATUS_OK;
3541}
3542
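/* Dumps the failure of a single idle-check rule: a result header, the
 * condition register values and the relevant info register values. Returns
 * the dumped size in dwords.
 */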
3546static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3547 struct qed_ptt *p_ptt,
 u32 *dump_buf,
3550 bool dump,
3551 u16 rule_id,
3552 const struct dbg_idle_chk_rule *rule,
3553 u16 fail_entry_id, u32 *cond_reg_values)
3554{
3555 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3556 const struct dbg_idle_chk_cond_reg *cond_regs;
3557 const struct dbg_idle_chk_info_reg *info_regs;
3558 u32 i, next_reg_offset = 0, offset = 0;
3559 struct dbg_idle_chk_result_hdr *hdr;
3560 const union dbg_idle_chk_reg *regs;
3561 u8 reg_id;
3562
3563 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
3564 regs = (const union dbg_idle_chk_reg *)
3565 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3566 rule->reg_offset;
 cond_regs = &regs[0].cond_reg;
 info_regs = &regs[rule->num_cond_regs].info_reg;
3569
3570
3571 if (dump) {
3572 memset(hdr, 0, sizeof(*hdr));
3573 hdr->rule_id = rule_id;
3574 hdr->mem_entry_id = fail_entry_id;
3575 hdr->severity = rule->severity;
3576 hdr->num_dumped_cond_regs = rule->num_cond_regs;
3577 }
3578
3579 offset += IDLE_CHK_RESULT_HDR_DWORDS;
3580
3581
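 /* Write condition registers and their values */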
3582 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3583 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3584 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3585
3586 reg_hdr =
3587 (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
3588
3589
3590 if (!dump) {
3591 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3592 reg->entry_size;
3593 continue;
3594 }
3595
3596 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3597 memset(reg_hdr, 0, sizeof(*reg_hdr));
3598 reg_hdr->start_entry = reg->start_entry;
3599 reg_hdr->size = reg->entry_size;
3600 SET_FIELD(reg_hdr->data,
3601 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3602 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
3603 SET_FIELD(reg_hdr->data,
3604 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3605
3606
3607 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
3608 dump_buf[offset] = cond_reg_values[next_reg_offset];
3609 }
3610
3611
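 /* Write info registers (read only when the block is out of reset and the
  * mode matches)
  */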
3612 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3613 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3614 u32 block_id;
3615
3616
3617 if (!dump) {
3618 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3619 continue;
3620 }
3621
3622 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3623 if (block_id >= MAX_BLOCK_ID) {
3624 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3625 return 0;
3626 }
3627
3628 if (!dev_data->block_in_reset[block_id]) {
3629 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3630 bool wide_bus, eval_mode, mode_match = true;
3631 u16 modes_buf_offset;
3632 u32 addr;
3633
3634 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
3635 (dump_buf + offset);
3636
3637
3638 eval_mode = GET_FIELD(reg->mode.data,
3639 DBG_MODE_HDR_EVAL_MODE) > 0;
3640 if (eval_mode) {
3641 modes_buf_offset =
3642 GET_FIELD(reg->mode.data,
3643 DBG_MODE_HDR_MODES_BUF_OFFSET);
3644 mode_match =
3645 qed_is_mode_match(p_hwfn,
3646 &modes_buf_offset);
3647 }
3648
3649 if (!mode_match)
3650 continue;
3651
3652 addr = GET_FIELD(reg->data,
3653 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3654 wide_bus = GET_FIELD(reg->data,
3655 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
3656
3657
3658 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3659 hdr->num_dumped_info_regs++;
3660 memset(reg_hdr, 0, sizeof(*reg_hdr));
3661 reg_hdr->size = reg->size;
3662 SET_FIELD(reg_hdr->data,
3663 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3664 rule->num_cond_regs + reg_id);
3665
3666
3667 offset += qed_grc_dump_addr_range(p_hwfn,
3668 p_ptt,
3669 dump_buf + offset,
3670 dump,
3671 addr,
3672 reg->size, wide_bus,
3673 SPLIT_TYPE_NONE, 0);
3674 }
3675 }
3676
3677 return offset;
3678}
3679
3680
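/* Goes over the specified idle-check rules, evaluates each rule on the read
 * condition register values, and dumps the failing entries. Returns the
 * dumped size in dwords.
 */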
3681static u32
3682qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3683 u32 *dump_buf, bool dump,
3684 const struct dbg_idle_chk_rule *input_rules,
3685 u32 num_input_rules, u32 *num_failing_rules)
3686{
3687 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3688 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3689 u32 i, offset = 0;
3690 u16 entry_id;
3691 u8 reg_id;
3692
3693 *num_failing_rules = 0;
3694
3695 for (i = 0; i < num_input_rules; i++) {
3696 const struct dbg_idle_chk_cond_reg *cond_regs;
3697 const struct dbg_idle_chk_rule *rule;
3698 const union dbg_idle_chk_reg *regs;
3699 u16 num_reg_entries = 1;
3700 bool check_rule = true;
3701 const u32 *imm_values;
3702
3703 rule = &input_rules[i];
3704 regs = (const union dbg_idle_chk_reg *)
3705 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3706 rule->reg_offset;
  cond_regs = &regs[0].cond_reg;
3708 imm_values =
3709 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
3710 rule->imm_offset;
3711
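  /* Check that all condition register blocks are out of reset, and find
   * the maximum number of entries among the condition registers
   */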
3716 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3717 reg_id++) {
3718 u32 block_id =
3719 GET_FIELD(cond_regs[reg_id].data,
3720 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3721
3722 if (block_id >= MAX_BLOCK_ID) {
3723 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3724 return 0;
3725 }
3726
3727 check_rule = !dev_data->block_in_reset[block_id];
3728 if (cond_regs[reg_id].num_entries > num_reg_entries)
3729 num_reg_entries = cond_regs[reg_id].num_entries;
3730 }
3731
3732 if (!check_rule && dump)
3733 continue;
3734
3735 if (!dump) {
3736 u32 entry_dump_size =
3737 qed_idle_chk_dump_failure(p_hwfn,
3738 p_ptt,
3739 dump_buf + offset,
3740 false,
3741 rule->rule_id,
3742 rule,
3743 0,
3744 NULL);
3745
3746 offset += num_reg_entries * entry_dump_size;
3747 (*num_failing_rules) += num_reg_entries;
3748 continue;
3749 }
3750
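  /* Go over all register entries (the entry count is the maximum over the
   * rule's condition registers)
   */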
3754 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3755 u32 next_reg_offset = 0;
3756
3757
3758 for (reg_id = 0; reg_id < rule->num_cond_regs;
3759 reg_id++) {
3760 const struct dbg_idle_chk_cond_reg *reg =
3761 &cond_regs[reg_id];
3762 u32 padded_entry_size, addr;
3763 bool wide_bus;
3764
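    /* Calculate the entry address: for arrayed registers, each entry is
     * padded to a power-of-two size
     */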
3768 addr = GET_FIELD(reg->data,
3769 DBG_IDLE_CHK_COND_REG_ADDRESS);
3770 wide_bus =
3771 GET_FIELD(reg->data,
3772 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
3773 if (reg->num_entries > 1 ||
3774 reg->start_entry > 0) {
3775 padded_entry_size =
3776 reg->entry_size > 1 ?
3777 roundup_pow_of_two(reg->entry_size) :
3778 1;
3779 addr += (reg->start_entry + entry_id) *
3780 padded_entry_size;
3781 }
3782
3783
3784 if (next_reg_offset + reg->entry_size >=
3785 IDLE_CHK_MAX_ENTRIES_SIZE) {
3786 DP_NOTICE(p_hwfn,
3787 "idle check registers entry is too large\n");
3788 return 0;
3789 }
3790
3791 next_reg_offset +=
3792 qed_grc_dump_addr_range(p_hwfn, p_ptt,
3793 cond_reg_values +
3794 next_reg_offset,
3795 dump, addr,
3796 reg->entry_size,
3797 wide_bus,
3798 SPLIT_TYPE_NONE, 0);
3799 }
3800
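   /* Evaluate the rule's condition function; a non-zero result means this
    * entry fails the rule
    */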
3804 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
3805 imm_values)) {
3806 offset += qed_idle_chk_dump_failure(p_hwfn,
3807 p_ptt,
3808 dump_buf + offset,
3809 dump,
3810 rule->rule_id,
3811 rule,
3812 entry_id,
3813 cond_reg_values);
3814 (*num_failing_rules)++;
3815 }
3816 }
3817 }
3818
3819 return offset;
3820}
3821
3822
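/* Performs an idle check dump into the specified buffer. Returns the dumped
 * size in dwords.
 */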
3825static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
3826 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3827{
3828 struct virt_mem_desc *dbg_buf =
3829 &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
3830 u32 num_failing_rules_offset, offset = 0,
3831 input_offset = 0, num_failing_rules = 0;
3832
3833
3834 offset += qed_dump_common_global_params(p_hwfn,
3835 p_ptt,
3836 dump_buf + offset, dump, 1);
3837 offset += qed_dump_str_param(dump_buf + offset,
3838 dump, "dump-type", "idle-chk");
3839
3840
3841 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
3842 num_failing_rules_offset = offset;
3843 offset += qed_dump_num_param(