1 // SPDX-License-Identifier: GPL-2.0-only
/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 */
6 #include <linux/module.h>
7 #include <linux/vmalloc.h>
8 #include <linux/crc32.h>
14 #include "qed_reg_addr.h"
16 /* Memory groups enum */
34 MEM_GROUP_CONN_CFC_MEM,
37 MEM_GROUP_CAU_MEM_EXT,
47 MEM_GROUP_TASK_CFC_MEM,
51 /* Memory groups names */
52 static const char * const s_mem_group_names[] = {
85 /* Idle check conditions */
/* Idle check condition: both masked register values differ from their
 * expected immediates.
 */
static u32 cond5(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
}
/* Idle check condition: the masked, shifted register field differs from the
 * expected immediate.
 */
static u32 cond7(const u32 *r, const u32 *imm)
{
	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
}
/* Idle check condition: the masked register value differs from the expected
 * immediate.
 */
static u32 cond6(const u32 *r, const u32 *imm)
{
	return (r[0] & imm[0]) != imm[1];
}
/* Idle check condition: a field extracted from r[0] differs from a value
 * assembled out of pieces of r[0] and r[1].
 */
static u32 cond9(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) !=
	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
}
/* Idle check condition: a shifted field of r[0] differs from another masked
 * field of the same register.
 */
static u32 cond10(const u32 *r, const u32 *imm)
{
	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
}
/* Idle check condition: the register with the mask bits cleared differs from
 * the expected immediate.
 */
static u32 cond4(const u32 *r, const u32 *imm)
{
	return (r[0] & ~imm[0]) != imm[1];
}
/* Idle check condition: r[0] with r[1]'s bits masked off differs from the
 * expected immediate.
 */
static u32 cond0(const u32 *r, const u32 *imm)
{
	return (r[0] & ~r[1]) != imm[0];
}
/* Idle check condition: register value differs from the expected immediate. */
static u32 cond1(const u32 *r, const u32 *imm)
{
	return r[0] != imm[0];
}
/* Idle check condition: first two registers differ while the third equals
 * the immediate.
 */
static u32 cond11(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] == imm[0];
}
/* Idle check condition: first two registers differ while the third exceeds
 * the immediate.
 */
static u32 cond12(const u32 *r, const u32 *imm)
{
	return r[0] != r[1] && r[2] > imm[0];
}
138 static u32 cond3(const u32 *r, const u32 *imm)
/* Idle check condition: non-zero when any masked bit is set (note: returns
 * the masked value itself, not a normalized 0/1).
 */
static u32 cond13(const u32 *r, const u32 *imm)
{
	return r[0] & imm[0];
}
/* Idle check condition: r[0] is below r[1] reduced by the immediate. */
static u32 cond8(const u32 *r, const u32 *imm)
{
	return r[0] < (r[1] - imm[0]);
}
/* Idle check condition: register value exceeds the immediate. */
static u32 cond2(const u32 *r, const u32 *imm)
{
	return r[0] > imm[0];
}
158 /* Array of Idle Check conditions */
159 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
176 #define NUM_PHYS_BLOCKS 84
178 #define NUM_DBG_RESET_REGS 8
180 /******************************* Data Types **********************************/
191 /* CM context types */
200 /* Debug bus frame modes */
201 enum dbg_bus_frame_modes {
202 DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */
203 DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */
204 DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dwords, 3 HW dwords */
205 DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */
206 DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */
207 DBG_BUS_NUM_FRAME_MODES
210 /* Chip constant definitions */
216 /* HW type constant definitions */
217 struct hw_type_defs {
224 /* RBC reset definitions */
225 struct rbc_reset_defs {
227 u32 reset_val[MAX_CHIP_IDS];
230 /* Storm constant definitions.
231 * Addresses are in bytes, sizes are in quad-regs.
235 enum block_id sem_block_id;
236 enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
238 u32 sem_fast_mem_addr;
239 u32 sem_frame_mode_addr;
240 u32 sem_slow_enable_addr;
241 u32 sem_slow_mode_addr;
242 u32 sem_slow_mode1_conf_addr;
243 u32 sem_sync_dbg_empty_addr;
244 u32 sem_gpre_vect_addr;
246 u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
247 u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
250 /* Debug Bus Constraint operation constant definitions */
251 struct dbg_bus_constraint_op_defs {
256 /* Storm Mode definitions */
257 struct storm_mode_defs {
261 u32 src_disable_reg_addr;
263 bool exists[MAX_CHIP_IDS];
266 struct grc_param_defs {
267 u32 default_val[MAX_CHIP_IDS];
272 u32 exclude_all_preset_val;
273 u32 crash_preset_val[MAX_CHIP_IDS];
276 /* Address is in 128b units. Width is in bits. */
277 struct rss_mem_defs {
278 const char *mem_name;
279 const char *type_name;
282 u32 num_entries[MAX_CHIP_IDS];
285 struct vfc_ram_defs {
286 const char *mem_name;
287 const char *type_name;
292 struct big_ram_defs {
293 const char *instance_name;
294 enum mem_groups mem_group_id;
295 enum mem_groups ram_mem_group_id;
296 enum dbg_grc_params grc_param;
299 u32 is_256b_reg_addr;
300 u32 is_256b_bit_offset[MAX_CHIP_IDS];
301 u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
305 const char *phy_name;
307 /* PHY base GRC address */
310 /* Relative address of indirect TBUS address register (bits 0..7) */
311 u32 tbus_addr_lo_addr;
313 /* Relative address of indirect TBUS address register (bits 8..10) */
314 u32 tbus_addr_hi_addr;
316 /* Relative address of indirect TBUS data register (bits 0..7) */
317 u32 tbus_data_lo_addr;
319 /* Relative address of indirect TBUS data register (bits 8..11) */
320 u32 tbus_data_hi_addr;
323 /* Split type definitions */
324 struct split_type_defs {
328 /******************************** Constants **********************************/
330 #define BYTES_IN_DWORD sizeof(u32)
331 /* In the macros below, size and offset are specified in bits */
332 #define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
333 #define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
334 #define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
335 #define FIELD_DWORD_OFFSET(type, field) \
336 (int)(FIELD_BIT_OFFSET(type, field) / 32)
337 #define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
338 #define FIELD_BIT_MASK(type, field) \
339 (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
340 FIELD_DWORD_SHIFT(type, field))
342 #define SET_VAR_FIELD(var, type, field, val) \
344 var[FIELD_DWORD_OFFSET(type, field)] &= \
345 (~FIELD_BIT_MASK(type, field)); \
346 var[FIELD_DWORD_OFFSET(type, field)] |= \
347 (val) << FIELD_DWORD_SHIFT(type, field); \
350 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
352 for (i = 0; i < (arr_size); i++) \
353 qed_wr(dev, ptt, addr, (arr)[i]); \
356 #define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
357 #define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
359 /* extra lines include a signature line + optional latency events line */
360 #define NUM_EXTRA_DBG_LINES(block) \
361 (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
362 #define NUM_DBG_LINES(block) \
363 ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
365 #define USE_DMAE true
366 #define PROTECT_WIDE_BUS true
368 #define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
369 #define RAM_LINES_TO_BYTES(lines) \
370 DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
372 #define REG_DUMP_LEN_SHIFT 24
373 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
374 BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
376 #define IDLE_CHK_RULE_SIZE_DWORDS \
377 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
379 #define IDLE_CHK_RESULT_HDR_DWORDS \
380 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
382 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
383 BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
385 #define PAGE_MEM_DESC_SIZE_DWORDS \
386 BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
388 #define IDLE_CHK_MAX_ENTRIES_SIZE 32
390 /* The sizes and offsets below are specified in bits */
391 #define VFC_CAM_CMD_STRUCT_SIZE 64
392 #define VFC_CAM_CMD_ROW_OFFSET 48
393 #define VFC_CAM_CMD_ROW_SIZE 9
394 #define VFC_CAM_ADDR_STRUCT_SIZE 16
395 #define VFC_CAM_ADDR_OP_OFFSET 0
396 #define VFC_CAM_ADDR_OP_SIZE 4
397 #define VFC_CAM_RESP_STRUCT_SIZE 256
398 #define VFC_RAM_ADDR_STRUCT_SIZE 16
399 #define VFC_RAM_ADDR_OP_OFFSET 0
400 #define VFC_RAM_ADDR_OP_SIZE 2
401 #define VFC_RAM_ADDR_ROW_OFFSET 2
402 #define VFC_RAM_ADDR_ROW_SIZE 10
403 #define VFC_RAM_RESP_STRUCT_SIZE 256
405 #define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
406 #define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
407 #define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
408 #define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
409 #define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
410 #define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
412 #define NUM_VFC_RAM_TYPES 4
414 #define VFC_CAM_NUM_ROWS 512
416 #define VFC_OPCODE_CAM_RD 14
417 #define VFC_OPCODE_RAM_RD 0
419 #define NUM_RSS_MEM_TYPES 5
421 #define NUM_BIG_RAM_TYPES 3
422 #define BIG_RAM_NAME_LEN 3
424 #define NUM_PHY_TBUS_ADDRESSES 2048
425 #define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
427 #define RESET_REG_UNRESET_OFFSET 4
429 #define STALL_DELAY_MS 500
431 #define STATIC_DEBUG_LINE_DWORDS 9
433 #define NUM_COMMON_GLOBAL_PARAMS 9
435 #define MAX_RECURSION_DEPTH 10
437 #define FW_IMG_MAIN 1
439 #define REG_FIFO_ELEMENT_DWORDS 2
440 #define REG_FIFO_DEPTH_ELEMENTS 32
441 #define REG_FIFO_DEPTH_DWORDS \
442 (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
444 #define IGU_FIFO_ELEMENT_DWORDS 4
445 #define IGU_FIFO_DEPTH_ELEMENTS 64
446 #define IGU_FIFO_DEPTH_DWORDS \
447 (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
449 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
450 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
451 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
452 (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
453 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
455 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
457 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
459 #define MAX_SW_PLTAFORM_STR_SIZE 64
461 #define EMPTY_FW_VERSION_STR "???_???_???_???"
462 #define EMPTY_FW_IMAGE_STR "???????????????"
464 /***************************** Constant Arrays *******************************/
466 /* Chip constant definitions array */
467 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
468 {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
469 {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
472 /* Storm constant definitions array */
473 static struct storm_defs s_storm_defs[] = {
476 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
478 TSEM_REG_FAST_MEMORY,
479 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
480 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
481 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
482 TCM_REG_CTX_RBC_ACCS,
483 {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
484 TCM_REG_SM_TASK_CTX},
485 {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
490 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
492 MSEM_REG_FAST_MEMORY,
493 MSEM_REG_DBG_FRAME_MODE_BB_K2,
494 MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
495 MSEM_REG_SLOW_DBG_MODE_BB_K2,
496 MSEM_REG_DBG_MODE1_CFG_BB_K2,
497 MSEM_REG_SYNC_DBG_EMPTY,
498 MSEM_REG_DBG_GPRE_VECT,
499 MCM_REG_CTX_RBC_ACCS,
500 {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
501 MCM_REG_SM_TASK_CTX },
502 {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/
507 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
509 USEM_REG_FAST_MEMORY,
510 USEM_REG_DBG_FRAME_MODE_BB_K2,
511 USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
512 USEM_REG_SLOW_DBG_MODE_BB_K2,
513 USEM_REG_DBG_MODE1_CFG_BB_K2,
514 USEM_REG_SYNC_DBG_EMPTY,
515 USEM_REG_DBG_GPRE_VECT,
516 UCM_REG_CTX_RBC_ACCS,
517 {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
518 UCM_REG_SM_TASK_CTX},
519 {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
524 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
526 XSEM_REG_FAST_MEMORY,
527 XSEM_REG_DBG_FRAME_MODE_BB_K2,
528 XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
529 XSEM_REG_SLOW_DBG_MODE_BB_K2,
530 XSEM_REG_DBG_MODE1_CFG_BB_K2,
531 XSEM_REG_SYNC_DBG_EMPTY,
532 XSEM_REG_DBG_GPRE_VECT,
533 XCM_REG_CTX_RBC_ACCS,
534 {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
535 {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
540 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
542 YSEM_REG_FAST_MEMORY,
543 YSEM_REG_DBG_FRAME_MODE_BB_K2,
544 YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
545 YSEM_REG_SLOW_DBG_MODE_BB_K2,
546 YSEM_REG_DBG_MODE1_CFG_BB_K2,
547 YSEM_REG_SYNC_DBG_EMPTY,
548 YSEM_REG_DBG_GPRE_VECT,
549 YCM_REG_CTX_RBC_ACCS,
550 {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
551 YCM_REG_SM_TASK_CTX},
552 {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
557 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
559 PSEM_REG_FAST_MEMORY,
560 PSEM_REG_DBG_FRAME_MODE_BB_K2,
561 PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
562 PSEM_REG_SLOW_DBG_MODE_BB_K2,
563 PSEM_REG_DBG_MODE1_CFG_BB_K2,
564 PSEM_REG_SYNC_DBG_EMPTY,
565 PSEM_REG_DBG_GPRE_VECT,
566 PCM_REG_CTX_RBC_ACCS,
567 {0, PCM_REG_SM_CON_CTX, 0, 0},
568 {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
572 static struct hw_type_defs s_hw_type_defs[] = {
574 {"asic", 1, 256, 32768},
575 {"reserved", 0, 0, 0},
576 {"reserved2", 0, 0, 0},
577 {"reserved3", 0, 0, 0}
580 static struct grc_param_defs s_grc_param_defs[] = {
581 /* DBG_GRC_PARAM_DUMP_TSTORM */
582 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
584 /* DBG_GRC_PARAM_DUMP_MSTORM */
585 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
587 /* DBG_GRC_PARAM_DUMP_USTORM */
588 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
590 /* DBG_GRC_PARAM_DUMP_XSTORM */
591 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
593 /* DBG_GRC_PARAM_DUMP_YSTORM */
594 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
596 /* DBG_GRC_PARAM_DUMP_PSTORM */
597 {{1, 1}, 0, 1, false, false, 1, {1, 1}},
599 /* DBG_GRC_PARAM_DUMP_REGS */
600 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
602 /* DBG_GRC_PARAM_DUMP_RAM */
603 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
605 /* DBG_GRC_PARAM_DUMP_PBUF */
606 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
608 /* DBG_GRC_PARAM_DUMP_IOR */
609 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
611 /* DBG_GRC_PARAM_DUMP_VFC */
612 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
614 /* DBG_GRC_PARAM_DUMP_CM_CTX */
615 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
617 /* DBG_GRC_PARAM_DUMP_ILT */
618 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
620 /* DBG_GRC_PARAM_DUMP_RSS */
621 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
623 /* DBG_GRC_PARAM_DUMP_CAU */
624 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
626 /* DBG_GRC_PARAM_DUMP_QM */
627 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
629 /* DBG_GRC_PARAM_DUMP_MCP */
630 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
632 /* DBG_GRC_PARAM_DUMP_DORQ */
633 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
635 /* DBG_GRC_PARAM_DUMP_CFC */
636 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
638 /* DBG_GRC_PARAM_DUMP_IGU */
639 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
641 /* DBG_GRC_PARAM_DUMP_BRB */
642 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
644 /* DBG_GRC_PARAM_DUMP_BTB */
645 {{0, 0}, 0, 1, false, false, 0, {1, 1}},
647 /* DBG_GRC_PARAM_DUMP_BMB */
648 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
650 /* DBG_GRC_PARAM_RESERVED1 */
651 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
653 /* DBG_GRC_PARAM_DUMP_MULD */
654 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
656 /* DBG_GRC_PARAM_DUMP_PRS */
657 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
659 /* DBG_GRC_PARAM_DUMP_DMAE */
660 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
662 /* DBG_GRC_PARAM_DUMP_TM */
663 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
665 /* DBG_GRC_PARAM_DUMP_SDM */
666 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
668 /* DBG_GRC_PARAM_DUMP_DIF */
669 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
671 /* DBG_GRC_PARAM_DUMP_STATIC */
672 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
674 /* DBG_GRC_PARAM_UNSTALL */
675 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
677 /* DBG_GRC_PARAM_RESERVED2 */
678 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
680 /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
681 {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
683 /* DBG_GRC_PARAM_EXCLUDE_ALL */
684 {{0, 0}, 0, 1, true, false, 0, {0, 0}},
686 /* DBG_GRC_PARAM_CRASH */
687 {{0, 0}, 0, 1, true, false, 0, {0, 0}},
689 /* DBG_GRC_PARAM_PARITY_SAFE */
690 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
692 /* DBG_GRC_PARAM_DUMP_CM */
693 {{1, 1}, 0, 1, false, false, 0, {1, 1}},
695 /* DBG_GRC_PARAM_DUMP_PHY */
696 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
698 /* DBG_GRC_PARAM_NO_MCP */
699 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
701 /* DBG_GRC_PARAM_NO_FW_VER */
702 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
704 /* DBG_GRC_PARAM_RESERVED3 */
705 {{0, 0}, 0, 1, false, false, 0, {0, 0}},
707 /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
708 {{0, 1}, 0, 1, false, false, 0, {0, 1}},
710 /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
711 {{1, 1}, 0, 1, false, false, 0, {0, 0}},
713 /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
714 {{1, 1}, 0, 1, false, false, 0, {0, 0}},
716 /* DBG_GRC_PARAM_DUMP_CAU_EXT */
717 {{0, 0}, 0, 1, false, false, 0, {1, 1}}
720 static struct rss_mem_defs s_rss_mem_defs[] = {
721 {"rss_mem_cid", "rss_cid", 0, 32,
724 {"rss_mem_key_msb", "rss_key", 1024, 256,
727 {"rss_mem_key_lsb", "rss_key", 2048, 64,
730 {"rss_mem_info", "rss_info", 3072, 16,
733 {"rss_mem_ind", "rss_ind", 4096, 16,
737 static struct vfc_ram_defs s_vfc_ram_defs[] = {
738 {"vfc_ram_tt1", "vfc_ram", 0, 512},
739 {"vfc_ram_mtt2", "vfc_ram", 512, 128},
740 {"vfc_ram_stt2", "vfc_ram", 640, 32},
741 {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
744 static struct big_ram_defs s_big_ram_defs[] = {
745 {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
746 BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
747 MISC_REG_BLOCK_256B_EN, {0, 0},
750 {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
751 BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
752 MISC_REG_BLOCK_256B_EN, {0, 1},
755 {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
756 BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
757 MISCS_REG_BLOCK_256B_EN, {0, 0},
761 static struct rbc_reset_defs s_rbc_reset_defs[] = {
762 {MISCS_REG_RESET_PL_HV,
764 {MISC_REG_RESET_PL_PDA_VMAIN_1,
765 {0x4404040, 0x4404040}},
766 {MISC_REG_RESET_PL_PDA_VMAIN_2,
768 {MISC_REG_RESET_PL_PDA_VAUX,
772 static struct phy_defs s_phy_defs[] = {
773 {"nw_phy", NWS_REG_NWS_CMU_K2,
774 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
775 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
776 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
777 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
778 {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
779 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
780 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
781 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
782 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
783 {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
784 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
785 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
786 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
787 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
788 {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
789 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
790 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
791 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
792 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
795 static struct split_type_defs s_split_type_defs[] = {
796 /* SPLIT_TYPE_NONE */
799 /* SPLIT_TYPE_PORT */
805 /* SPLIT_TYPE_PORT_PF */
812 /**************************** Private Functions ******************************/
814 /* Reads and returns a single dword from the specified unaligned buffer */
/* Reads and returns a single dword from the specified unaligned buffer.
 * memcpy is used so the read is safe on CPUs that fault on unaligned
 * word accesses.
 */
static u32 qed_read_unaligned_dword(u8 *buf)
{
	u32 dword;

	memcpy((u8 *)&dword, buf, sizeof(dword));

	return dword;
}
823 /* Sets the value of the specified GRC param */
824 static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
825 enum dbg_grc_params grc_param, u32 val)
827 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
829 dev_data->grc.param_val[grc_param] = val;
832 /* Returns the value of the specified GRC param */
833 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
834 enum dbg_grc_params grc_param)
836 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
838 return dev_data->grc.param_val[grc_param];
841 /* Initializes the GRC parameters */
842 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
844 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
846 if (!dev_data->grc.params_initialized) {
847 qed_dbg_grc_set_params_default(p_hwfn);
848 dev_data->grc.params_initialized = 1;
852 /* Sets pointer and size for the specified binary buffer type */
853 static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
854 enum bin_dbg_buffer_type buf_type,
855 const u32 *ptr, u32 size)
857 struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
859 buf->ptr = (void *)ptr;
863 /* Initializes debug data for the specified device */
864 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
866 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
867 u8 num_pfs = 0, max_pfs_per_port = 0;
869 if (dev_data->initialized)
870 return DBG_STATUS_OK;
873 if (QED_IS_K2(p_hwfn->cdev)) {
874 dev_data->chip_id = CHIP_K2;
875 dev_data->mode_enable[MODE_K2] = 1;
876 dev_data->num_vfs = MAX_NUM_VFS_K2;
877 num_pfs = MAX_NUM_PFS_K2;
878 max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
879 } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
880 dev_data->chip_id = CHIP_BB;
881 dev_data->mode_enable[MODE_BB] = 1;
882 dev_data->num_vfs = MAX_NUM_VFS_BB;
883 num_pfs = MAX_NUM_PFS_BB;
884 max_pfs_per_port = MAX_NUM_PFS_BB;
886 return DBG_STATUS_UNKNOWN_CHIP;
890 dev_data->hw_type = HW_TYPE_ASIC;
891 dev_data->mode_enable[MODE_ASIC] = 1;
894 switch (p_hwfn->cdev->num_ports_in_engine) {
896 dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
899 dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
902 dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
907 if (QED_IS_CMT(p_hwfn->cdev))
908 dev_data->mode_enable[MODE_100G] = 1;
910 /* Set number of ports */
911 if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
912 dev_data->mode_enable[MODE_100G])
913 dev_data->num_ports = 1;
914 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
915 dev_data->num_ports = 2;
916 else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
917 dev_data->num_ports = 4;
919 /* Set number of PFs per port */
920 dev_data->num_pfs_per_port = min_t(u32,
921 num_pfs / dev_data->num_ports,
924 /* Initializes the GRC parameters */
925 qed_dbg_grc_init_params(p_hwfn);
927 dev_data->use_dmae = true;
928 dev_data->initialized = 1;
930 return DBG_STATUS_OK;
933 static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
934 enum block_id block_id)
936 const struct dbg_block *dbg_block;
938 dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
939 return dbg_block + block_id;
942 static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
947 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
949 return (const struct dbg_block_chip *)
950 p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
951 block_id * MAX_CHIP_IDS + dev_data->chip_id;
954 static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
958 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
960 return (const struct dbg_reset_reg *)
961 p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
962 reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
965 /* Reads the FW info structure for the specified Storm from the chip,
966 * and writes it to the specified fw_info pointer.
968 static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
969 struct qed_ptt *p_ptt,
970 u8 storm_id, struct fw_info *fw_info)
972 struct storm_defs *storm = &s_storm_defs[storm_id];
973 struct fw_info_location fw_info_location;
976 memset(&fw_info_location, 0, sizeof(fw_info_location));
977 memset(fw_info, 0, sizeof(*fw_info));
979 /* Read first the address that points to fw_info location.
980 * The address is located in the last line of the Storm RAM.
982 addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
983 DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
984 sizeof(fw_info_location);
986 dest = (u32 *)&fw_info_location;
988 for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
989 i++, addr += BYTES_IN_DWORD)
990 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
992 /* Read FW version info from Storm RAM */
993 if (fw_info_location.size > 0 && fw_info_location.size <=
995 addr = fw_info_location.grc_addr;
996 dest = (u32 *)fw_info;
997 for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
998 i++, addr += BYTES_IN_DWORD)
999 dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1003 /* Dumps the specified string to the specified buffer.
1004 * Returns the dumped size in bytes.
/* Dumps the specified string to the specified buffer.
 * Returns the dumped size in bytes (string length + NUL terminator).
 * When dump is false, only the size is computed; nothing is written.
 */
static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
{
	if (dump)
		strcpy(dump_buf, str);

	return (u32)strlen(str) + 1;
}
1014 /* Dumps zeros to align the specified buffer to dwords.
1015 * Returns the dumped size in bytes.
1017 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1019 u8 offset_in_dword, align_size;
1021 offset_in_dword = (u8)(byte_offset & 0x3);
1022 align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1024 if (dump && align_size)
1025 memset(dump_buf, 0, align_size);
1030 /* Writes the specified string param to the specified buffer.
1031 * Returns the dumped size in dwords.
1033 static u32 qed_dump_str_param(u32 *dump_buf,
1035 const char *param_name, const char *param_val)
1037 char *char_buf = (char *)dump_buf;
1040 /* Dump param name */
1041 offset += qed_dump_str(char_buf + offset, dump, param_name);
1043 /* Indicate a string param value */
1045 *(char_buf + offset) = 1;
1048 /* Dump param value */
1049 offset += qed_dump_str(char_buf + offset, dump, param_val);
1051 /* Align buffer to next dword */
1052 offset += qed_dump_align(char_buf + offset, dump, offset);
1054 return BYTES_TO_DWORDS(offset);
1057 /* Writes the specified numeric param to the specified buffer.
1058 * Returns the dumped size in dwords.
1060 static u32 qed_dump_num_param(u32 *dump_buf,
1061 bool dump, const char *param_name, u32 param_val)
1063 char *char_buf = (char *)dump_buf;
1066 /* Dump param name */
1067 offset += qed_dump_str(char_buf + offset, dump, param_name);
1069 /* Indicate a numeric param value */
1071 *(char_buf + offset) = 0;
1074 /* Align buffer to next dword */
1075 offset += qed_dump_align(char_buf + offset, dump, offset);
1077 /* Dump param value (and change offset from bytes to dwords) */
1078 offset = BYTES_TO_DWORDS(offset);
1080 *(dump_buf + offset) = param_val;
1086 /* Reads the FW version and writes it as a param to the specified buffer.
1087 * Returns the dumped size in dwords.
1089 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1090 struct qed_ptt *p_ptt,
1091 u32 *dump_buf, bool dump)
1093 char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1094 char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1095 struct fw_info fw_info = { {0}, {0} };
1098 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1099 /* Read FW info from chip */
1100 qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
1102 /* Create FW version/image strings */
1103 if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1104 "%d_%d_%d_%d", fw_info.ver.num.major,
1105 fw_info.ver.num.minor, fw_info.ver.num.rev,
1106 fw_info.ver.num.eng) < 0)
1108 "Unexpected debug error: invalid FW version string\n");
1109 switch (fw_info.ver.image_id) {
1111 strcpy(fw_img_str, "main");
1114 strcpy(fw_img_str, "unknown");
1119 /* Dump FW version, image and timestamp */
1120 offset += qed_dump_str_param(dump_buf + offset,
1121 dump, "fw-version", fw_ver_str);
1122 offset += qed_dump_str_param(dump_buf + offset,
1123 dump, "fw-image", fw_img_str);
1124 offset += qed_dump_num_param(dump_buf + offset,
1126 "fw-timestamp", fw_info.ver.timestamp);
1131 /* Reads the MFW version and writes it as a param to the specified buffer.
1132 * Returns the dumped size in dwords.
1134 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1135 struct qed_ptt *p_ptt,
1136 u32 *dump_buf, bool dump)
1138 char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1141 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1142 u32 global_section_offsize, global_section_addr, mfw_ver;
1143 u32 public_data_addr, global_section_offsize_addr;
1145 /* Find MCP public data GRC address. Needs to be ORed with
1146 * MCP_REG_SCRATCH due to a HW bug.
1148 public_data_addr = qed_rd(p_hwfn,
1150 MISC_REG_SHARED_MEM_ADDR) |
1153 /* Find MCP public global section offset */
1154 global_section_offsize_addr = public_data_addr +
1155 offsetof(struct mcp_public_data,
1157 sizeof(offsize_t) * PUBLIC_GLOBAL;
1158 global_section_offsize = qed_rd(p_hwfn, p_ptt,
1159 global_section_offsize_addr);
1160 global_section_addr =
1162 (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
1164 /* Read MFW version from MCP public global section */
1165 mfw_ver = qed_rd(p_hwfn, p_ptt,
1166 global_section_addr +
1167 offsetof(struct public_global, mfw_ver));
1169 /* Dump MFW version param */
1170 if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
1171 (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
1172 (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
1174 "Unexpected debug error: invalid MFW version string\n");
1177 return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
1180 /* Reads the chip revision from the chip and writes it as a param to the
1181 * specified buffer. Returns the dumped size in dwords.
1183 static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
1184 struct qed_ptt *p_ptt,
1185 u32 *dump_buf, bool dump)
1187 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1188 char param_str[3] = "??";
1190 if (dev_data->hw_type == HW_TYPE_ASIC) {
1191 u32 chip_rev, chip_metal;
1193 chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
1194 chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
1196 param_str[0] = 'a' + (u8)chip_rev;
1197 param_str[1] = '0' + (u8)chip_metal;
1200 return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
1203 /* Writes a section header to the specified buffer.
1204 * Returns the dumped size in dwords.
1206 static u32 qed_dump_section_hdr(u32 *dump_buf,
1207 bool dump, const char *name, u32 num_params)
1209 return qed_dump_num_param(dump_buf, dump, name, num_params);
1212 /* Writes the common global params to the specified buffer.
1213 * Returns the dumped size in dwords.
1215 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
1216 struct qed_ptt *p_ptt,
1219 u8 num_specific_global_params)
1221 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1225 /* Dump global params section header */
1226 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
1227 (dev_data->chip_id == CHIP_BB ? 1 : 0);
1228 offset += qed_dump_section_hdr(dump_buf + offset,
1229 dump, "global_params", num_params);
1232 offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
1233 offset += qed_dump_mfw_ver_param(p_hwfn,
1234 p_ptt, dump_buf + offset, dump);
1235 offset += qed_dump_chip_revision_param(p_hwfn,
1236 p_ptt, dump_buf + offset, dump);
1237 offset += qed_dump_num_param(dump_buf + offset,
1238 dump, "tools-version", TOOLS_VERSION);
1239 offset += qed_dump_str_param(dump_buf + offset,
1242 s_chip_defs[dev_data->chip_id].name);
1243 offset += qed_dump_str_param(dump_buf + offset,
1246 s_hw_type_defs[dev_data->hw_type].name);
1247 offset += qed_dump_num_param(dump_buf + offset,
1248 dump, "pci-func", p_hwfn->abs_pf_id);
1249 if (dev_data->chip_id == CHIP_BB)
1250 offset += qed_dump_num_param(dump_buf + offset,
1251 dump, "path", QED_PATH_ID(p_hwfn));
1256 /* Writes the "last" section (including CRC) to the specified buffer at the
1257 * given offset. Returns the dumped size in dwords.
1259 static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
1261 u32 start_offset = offset;
1263 /* Dump CRC section header */
1264 offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
1266 /* Calculate CRC32 and add it to the dword after the "last" section */
1268 *(dump_buf + offset) = ~crc32(0xffffffff,
1270 DWORDS_TO_BYTES(offset));
1274 return offset - start_offset;
1277 /* Update blocks reset state */
static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };

	/* Read reset registers */
	for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
		const struct dbg_reset_reg *rst_reg;
		bool rst_reg_removed;

		rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
		rst_reg_removed = GET_FIELD(rst_reg->data,
					    DBG_RESET_REG_IS_REMOVED);
		rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
							 DBG_RESET_REG_ADDR));

		/* Only read registers that exist on this chip */
		if (!rst_reg_removed)
			reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,

	/* Check if blocks are in reset */
	for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
		const struct dbg_block_chip *blk;

		blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
		is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
		has_rst_reg = GET_FIELD(blk->flags,
					DBG_BLOCK_CHIP_HAS_RESET_REG);

		/* A cleared reset bit means the block is held in reset */
		if (!is_removed && has_rst_reg)
			dev_data->block_in_reset[blk_id] =
			    !(reg_val[blk->reset_reg_id] &
			      BIT(blk->reset_reg_bit_offset));
1321 /* is_mode_match recursive function */
static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
				  u16 *modes_buf_offset, u8 rec_depth)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	/* Guard against a corrupt tree causing unbounded recursion */
	if (rec_depth > MAX_RECURSION_DEPTH) {
		"Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");

	/* Get next element from modes tree buffer */
	dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
	tree_val = dbg_array[(*modes_buf_offset)++];

	case INIT_MODE_OP_NOT:
		/* Unary NOT: negate the single sub-tree */
		return !qed_is_mode_match_rec(p_hwfn,
					      modes_buf_offset, rec_depth + 1);
	case INIT_MODE_OP_OR:
	case INIT_MODE_OP_AND:
		/* Binary op: evaluate both sub-trees, then combine */
		arg1 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		arg2 = qed_is_mode_match_rec(p_hwfn,
					     modes_buf_offset, rec_depth + 1);
		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
							arg2) : (arg1 && arg2);
	/* Leaf: tree_val encodes a mode index past the opcode range */
	return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
1357 /* Returns true if the mode (specified using modes_buf_offset) is enabled */
static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
	/* Walk the modes tree starting at recursion depth 0 */
	return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
1363 /* Enable / disable the Debug block */
static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, bool enable)
	/* A single on/off register controls the whole Debug block */
	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
1370 /* Resets the Debug block */
static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
	u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
	const struct dbg_reset_reg *reset_reg;
	const struct dbg_block_chip *block;

	/* Locate the DBG block and its associated reset register */
	block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
	reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
	DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));

	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
	/* New value = old value with the DBG block's reset bit cleared */
	old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);

	/* Pulse the reset: write the cleared value, then restore the old one */
	qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
	qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
1391 /* Enable / disable Debug Bus clients according to the specified mask
1392 * (1 = enable, 0 = disable).
static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u32 client_mask)
	/* Each bit in client_mask enables (1) / disables (0) one client */
	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
/* Configures one debug-bus line for the given block: writes the select,
 * dword-enable, shift, force-valid and force-frame registers. The value
 * arguments for some writes are on lines not visible here.
 */
static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    enum block_id block_id,
				    u8 force_valid_mask, u8 force_frame_mask)
	const struct dbg_block_chip *block =
	    qed_get_dbg_block_per_chip(p_hwfn, block_id);

	/* Register addresses are stored in dwords; convert before writing */
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
	qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
1423 /* Disable debug bus in all blocks */
static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	/* Disable all blocks */
	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
		const struct dbg_block_chip *block_per_chip =
		    qed_get_dbg_block_per_chip(p_hwfn,
					       (enum block_id)block_id);

		/* Skip blocks that don't exist on this chip or are in reset */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_IS_REMOVED) ||
		    dev_data->block_in_reset[block_id])

		/* Disable debug bus */
		if (GET_FIELD(block_per_chip->flags,
			      DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
			block_per_chip->dbg_dword_enable_reg_addr;
			u16 modes_buf_offset =
			    GET_FIELD(block_per_chip->dbg_bus_mode.data,
				      DBG_MODE_HDR_MODES_BUF_OFFSET);
			GET_FIELD(block_per_chip->dbg_bus_mode.data,
				  DBG_MODE_HDR_EVAL_MODE) > 0;

			/* Write 0 to the dword-enable register only when the
			 * block's mode expression matches the current modes.
			 */
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_wr(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(dbg_en_addr),
1462 /* Returns true if the specified entity (indicated by GRC param) should be
1463 * included in the dump, false otherwise.
static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
				enum dbg_grc_params grc_param)
	/* A non-zero GRC parameter value means "include in dump" */
	return qed_grc_get_param(p_hwfn, grc_param) > 0;
1471 /* Returns the storm_id that matches the specified Storm letter,
1472 * or MAX_DBG_STORMS if invalid storm letter.
static enum dbg_storms qed_get_id_from_letter(char storm_letter)
	/* Linear search of the storm definitions table by letter */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
		if (s_storm_defs[storm_id].letter == storm_letter)
			return (enum dbg_storms)storm_id;

	/* Sentinel: letter did not match any defined storm */
	return MAX_DBG_STORMS;
1485 /* Returns true of the specified Storm should be included in the dump, false
static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
				      enum dbg_storms storm)
	/* Storm enum values map directly onto GRC parameter indices */
	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
1494 /* Returns true if the specified memory should be included in the dump, false
static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
				    enum block_id block_id, u8 mem_group_id)
	const struct dbg_block *block;

	block = get_dbg_block(p_hwfn, block_id);

	/* If the block is associated with a Storm, check Storm match */
	if (block->associated_storm_letter) {
		enum dbg_storms associated_storm_id =
		    qed_get_id_from_letter(block->associated_storm_letter);

		if (associated_storm_id == MAX_DBG_STORMS ||
		    !qed_grc_is_storm_included(p_hwfn, associated_storm_id))

	/* Big RAM memories are controlled by their own per-RAM GRC param */
	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
		struct big_ram_defs *big_ram = &s_big_ram_defs[i];

		if (mem_group_id == big_ram->mem_group_id ||
		    mem_group_id == big_ram->ram_mem_group_id)
			return qed_grc_is_included(p_hwfn, big_ram->grc_param);

	/* Map remaining memory groups to their controlling GRC parameter */
	switch (mem_group_id) {
	case MEM_GROUP_PXP_ILT:
	case MEM_GROUP_PXP_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
	case MEM_GROUP_PBUF:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
	case MEM_GROUP_CAU_MEM:
	case MEM_GROUP_CAU_SB:
	case MEM_GROUP_CAU_PI:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
	case MEM_GROUP_CAU_MEM_EXT:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
	case MEM_GROUP_QM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
	case MEM_GROUP_CFC_MEM:
	case MEM_GROUP_CONN_CFC_MEM:
	case MEM_GROUP_TASK_CFC_MEM:
		/* CFC memories are dumped if either CFC or CM context dump
		 * is requested.
		 */
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
		    qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
	case MEM_GROUP_DORQ_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
	case MEM_GROUP_IGU_MEM:
	case MEM_GROUP_IGU_MSIX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
	case MEM_GROUP_MULD_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
	case MEM_GROUP_PRS_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
	case MEM_GROUP_DMAE_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
	case MEM_GROUP_TM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
	case MEM_GROUP_SDM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
	case MEM_GROUP_TDIF_CTX:
	case MEM_GROUP_RDIF_CTX:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
	case MEM_GROUP_CM_MEM:
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
1571 /* Stalls all Storms */
static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, bool stall)
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		/* Only touch storms selected by the GRC parameters */
		if (!qed_grc_is_storm_included(p_hwfn,
					       (enum dbg_storms)storm_id))

		/* Write 1 to stall, 0 to release, per-storm stall register */
		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
		    SEM_FAST_REG_STALL_0_BB_K2;
		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);

	/* Give the storms time to settle after (un)stalling */
	msleep(STALL_DELAY_MS);
1591 /* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
1592 * taken out of reset.
static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, bool rbc_only)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 chip_id = dev_data->chip_id;

	/* Take RBCs out of reset */
	for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
		if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
			/* Write the unreset value at the register's
			 * unreset offset.
			 */
			s_rbc_reset_defs[i].reset_reg_addr +
			RESET_REG_UNRESET_OFFSET,
			s_rbc_reset_defs[i].reset_val[chip_id]);

	/* Non-RBC path: compute per-reset-register bitmasks, then write */
	u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };

	/* Fill reset regs values */
	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		bool is_removed, has_reset_reg, unreset_before_dump;
		const struct dbg_block_chip *block;

		block = qed_get_dbg_block_per_chip(p_hwfn,
		GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
		GET_FIELD(block->flags,
			  DBG_BLOCK_CHIP_HAS_RESET_REG);
		unreset_before_dump =
		    GET_FIELD(block->flags,
			      DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);

		/* Accumulate the unreset bit for eligible blocks */
		if (!is_removed && has_reset_reg && unreset_before_dump)
			reg_val[block->reset_reg_id] |=
			    BIT(block->reset_reg_bit_offset);

	/* Write reset registers */
	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		const struct dbg_reset_reg *reset_reg;

		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

		/* Skip registers removed on this chip */
		(reset_reg->data, DBG_RESET_REG_IS_REMOVED))

		if (reg_val[reset_reg_id]) {
			GET_FIELD(reset_reg->data,
				  DBG_RESET_REG_ADDR);
			/* Write accumulated mask at the unreset offset */
			DWORDS_TO_BYTES(reset_reg_addr) +
			RESET_REG_UNRESET_OFFSET,
			reg_val[reset_reg_id]);
1663 /* Returns the attention block data of the specified block */
static const struct dbg_attn_block_type_data *
qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type)
	/* Attention data is a flat array in the debug binary buffer,
	 * indexed by block ID, with per-attention-type sub-entries.
	 */
	const struct dbg_attn_block *base_attn_block_arr =
	    (const struct dbg_attn_block *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;

	return &base_attn_block_arr[block_id].per_type_data[attn_type];
1675 /* Returns the attention registers of the specified block */
static const struct dbg_attn_reg *
qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
			enum block_id block_id, enum dbg_attn_type attn_type,
	const struct dbg_attn_block_type_data *block_type_data =
	    qed_get_block_attn_data(p_hwfn, block_id, attn_type);

	/* Out-parameter: number of attention registers for this block/type */
	*num_attn_regs = block_type_data->num_regs;

	/* Registers live in a shared array; regs_offset selects this
	 * block's slice.
	 */
	return (const struct dbg_attn_reg *)
	    p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
	    block_type_data->regs_offset;
1691 /* For each block, clear the status of all parities */
static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 reg_idx, num_attn_regs;

	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Blocks in reset cannot be accessed */
		if (dev_data->block_in_reset[block_id])

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
			    &attn_reg_arr[reg_idx];
			u16 modes_buf_offset;

			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			GET_FIELD(reg_data->mode.data,
				  DBG_MODE_HDR_MODES_BUF_OFFSET);

			/* If Mode match: clear parity status (the status
			 * register is clear-on-read, so a read suffices).
			 * NOTE(review): clear-on-read is assumed from the
			 * bare qed_rd() with discarded result - confirm.
			 */
			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
				qed_rd(p_hwfn, p_ptt,
				       DWORDS_TO_BYTES(reg_data->
1732 /* Dumps GRC registers section header. Returns the dumped size in dwords.
1733 * the following parameters are dumped:
1734 * - count: no. of dumped entries
1735 * - split_type: split type
1736 * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
1737 * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
				 u32 num_reg_entries,
				 enum init_split_types split_type,
				 u8 split_id, const char *reg_type_name)
	/* Parameter count grows with the optional split-id and type name */
	(split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);

	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_regs", num_params);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "count", num_reg_entries);
	offset += qed_dump_str_param(dump_buf + offset,
				     s_split_type_defs[split_type].name);
	if (split_type != SPLIT_TYPE_NONE)
		offset += qed_dump_num_param(dump_buf + offset,
					     dump, "id", split_id);
	/* Optional register type name (condition on a line not shown) */
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "type", reg_type_name);
1766 /* Reads the specified registers into the specified buffer.
1767 * The addr and len arguments are specified in dwords.
void qed_read_regs(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
	/* Plain GRC read loop; addr/len are in dwords, so convert per read */
	for (i = 0; i < len; i++)
		buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
1778 /* Dumps the GRC registers in the specified address range.
1779 * Returns the dumped size in dwords.
1780 * The addr and len arguments are specified in dwords.
static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   bool dump, u32 addr, u32 len, bool wide_bus,
				   enum init_split_types split_type,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u8 port_id = 0, pf_id = 0, vf_id = 0, fid = 0;
	bool read_using_dmae = false;

	/* Derive port/pf/vf IDs from the split type and split ID */
	switch (split_type) {
	case SPLIT_TYPE_PORT:
	case SPLIT_TYPE_PORT_PF:
		port_id = split_id / dev_data->num_pfs_per_port;
		pf_id = port_id + dev_data->num_ports *
		    (split_id % dev_data->num_pfs_per_port);

	/* Try reading using DMAE */
	if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
	    (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
	     (PROTECT_WIDE_BUS && wide_bus))) {
		struct qed_dmae_params dmae_params;

		/* Set DMAE params */
		memset(&dmae_params, 0, sizeof(dmae_params));
		SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
			dmae_params.port_id = port_id;
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.src_pfid = pf_id;
		case SPLIT_TYPE_PORT_PF:
			SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
			SET_FIELD(dmae_params.flags,
				  QED_DMAE_PARAMS_SRC_PF_VALID, 1);
			dmae_params.port_id = port_id;
			dmae_params.src_pfid = pf_id;

		/* Execute DMAE command */
		read_using_dmae = !qed_dmae_grc2host(p_hwfn,
						     DWORDS_TO_BYTES(addr),
						     (u64)(uintptr_t)(dump_buf),
		/* On DMAE failure, fall back permanently to GRC reads */
		if (!read_using_dmae) {
			dev_data->use_dmae = 0;
			"Failed reading from chip using DMAE, using GRC instead\n");

	if (read_using_dmae)

	/* If not read using DMAE, read using GRC */

	/* Only re-pretend when the target split differs from the cached one */
	if (split_type != dev_data->pretend.split_type ||
	    split_id != dev_data->pretend.split_id) {
		switch (split_type) {
		case SPLIT_TYPE_PORT:
			qed_port_pretend(p_hwfn, p_ptt, port_id);
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
			qed_fid_pretend(p_hwfn, p_ptt, fid);
		case SPLIT_TYPE_PORT_PF:
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
			qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
			fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
			    | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
			qed_fid_pretend(p_hwfn, p_ptt, fid);

		/* Cache the pretend state to avoid redundant pretends */
		dev_data->pretend.split_type = (u8)split_type;
		dev_data->pretend.split_id = split_id;

	/* Read registers using GRC */
	qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);

	/* Print progress message once per 'thresh' registers read */
	dev_data->num_regs_read += len;
	thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
	if ((dev_data->num_regs_read / thresh) >
	    ((dev_data->num_regs_read - len) / thresh))
		"Dumped %d registers...\n", dev_data->num_regs_read);
1914 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
1915 * The addr and len arguments are specified in dwords.
static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
				      bool dump, u32 addr, u32 len)
	/* Header dword packs the dword address (low bits) and length */
	*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
1926 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
1927 * The addr and len arguments are specified in dwords.
static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  bool dump, u32 addr, u32 len, bool wide_bus,
				  enum init_split_types split_type, u8 split_id)
	/* One packed header dword followed by the register values */
	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
	offset += qed_grc_dump_addr_range(p_hwfn,
					  dump, addr, len, wide_bus,
					  split_type, split_id);
1947 /* Dumps GRC registers sequence with skip cycle.
1948 * Returns the dumped size in dwords.
1949 * - addr: start GRC address in dwords
1950 * - total_len: total no. of dwords to dump
1951 * - read_len: no. consecutive dwords to read
1952 * - skip_len: no. of dwords to skip (and fill with zeros)
static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 read_len, u32 skip_len)
	u32 offset = 0, reg_offset = 0;

	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);

	/* When not dumping, just account for the total size */
	return offset + total_len;

	/* Alternate read_len dwords of real data with skip_len zeros */
	while (reg_offset < total_len) {
		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);

		offset += qed_grc_dump_addr_range(p_hwfn,
						  dump, addr, curr_len, false,
						  SPLIT_TYPE_NONE, 0);
		reg_offset += curr_len;

		if (reg_offset < total_len) {
			/* NOTE(review): the clamp uses total_len - skip_len,
			 * not total_len - reg_offset as in the read branch
			 * above - looks inconsistent; confirm against the
			 * intended skip semantics.
			 */
			curr_len = min_t(u32, skip_len, total_len - skip_len);
			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
			reg_offset += curr_len;
1992 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct virt_mem_desc input_regs_arr,
				     enum init_split_types split_type,
				     bool block_enable[MAX_BLOCK_ID],
				     u32 *num_dumped_reg_entries)
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	*num_dumped_reg_entries = 0;

	/* Walk the input array: each condition header is followed by
	 * data_size register entries.
	 */
	while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr =
		    (const struct dbg_dump_cond_hdr *)
		    input_regs_arr.ptr + input_offset++;
		u16 modes_buf_offset;

		/* Check mode/block */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		GET_FIELD(cond_hdr->mode.data,
			  DBG_MODE_HDR_MODES_BUF_OFFSET);
		mode_match = qed_is_mode_match(p_hwfn,

		/* Skip the whole group if mode or block doesn't match */
		if (!mode_match || !block_enable[cond_hdr->block_id]) {
			input_offset += cond_hdr->data_size;

		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
			const struct dbg_dump_reg *reg =
			    (const struct dbg_dump_reg *)
			    input_regs_arr.ptr + input_offset;

			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 split_type, split_id);
			(*num_dumped_reg_entries)++;
2056 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct virt_mem_desc input_regs_arr,
				   bool block_enable[MAX_BLOCK_ID],
				   enum init_split_types split_type,
				   u8 split_id, const char *reg_type_name)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum init_split_types hdr_split_type = split_type;
	u32 num_dumped_reg_entries, offset;
	u8 hdr_split_id = split_id;

	/* In PORT_PF split type, print a port split header */
	if (split_type == SPLIT_TYPE_PORT_PF) {
		hdr_split_type = SPLIT_TYPE_PORT;
		hdr_split_id = split_id / dev_data->num_pfs_per_port;

	/* Calculate register dump header size (and skip it for now) */
	offset = qed_grc_dump_regs_hdr(dump_buf,
				       hdr_split_id, reg_type_name);

	/* Dump registers */
	offset += qed_grc_dump_regs_entries(p_hwfn,
					    &num_dumped_reg_entries);

	/* Write register dump header (now that the entry count is known) */
	if (dump && num_dumped_reg_entries > 0)
		qed_grc_dump_regs_hdr(dump_buf,
				      num_dumped_reg_entries,
				      hdr_split_id, reg_type_name);

	/* An empty split contributes nothing to the dump */
	return num_dumped_reg_entries > 0 ? offset : 0;
2106 /* Dumps registers according to the input registers array. Returns the dumped
static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  bool block_enable[MAX_BLOCK_ID],
				  const char *reg_type_name)
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 offset = 0, input_offset = 0;

	/* Each iteration handles one split header + its register data */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_regs_arr;
		enum init_split_types split_type;
		u16 split_count = 0;
		u32 split_data_size;

		(const struct dbg_dump_split_hdr *)
		dbg_buf->ptr + input_offset++;
		GET_FIELD(split_hdr->hdr,
			  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_regs_arr.ptr =
		    (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
		curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Number of split instances depends on the split type */
		switch (split_type) {
		case SPLIT_TYPE_NONE:
		case SPLIT_TYPE_PORT:
			split_count = dev_data->num_ports;
		case SPLIT_TYPE_PORT_PF:
			split_count = dev_data->num_ports *
			    dev_data->num_pfs_per_port;
			split_count = dev_data->num_vfs;

		for (split_id = 0; split_id < split_count; split_id++)
			offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
							  curr_input_regs_arr,

		input_offset += split_data_size;

	/* Cancel pretends (pretend to original PF) */
	qed_fid_pretend(p_hwfn, p_ptt,
			FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
				    p_hwfn->rel_pf_id));
	dev_data->pretend.split_type = SPLIT_TYPE_NONE;
	dev_data->pretend.split_id = 0;
2185 /* Dump reset registers. Returns the dumped size in dwords. */
static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   u32 *dump_buf, bool dump)
	u32 offset = 0, num_regs = 0;

	/* Calculate header size (written with count 0, fixed up below) */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					0, SPLIT_TYPE_NONE, 0, "RESET_REGS");

	/* Write reset registers */
	for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
		const struct dbg_reset_reg *reset_reg;

		reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);

		/* Skip reset registers not present on this chip */
		if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))

		reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 1, false, SPLIT_TYPE_NONE, 0);

	/* Overwrite the header with the real register count */
	qed_grc_dump_regs_hdr(dump_buf,
			      true, num_regs, SPLIT_TYPE_NONE,
2228 /* Dump registers that are modified during GRC Dump and therefore must be
2229 * dumped first. Returns the dumped size in dwords.
static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 *dump_buf, bool dump)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	u32 block_id, offset = 0, stall_regs_offset;
	const struct dbg_attn_reg *attn_reg_arr;
	u8 storm_id, reg_idx, num_attn_regs;
	u32 num_reg_entries = 0;

	/* Write empty header for attention registers */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write parity registers */
	for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
		/* Blocks in reset can't be read during an actual dump */
		if (dev_data->block_in_reset[block_id] && dump)

		attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
						       (enum block_id)block_id,

		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
			const struct dbg_attn_reg *reg_data =
			    &attn_reg_arr[reg_idx];
			u16 modes_buf_offset;

			eval_mode = GET_FIELD(reg_data->mode.data,
					      DBG_MODE_HDR_EVAL_MODE) > 0;
			GET_FIELD(reg_data->mode.data,
				  DBG_MODE_HDR_MODES_BUF_OFFSET);
			!qed_is_mode_match(p_hwfn, &modes_buf_offset))

			/* Mode match: read & dump registers (mask reg,
			 * then status reg).
			 */
			addr = reg_data->mask_address;
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 SPLIT_TYPE_NONE, 0);
			addr = GET_FIELD(reg_data->data,
					 DBG_ATTN_REG_STS_ADDRESS);
			offset += qed_grc_dump_reg_entry(p_hwfn,
							 SPLIT_TYPE_NONE, 0);
			num_reg_entries += 2;

	/* Overwrite header for attention registers */
	qed_grc_dump_regs_hdr(dump_buf,
			      SPLIT_TYPE_NONE, 0, "ATTN_REGS");

	/* Write empty header for stall registers */
	stall_regs_offset = offset;
	offset += qed_grc_dump_regs_hdr(dump_buf,
					false, 0, SPLIT_TYPE_NONE, 0, "REGS");

	/* Write Storm stall status registers */
	for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
		struct storm_defs *storm = &s_storm_defs[storm_id];

		/* Skip storms whose SEM block is held in reset */
		if (dev_data->block_in_reset[storm->sem_block_id] && dump)

		BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
				SEM_FAST_REG_STALLED);
		offset += qed_grc_dump_reg_entry(p_hwfn,
						 false, SPLIT_TYPE_NONE, 0);

	/* Overwrite header for stall registers */
	qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
			      SPLIT_TYPE_NONE, 0, "REGS");
2339 /* Dumps registers that can't be represented in the debug arrays */
static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     u32 *dump_buf, bool dump)
	u32 offset = 0, addr;

	/* Exactly two special entries: RDIF and TDIF debug-error info */
	offset += qed_grc_dump_regs_hdr(dump_buf,
					dump, 2, SPLIT_TYPE_NONE, 0, "REGS");

	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
	 * read/skipped per the skip-cycle arguments on lines not shown here).
	 */
	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,

	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
	qed_grc_dump_reg_entry_skip(p_hwfn,
				    TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2375 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2376 * dwords. The following parameters are dumped:
2377 * - name: dumped only if it's not NULL.
2378 * - addr: in dwords, dumped only if name is NULL.
2379 * - len: in dwords, always dumped.
2380 * - width: dumped if it's not zero.
2381 * - packed: dumped only if it's not false.
2382 * - mem_group: always dumped.
2383 * - is_storm: true only if the memory is related to a Storm.
2384 * - storm_letter: valid only if is_storm is true.
static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
				const char *mem_group, char storm_letter)
	"Unexpected GRC Dump error: dumped memory size must be non-zero\n");

	/* Dump section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "grc_mem", num_params);

	/* Storm-prefixed name: the '?' placeholder in "?STORM_" is
	 * immediately overwritten with the actual storm letter.
	 */
	strcpy(buf, "?STORM_");
	buf[0] = storm_letter;
	strcpy(buf + strlen(buf), name);

	offset += qed_dump_str_param(dump_buf + offset,

	/* When no name is given, dump the address (in bytes) instead */
	u32 addr_in_bytes = DWORDS_TO_BYTES(addr);

	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "addr", addr_in_bytes);

	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);

	/* Dump bit width */
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "width", bit_width);

	/* Optional "packed" parameter (condition on a line not shown) */
	offset += qed_dump_num_param(dump_buf + offset,

	/* Memory group type, storm-prefixed when storm-associated */
	strcpy(buf, "?STORM_");
	buf[0] = storm_letter;
	strcpy(buf + strlen(buf), mem_group);
	strcpy(buf, mem_group);

	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
2461 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
2462 * Returns the dumped size in dwords.
2463 * The addr and len arguments are specified in dwords.
static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    const char *mem_group, char storm_letter)
	/* Memory header followed by the raw memory contents */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       packed, mem_group, storm_letter);
	offset += qed_grc_dump_addr_range(p_hwfn,
					  dump, addr, len, wide_bus,
					  SPLIT_TYPE_NONE, 0);
2496 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct virt_mem_desc input_mems_arr,
				    u32 *dump_buf, bool dump)
	u32 i, offset = 0, input_offset = 0;
	bool mode_match = true;

	/* Walk the input array: condition header followed by memory entries */
	while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
		const struct dbg_dump_cond_hdr *cond_hdr;
		u16 modes_buf_offset;

		(const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;

		/* Check required mode */
		eval_mode = GET_FIELD(cond_hdr->mode.data,
				      DBG_MODE_HDR_EVAL_MODE) > 0;
		GET_FIELD(cond_hdr->mode.data,
			  DBG_MODE_HDR_MODES_BUF_OFFSET);
		mode_match = qed_is_mode_match(p_hwfn,

		/* Skip the whole group on mode mismatch */
		input_offset += cond_hdr->data_size;

		for (i = 0; i < num_entries;
		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
			const struct dbg_dump_mem *mem =
			    (const struct dbg_dump_mem *)((u32 *)
			const struct dbg_block *block;
			char storm_letter = 0;
			u32 mem_addr, mem_len;

			mem_group_id = GET_FIELD(mem->dword0,
						 DBG_DUMP_MEM_MEM_GROUP_ID);
			/* Corrupt binary buffer - bail out on this entry */
			if (mem_group_id >= MEM_GROUPS_NUM) {
				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");

			/* Honor GRC parameters selecting memory groups */
			if (!qed_grc_is_mem_included(p_hwfn,

			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
			mem_wide_bus = GET_FIELD(mem->dword1,
						 DBG_DUMP_MEM_WIDE_BUS);

			block = get_dbg_block(p_hwfn,
					      cond_hdr->block_id);

			/* If memory is associated with Storm,
			 * update storm details
			 */
			if (block->associated_storm_letter)
				storm_letter = block->associated_storm_letter;

			offset += qed_grc_dump_mem(p_hwfn,
						   s_mem_group_names[mem_group_id],
2590 /* Dumps GRC memories according to the input array dump_mem.
2591 * Returns the dumped size in dwords.
static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *dump_buf, bool dump)
	struct virt_mem_desc *dbg_buf =
	    &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
	u32 offset = 0, input_offset = 0;

	/* Each iteration handles one split header + its memory entries */
	while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
		const struct dbg_dump_split_hdr *split_hdr;
		struct virt_mem_desc curr_input_mems_arr;
		enum init_split_types split_type;
		u32 split_data_size;

		(const struct dbg_dump_split_hdr *)dbg_buf->ptr +
		split_type = GET_FIELD(split_hdr->hdr,
				       DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
		split_data_size = GET_FIELD(split_hdr->hdr,
					    DBG_DUMP_SPLIT_HDR_DATA_SIZE);
		curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
		curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);

		/* Only non-split memories are supported; others are skipped
		 * with a notice.
		 */
		if (split_type == SPLIT_TYPE_NONE)
			offset += qed_grc_dump_mem_entries(p_hwfn,
							   curr_input_mems_arr,
			"Dumping split memories is currently not supported\n");

		input_offset += split_data_size;
2633 /* Dumps GRC context data for the specified Storm.
2634 * Returns the dumped size in dwords.
2635 * The lid_size argument is specified in quad-regs.
static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 enum cm_ctx_types ctx_type, u8 storm_id)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct storm_defs *storm = &s_storm_defs[storm_id];
	u32 i, lid, lid_size, total_size;
	u32 rd_reg_addr, offset = 0;

	/* Convert quad-regs to dwords */
	lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;

	total_size = num_lids * lid_size;

	offset += qed_grc_dump_mem_hdr(p_hwfn,
				       false, name, storm->letter);

	/* When not dumping, only the size is accounted for */
	return offset + total_size;

	rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);

	/* Dump context data: for each lid, select each of its dwords via
	 * the write-address register, then read it back.
	 * NOTE(review): (i << 9) | lid presumably packs dword index (high
	 * bits) and lid (low 9 bits) into the select register - confirm
	 * against the CM context HW spec.
	 */
	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size; i++) {
			p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
			offset += qed_grc_dump_addr_range(p_hwfn,
							  SPLIT_TYPE_NONE, 0);
2691 /* Dumps GRC contexts. Returns the dumped size in dwords. */
2692 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
2693 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
/* For every Storm selected by the GRC parameters, dump its four
 * context types (Conn/Task x AG/ST) via qed_grc_dump_ctx_data().
 */
2698 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2699 if (!qed_grc_is_storm_included(p_hwfn,
2700 (enum dbg_storms)storm_id))
2703 /* Dump Conn AG context size */
2704 offset += qed_grc_dump_ctx_data(p_hwfn,
2710 CM_CTX_CONN_AG, storm_id);
2712 /* Dump Conn ST context size */
2713 offset += qed_grc_dump_ctx_data(p_hwfn,
2719 CM_CTX_CONN_ST, storm_id);
2721 /* Dump Task AG context size */
2722 offset += qed_grc_dump_ctx_data(p_hwfn,
2728 CM_CTX_TASK_AG, storm_id);
2730 /* Dump Task ST context size */
2731 offset += qed_grc_dump_ctx_data(p_hwfn,
2737 CM_CTX_TASK_ST, storm_id);
/* Bit positions in the SEM fast-memory VFC status register */
2743 #define VFC_STATUS_RESP_READY_BIT 0
2744 #define VFC_STATUS_BUSY_BIT 1
2745 #define VFC_STATUS_SENDING_CMD_BIT 2
/* VFC response polling: base delay per iteration (scaled by the HW
 * type's delay factor) and maximum number of polling iterations.
 */
2747 #define VFC_POLLING_DELAY_MS 1
2748 #define VFC_POLLING_COUNT 20
2750 /* Reads data from VFC. Returns the number of dwords read (0 on error).
2751 * Sizes are specified in dwords.
2753 static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
2754 struct qed_ptt *p_ptt,
2755 struct storm_defs *storm,
2760 u32 resp_size, u32 *dump_buf)
2762 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2763 u32 vfc_status, polling_ms, polling_count = 0, i;
2764 u32 reg_addr, sem_base;
2765 bool is_ready = false;
/* All VFC registers are addressed relative to the Storm's SEM fast
 * memory base. Polling delay is scaled per HW type (e.g. emulation).
 */
2767 sem_base = storm->sem_fast_mem_addr;
2768 polling_ms = VFC_POLLING_DELAY_MS *
2769 s_hw_type_defs[dev_data->hw_type].delay_factor;
2771 /* Write VFC command */
2774 sem_base + SEM_FAST_REG_VFC_DATA_WR,
2775 cmd_data, cmd_size);
2777 /* Write VFC address */
2780 sem_base + SEM_FAST_REG_VFC_ADDR,
2781 addr_data, addr_size);
/* Read the response one dword at a time, polling the status register
 * for RESP_READY before each read.
 */
2784 for (i = 0; i < resp_size; i++) {
2785 /* Poll until ready */
2787 reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
2788 qed_grc_dump_addr_range(p_hwfn,
2792 BYTES_TO_DWORDS(reg_addr),
2794 false, SPLIT_TYPE_NONE, 0);
2795 is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
/* Bail out of the poll loop after VFC_POLLING_COUNT attempts
 * (error path elided here).
 */
2798 if (polling_count++ == VFC_POLLING_COUNT)
2803 } while (!is_ready);
/* Response ready - read the next dword from the VFC data register */
2805 reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
2806 qed_grc_dump_addr_range(p_hwfn,
2810 BYTES_TO_DWORDS(reg_addr),
2811 1, false, SPLIT_TYPE_NONE, 0);
2817 /* Dump VFC CAM. Returns the dumped size in dwords. */
2818 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
2819 struct qed_ptt *p_ptt,
2820 u32 *dump_buf, bool dump, u8 storm_id)
2822 u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
2823 struct storm_defs *storm = &s_storm_defs[storm_id];
/* Zero-initialized VFC command/address buffers; only the fields set
 * below (opcode, row) are non-zero.
 */
2824 u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
2825 u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
2826 u32 row, offset = 0;
2828 offset += qed_grc_dump_mem_hdr(p_hwfn,
2835 false, "vfc_cam", storm->letter);
/* Size-only query: account for the data without reading it */
2838 return offset + total_size;
2840 /* Prepare CAM address */
2841 SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
2843 /* Read VFC CAM data */
2844 for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
2845 SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
2846 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2852 VFC_CAM_ADDR_DWORDS,
2853 VFC_CAM_RESP_DWORDS,
2860 /* Dump VFC RAM. Returns the dumped size in dwords. */
2861 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
2862 struct qed_ptt *p_ptt,
2865 u8 storm_id, struct vfc_ram_defs *ram_defs)
2867 u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
2868 struct storm_defs *storm = &s_storm_defs[storm_id];
/* Zero-initialized command/address buffers; opcode and row are set
 * below, everything else stays 0.
 */
2869 u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
2870 u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
2871 u32 row, offset = 0;
2873 offset += qed_grc_dump_mem_hdr(p_hwfn,
2881 ram_defs->type_name,
/* Size-only query: account for the data without reading it */
2885 return offset + total_size;
2887 /* Prepare RAM address */
2888 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
2890 /* Read VFC RAM data */
/* Unlike the CAM dump, the row index is part of the VFC address here */
2891 for (row = ram_defs->base_row;
2892 row < ram_defs->base_row + ram_defs->num_rows; row++) {
2893 SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
2894 offset += qed_grc_dump_read_from_vfc(p_hwfn,
2900 VFC_RAM_ADDR_DWORDS,
2901 VFC_RAM_RESP_DWORDS,
2908 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
2909 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
2910 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
/* For each included Storm that has a VFC, dump its CAM and then every
 * VFC RAM type.
 */
2915 for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2916 if (!qed_grc_is_storm_included(p_hwfn,
2917 (enum dbg_storms)storm_id) ||
2918 !s_storm_defs[storm_id].has_vfc)
2922 offset += qed_grc_dump_vfc_cam(p_hwfn,
2928 for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
2929 offset += qed_grc_dump_vfc_ram(p_hwfn,
2934 &s_vfc_ram_defs[i]);
2940 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
2941 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
2942 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
2944 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2948 for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
2949 u32 rss_addr, num_entries, total_dwords;
2950 struct rss_mem_defs *rss_defs;
2951 u32 addr, num_dwords_to_read;
2954 rss_defs = &s_rss_mem_defs[rss_mem_id];
2955 rss_addr = rss_defs->addr;
2956 num_entries = rss_defs->num_entries[dev_data->chip_id];
/* entry_width is in bits, hence the division by 32 to get dwords */
2957 total_dwords = (num_entries * rss_defs->entry_width) / 32;
/* 16-bit entries are packed two per dword */
2958 packed = (rss_defs->entry_width == 16);
2960 offset += qed_grc_dump_mem_hdr(p_hwfn,
2966 rss_defs->entry_width,
2968 rss_defs->type_name, 0);
/* Size-only query: account for the data without reading it */
2972 offset += total_dwords;
/* Read the RSS RAM through its address/data register pair, up to
 * RSS_REG_RSS_RAM_DATA_SIZE dwords per address write.
 */
2976 addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
2977 while (total_dwords) {
2978 num_dwords_to_read = min_t(u32,
2979 RSS_REG_RSS_RAM_DATA_SIZE,
2981 qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
2982 offset += qed_grc_dump_addr_range(p_hwfn,
2989 SPLIT_TYPE_NONE, 0);
2990 total_dwords -= num_dwords_to_read;
2998 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
2999 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3000 struct qed_ptt *p_ptt,
3001 u32 *dump_buf, bool dump, u8 big_ram_id)
3003 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3004 u32 block_size, ram_size, offset = 0, reg_val, i;
/* Name templates; the "???" prefix is overwritten below with the
 * actual instance name (e.g. BRB/BTB/BMB).
 */
3005 char mem_name[12] = "???_BIG_RAM";
3006 char type_name[8] = "???_RAM";
3007 struct big_ram_defs *big_ram;
3009 big_ram = &s_big_ram_defs[big_ram_id];
3010 ram_size = big_ram->ram_size[dev_data->chip_id];
/* Block size (in bits) depends on a per-chip "is 256b" register bit */
3012 reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
3013 block_size = reg_val &
3014 BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
/* Deliberate use of strncpy without NUL-termination: only the
 * BIG_RAM_NAME_LEN-char instance prefix is spliced over "???",
 * preserving the "_RAM"/"_BIG_RAM" suffix and terminator already in
 * the template (presumably BIG_RAM_NAME_LEN == 3 - TODO confirm).
 */
3017 strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3018 strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
3020 /* Dump memory header */
3021 offset += qed_grc_dump_mem_hdr(p_hwfn,
3028 false, type_name, 0);
3030 /* Read and dump Big RAM data */
/* Size-only query: account for the data without reading it */
3032 return offset + ram_size;
/* Read the RAM through its address/data register pair,
 * BRB_REG_BIG_RAM_DATA_SIZE dwords per address write.
 */
3035 for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
3039 qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3040 addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3041 len = BRB_REG_BIG_RAM_DATA_SIZE;
3042 offset += qed_grc_dump_addr_range(p_hwfn,
3048 false, SPLIT_TYPE_NONE, 0);
3054 /* Dumps MCP scratchpad. Returns the dumped size in dwords. */
3055 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3056 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3058 bool block_enable[MAX_BLOCK_ID] = { 0 };
3059 u32 offset = 0, addr;
3060 bool halted = false;
/* Halt the MCP while dumping so its memories are stable; skipped when
 * MCP access is disabled via the NO_MCP GRC parameter.
 */
3063 if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3064 halted = !qed_mcp_halt(p_hwfn, p_ptt);
/* Halt failure is reported but the dump proceeds anyway */
3066 DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3069 /* Dump MCP scratchpad */
3070 offset += qed_grc_dump_mem(p_hwfn,
3075 BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3076 MCP_REG_SCRATCH_SIZE,
3077 false, 0, false, "MCP", 0);
3079 /* Dump MCP cpu_reg_file */
3080 offset += qed_grc_dump_mem(p_hwfn,
3085 BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3086 MCP_REG_CPU_REG_FILE_SIZE,
3087 false, 0, false, "MCP", 0);
3089 /* Dump MCP registers */
/* Only the MCP block is enabled in the block mask here */
3090 block_enable[BLOCK_MCP] = true;
3091 offset += qed_grc_dump_registers(p_hwfn,
3094 dump, block_enable, "MCP");
3096 /* Dump required non-MCP registers */
3097 offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3098 dump, 1, SPLIT_TYPE_NONE, 0,
3100 addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3101 offset += qed_grc_dump_reg_entry(p_hwfn,
3107 false, SPLIT_TYPE_NONE, 0);
/* Resume the MCP only if we actually halted it above */
3110 if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3111 DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3116 /* Dumps the tbus indirect memory for all PHYs.
3117 * Returns the dumped size in dwords.
3119 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3120 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3122 u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3126 for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3127 u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3128 struct phy_defs *phy_defs;
/* Compute absolute GRC addresses of this PHY's tbus address/data
 * lo/hi register pairs from its base address.
 */
3131 phy_defs = &s_phy_defs[phy_id];
3132 addr_lo_addr = phy_defs->base_addr +
3133 phy_defs->tbus_addr_lo_addr;
3134 addr_hi_addr = phy_defs->base_addr +
3135 phy_defs->tbus_addr_hi_addr;
3136 data_lo_addr = phy_defs->base_addr +
3137 phy_defs->tbus_data_lo_addr;
3138 data_hi_addr = phy_defs->base_addr +
3139 phy_defs->tbus_data_hi_addr;
/* snprintf < 0 indicates an encoding error; reported as an
 * unexpected debug error.
 */
3141 if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3142 phy_defs->phy_name) < 0)
3144 "Unexpected debug error: invalid PHY memory name\n");
3146 offset += qed_grc_dump_mem_hdr(p_hwfn,
3151 PHY_DUMP_SIZE_DWORDS,
3152 16, true, mem_name, 0);
/* Size-only query: account for the data without reading it */
3155 offset += PHY_DUMP_SIZE_DWORDS;
/* Data is read byte-wise: the hi address selects a 256-entry page,
 * the lo address selects the entry, and each entry yields one byte
 * from the data-lo and one from the data-hi register.
 */
3159 bytes_buf = (u8 *)(dump_buf + offset);
3160 for (tbus_hi_offset = 0;
3161 tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3163 qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3164 for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3167 p_ptt, addr_lo_addr, tbus_lo_offset);
3168 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3171 *(bytes_buf++) = (u8)qed_rd(p_hwfn,
3177 offset += PHY_DUMP_SIZE_DWORDS;
/* Forward declarations: the NVRAM helpers are defined later in this
 * file but are needed by qed_grc_dump_mcp_hw_dump() below.
 */
3183 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3184 struct qed_ptt *p_ptt,
3186 u32 *nvram_offset_bytes,
3187 u32 *nvram_size_bytes);
3189 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3190 struct qed_ptt *p_ptt,
3191 u32 nvram_offset_bytes,
3192 u32 nvram_size_bytes, u32 *ret_buf);
3194 /* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
3195 static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
3196 struct qed_ptt *p_ptt,
3197 u32 *dump_buf, bool dump)
3199 u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
3200 u32 hw_dump_size_dwords = 0, offset = 0;
3201 enum dbg_status status;
3203 /* Read HW dump image from NVRAM */
/* Locate the HW-dump image; on failure the function bails out early
 * (return elided here).
 */
3204 status = qed_find_nvram_image(p_hwfn,
3206 NVM_TYPE_HW_DUMP_OUT,
3207 &hw_dump_offset_bytes,
3208 &hw_dump_size_bytes);
3209 if (status != DBG_STATUS_OK)
3212 hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
3214 /* Dump HW dump image section */
3215 offset += qed_dump_section_hdr(dump_buf + offset,
3216 dump, "mcp_hw_dump", 1);
3217 offset += qed_dump_num_param(dump_buf + offset,
3218 dump, "size", hw_dump_size_dwords);
3220 /* Read MCP HW dump image into dump buffer */
3221 if (dump && hw_dump_size_dwords) {
3222 status = qed_nvram_read(p_hwfn,
3224 hw_dump_offset_bytes,
3225 hw_dump_size_bytes, dump_buf + offset);
3226 if (status != DBG_STATUS_OK) {
3228 "Failed to read MCP HW Dump image from NVRAM\n");
/* On success the read data is accounted into the offset */
3232 offset += hw_dump_size_dwords;
3237 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3238 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3239 struct qed_ptt *p_ptt,
3240 u32 *dump_buf, bool dump)
3242 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3243 u32 block_id, line_id, offset = 0, addr, len;
3245 /* Don't dump static debug if a debug bus recording is in progress */
3246 if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3250 /* Disable debug bus in all blocks */
3251 qed_bus_disable_blocks(p_hwfn, p_ptt);
/* Reset and reconfigure the DBG block for static-line readout */
3253 qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3255 p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
3257 p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3258 qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3259 qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3262 /* Dump all static debug lines for each relevant block */
3263 for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3264 const struct dbg_block_chip *block_per_chip;
3265 const struct dbg_block *block;
3266 bool is_removed, has_dbg_bus;
3267 u16 modes_buf_offset;
3271 qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
3272 is_removed = GET_FIELD(block_per_chip->flags,
3273 DBG_BLOCK_CHIP_IS_REMOVED);
3274 has_dbg_bus = GET_FIELD(block_per_chip->flags,
3275 DBG_BLOCK_CHIP_HAS_DBG_BUS);
3277 /* read+clear for NWS parity is not working, skip NWS block */
3278 if (block_id == BLOCK_NWS)
/* If the block's debug bus is mode-dependent, honour the current
 * mode match; a mismatch disables the bus for this block.
 */
3281 if (!is_removed && has_dbg_bus &&
3282 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3283 DBG_MODE_HDR_EVAL_MODE) > 0) {
3285 GET_FIELD(block_per_chip->dbg_bus_mode.data,
3286 DBG_MODE_HDR_MODES_BUF_OFFSET);
3287 if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
3288 has_dbg_bus = false;
3291 if (is_removed || !has_dbg_bus)
3294 block_dwords = NUM_DBG_LINES(block_per_chip) *
3295 STATIC_DEBUG_LINE_DWORDS;
3297 /* Dump static section params */
3298 block = get_dbg_block(p_hwfn, (enum block_id)block_id);
3299 offset += qed_grc_dump_mem_hdr(p_hwfn,
3305 32, false, "STATIC", 0);
/* Size-only query: account for the data without reading it */
3308 offset += block_dwords;
3312 /* If all lines are invalid - dump zeros */
3313 if (dev_data->block_in_reset[block_id]) {
3314 memset(dump_buf + offset, 0,
3315 DWORDS_TO_BYTES(block_dwords));
3316 offset += block_dwords;
3320 /* Enable block's client */
3321 qed_bus_enable_clients(p_hwfn,
3323 BIT(block_per_chip->dbg_client_id));
/* Read each static debug line through the calendar-out register */
3325 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3326 len = STATIC_DEBUG_LINE_DWORDS;
3327 for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
3329 /* Configure debug line ID */
3330 qed_bus_config_dbg_line(p_hwfn,
3332 (enum block_id)block_id,
3333 (u8)line_id, 0xf, 0, 0, 0);
3335 /* Read debug line info */
3336 offset += qed_grc_dump_addr_range(p_hwfn,
3342 true, SPLIT_TYPE_NONE,
3346 /* Disable block's client and debug output */
3347 qed_bus_enable_clients(p_hwfn, p_ptt, 0)
3348 qed_bus_config_dbg_line(p_hwfn, p_ptt,
3349 (enum block_id)block_id, 0, 0, 0, 0, 0);
/* Final cleanup: disable the DBG block and all clients */
3353 qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3354 qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3360 /* Performs GRC Dump to the specified buffer.
3361 * Returns the dumped size in dwords.
/* Top-level GRC dump orchestrator: emits global params, then each
 * enabled section (registers, memories, MCP, contexts, RSS, Big RAMs,
 * VFC, PHY, MCP HW dump, static debug) in a hardware-safe order, and
 * finally restores device state (unstall storms, clear parities,
 * unmask parities). Returns the status; the dumped size is returned
 * through num_dumped_dwords.
 */
3363 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3364 struct qed_ptt *p_ptt,
3366 bool dump, u32 *num_dumped_dwords)
3368 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3369 u32 dwords_read, offset = 0;
3370 bool parities_masked = false;
3373 *num_dumped_dwords = 0;
3374 dev_data->num_regs_read = 0;
3376 /* Update reset state */
3378 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3380 /* Dump global params */
3381 offset += qed_dump_common_global_params(p_hwfn,
3383 dump_buf + offset, dump, 4);
3384 offset += qed_dump_str_param(dump_buf + offset,
3385 dump, "dump-type", "grc-dump");
3386 offset += qed_dump_num_param(dump_buf + offset,
3390 offset += qed_dump_num_param(dump_buf + offset,
3394 offset += qed_dump_num_param(dump_buf + offset,
3395 dump, "num-ports", dev_data->num_ports);
3397 /* Dump reset registers (dumped before taking blocks out of reset ) */
3398 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3399 offset += qed_grc_dump_reset_regs(p_hwfn,
3401 dump_buf + offset, dump);
3403 /* Take all blocks out of reset (using reset registers) */
3405 qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
3406 qed_update_blocks_reset_state(p_hwfn, p_ptt);
3409 /* Disable all parities using MFW command */
3411 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3412 parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3413 if (!parities_masked) {
3415 "Failed to mask parities using MFW\n");
/* A mask failure is fatal only in parity-safe mode */
3416 if (qed_grc_get_param
3417 (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3418 return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3422 /* Dump modified registers (dumped before modifying them) */
3423 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3424 offset += qed_grc_dump_modified_regs(p_hwfn,
3426 dump_buf + offset, dump);
/* Storms must be stalled before IOR/VFC access */
3430 (qed_grc_is_included(p_hwfn,
3431 DBG_GRC_PARAM_DUMP_IOR) ||
3432 qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3433 qed_grc_stall_storms(p_hwfn, p_ptt, true);
3436 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3437 bool block_enable[MAX_BLOCK_ID];
3439 /* Dump all blocks except MCP */
3440 for (i = 0; i < MAX_BLOCK_ID; i++)
3441 block_enable[i] = true;
3442 block_enable[BLOCK_MCP] = false;
3443 offset += qed_grc_dump_registers(p_hwfn,
3448 block_enable, NULL);
3450 /* Dump special registers */
3451 offset += qed_grc_dump_special_regs(p_hwfn,
3453 dump_buf + offset, dump);
3457 offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3460 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
3461 offset += qed_grc_dump_mcp(p_hwfn,
3462 p_ptt, dump_buf + offset, dump);
3465 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
3466 offset += qed_grc_dump_ctx(p_hwfn,
3467 p_ptt, dump_buf + offset, dump);
3469 /* Dump RSS memories */
3470 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
3471 offset += qed_grc_dump_rss(p_hwfn,
3472 p_ptt, dump_buf + offset, dump);
3475 for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
3476 if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
3477 offset += qed_grc_dump_big_ram(p_hwfn,
3483 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
3484 dwords_read = qed_grc_dump_vfc(p_hwfn,
3485 p_ptt, dump_buf + offset, dump);
3486 offset += dwords_read;
/* A zero-length VFC read indicates a VFC read error */
3488 return DBG_STATUS_VFC_READ_ERROR;
/* PHY tbus dump is only supported on K2 ASIC */
3492 if (qed_grc_is_included(p_hwfn,
3493 DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
3494 CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
3495 offset += qed_grc_dump_phy(p_hwfn,
3496 p_ptt, dump_buf + offset, dump);
3498 /* Dump MCP HW Dump */
/* Note: removed a vestigial "&& 1" (generated-code leftover) from the
 * condition below; it had no effect on the result.
 */
3499 if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
3500 !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
3501 offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
3503 dump_buf + offset, dump);
3505 /* Dump static debug data (only if not during debug bus recording) */
3506 if (qed_grc_is_included(p_hwfn,
3507 DBG_GRC_PARAM_DUMP_STATIC) &&
3508 (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
3509 offset += qed_grc_dump_static_debug(p_hwfn,
3511 dump_buf + offset, dump);
3513 /* Dump last section */
3514 offset += qed_dump_last_section(dump_buf, offset, dump);
3517 /* Unstall storms */
3518 if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
3519 qed_grc_stall_storms(p_hwfn, p_ptt, false);
3521 /* Clear parity status */
3522 qed_grc_clear_all_prty(p_hwfn, p_ptt);
3524 /* Enable all parities using MFW command */
/* Only unmask if we successfully masked earlier */
3525 if (parities_masked)
3526 qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
3529 *num_dumped_dwords = offset;
3531 return DBG_STATUS_OK;
3534 /* Writes the specified failing Idle Check rule to the specified buffer.
3535 * Returns the dumped size in dwords.
3537 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
3538 struct qed_ptt *p_ptt,
3543 const struct dbg_idle_chk_rule *rule,
3544 u16 fail_entry_id, u32 *cond_reg_values)
3546 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3547 const struct dbg_idle_chk_cond_reg *cond_regs;
3548 const struct dbg_idle_chk_info_reg *info_regs;
3549 u32 i, next_reg_offset = 0, offset = 0;
3550 struct dbg_idle_chk_result_hdr *hdr;
3551 const union dbg_idle_chk_reg *regs;
/* The rule's registers live in the IDLE_CHK_REGS binary array:
 * condition registers first, then info registers.
 */
3554 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
3555 regs = (const union dbg_idle_chk_reg *)
3556 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3558 cond_regs = &regs[0].cond_reg;
3559 info_regs = &regs[rule->num_cond_regs].info_reg;
3561 /* Dump rule data */
3563 memset(hdr, 0, sizeof(*hdr));
3564 hdr->rule_id = rule_id;
3565 hdr->mem_entry_id = fail_entry_id;
3566 hdr->severity = rule->severity;
3567 hdr->num_dumped_cond_regs = rule->num_cond_regs;
3570 offset += IDLE_CHK_RESULT_HDR_DWORDS;
3572 /* Dump condition register values */
3573 for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
3574 const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
3575 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3578 (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
3580 /* Write register header */
/* When not dumping, just account for header + values */
3582 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
3587 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3588 memset(reg_hdr, 0, sizeof(*reg_hdr));
3589 reg_hdr->start_entry = reg->start_entry;
3590 reg_hdr->size = reg->entry_size;
/* A register is flagged as a memory if it has multiple entries or a
 * non-zero start entry.
 */
3591 SET_FIELD(reg_hdr->data,
3592 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
3593 reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
3594 SET_FIELD(reg_hdr->data,
3595 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
3597 /* Write register values */
/* Values were already read by the caller into cond_reg_values */
3598 for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
3599 dump_buf[offset] = cond_reg_values[next_reg_offset];
3602 /* Dump info register values */
3603 for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
3604 const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
3607 /* Check if register's block is in reset */
/* When not dumping, account for the worst case size */
3609 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
3613 block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
3614 if (block_id >= MAX_BLOCK_ID) {
3615 DP_NOTICE(p_hwfn, "Invalid block_id\n");
3619 if (!dev_data->block_in_reset[block_id]) {
3620 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
3621 bool wide_bus, eval_mode, mode_match = true;
3622 u16 modes_buf_offset;
3625 reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
3626 (dump_buf + offset);
/* Mode-dependent info registers are dumped only when the current
 * chip modes match.
 */
3629 eval_mode = GET_FIELD(reg->mode.data,
3630 DBG_MODE_HDR_EVAL_MODE) > 0;
3633 GET_FIELD(reg->mode.data,
3634 DBG_MODE_HDR_MODES_BUF_OFFSET);
3636 qed_is_mode_match(p_hwfn,
3643 addr = GET_FIELD(reg->data,
3644 DBG_IDLE_CHK_INFO_REG_ADDRESS);
3645 wide_bus = GET_FIELD(reg->data,
3646 DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
3648 /* Write register header */
3649 offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
3650 hdr->num_dumped_info_regs++;
3651 memset(reg_hdr, 0, sizeof(*reg_hdr));
3652 reg_hdr->size = reg->size;
/* Info register IDs continue after the condition register IDs */
3653 SET_FIELD(reg_hdr->data,
3654 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
3655 rule->num_cond_regs + reg_id);
3657 /* Write register values */
3658 offset += qed_grc_dump_addr_range(p_hwfn,
3663 reg->size, wide_bus,
3664 SPLIT_TYPE_NONE, 0);
3671 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
3673 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3674 u32 *dump_buf, bool dump,
3675 const struct dbg_idle_chk_rule *input_rules,
3676 u32 num_input_rules, u32 *num_failing_rules)
3678 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
/* Scratch buffer for one entry's worth of condition register values */
3679 u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
3684 *num_failing_rules = 0;
3686 for (i = 0; i < num_input_rules; i++) {
3687 const struct dbg_idle_chk_cond_reg *cond_regs;
3688 const struct dbg_idle_chk_rule *rule;
3689 const union dbg_idle_chk_reg *regs;
3690 u16 num_reg_entries = 1;
3691 bool check_rule = true;
3692 const u32 *imm_values;
/* Resolve this rule's registers and immediate values from the
 * idle-check binary arrays.
 */
3694 rule = &input_rules[i];
3695 regs = (const union dbg_idle_chk_reg *)
3696 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
3698 cond_regs = &regs[0].cond_reg;
3700 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
3703 /* Check if all condition register blocks are out of reset, and
3704 * find maximal number of entries (all condition registers that
3705 * are memories must have the same size, which is > 1).
3707 for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
3710 GET_FIELD(cond_regs[reg_id].data,
3711 DBG_IDLE_CHK_COND_REG_BLOCK_ID);
3713 if (block_id >= MAX_BLOCK_ID) {
3714 DP_NOTICE(p_hwfn, "Invalid block_id\n");
/* A rule whose register block is in reset cannot be checked */
3718 check_rule = !dev_data->block_in_reset[block_id];
3719 if (cond_regs[reg_id].num_entries > num_reg_entries)
3720 num_reg_entries = cond_regs[reg_id].num_entries;
3723 if (!check_rule && dump)
/* Size-only query: assume worst case - every entry fails */
3727 u32 entry_dump_size =
3728 qed_idle_chk_dump_failure(p_hwfn,
3737 offset += num_reg_entries * entry_dump_size;
3738 (*num_failing_rules) += num_reg_entries;
3742 /* Go over all register entries (number of entries is the same
3743 * for all condition registers).
3745 for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
3746 u32 next_reg_offset = 0;
3748 /* Read current entry of all condition registers */
3749 for (reg_id = 0; reg_id < rule->num_cond_regs;
3751 const struct dbg_idle_chk_cond_reg *reg =
3753 u32 padded_entry_size, addr;
3756 /* Find GRC address (if it's a memory, the
3757 * address of the specific entry is calculated).
3759 addr = GET_FIELD(reg->data,
3760 DBG_IDLE_CHK_COND_REG_ADDRESS);
3762 GET_FIELD(reg->data,
3763 DBG_IDLE_CHK_COND_REG_WIDE_BUS);
3764 if (reg->num_entries > 1 ||
3765 reg->start_entry > 0) {
/* Memory entries are padded to a power of two for addressing */
3767 reg->entry_size > 1 ?
3768 roundup_pow_of_two(reg->entry_size) :
3770 addr += (reg->start_entry + entry_id) *
3774 /* Read registers */
/* Guard against overflowing the on-stack values buffer */
3775 if (next_reg_offset + reg->entry_size >=
3776 IDLE_CHK_MAX_ENTRIES_SIZE) {
3778 "idle check registers entry is too large\n");
3783 qed_grc_dump_addr_range(p_hwfn, p_ptt,
3789 SPLIT_TYPE_NONE, 0);
3792 /* Call rule condition function.
3793 * If returns true, it's a failure.
3795 if ((*cond_arr[rule->cond_id]) (cond_reg_values,
3797 offset += qed_idle_chk_dump_failure(p_hwfn,
3805 (*num_failing_rules)++;
3813 /* Performs Idle Check Dump to the specified buffer.
3814 * Returns the dumped size in dwords.
3816 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
3817 struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3819 struct virt_mem_desc *dbg_buf =
3820 &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
3821 u32 num_failing_rules_offset, offset = 0,
3822 input_offset = 0, num_failing_rules = 0;
3824 /* Dump global params - 1 must match below amount of params */
3825 offset += qed_dump_common_global_params(p_hwfn,
3827 dump_buf + offset, dump, 1);
3828 offset += qed_dump_str_param(dump_buf + offset,
3829 dump, "dump-type", "idle-chk");
3831 /* Dump idle check section header with a single parameter */
3832 offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
/* num_rules is written as 0 now and patched with the real count at
 * the remembered offset once all rules have been evaluated.
 */
3833 num_failing_rules_offset = offset;
3834 offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
/* Walk the rules binary array one condition header + payload at a
 * time.
 */
3836 while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
3837 const struct dbg_idle_chk_cond_hdr *cond_hdr =
3838 (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
3840 bool eval_mode, mode_match = true;
3841 u32 curr_failing_rules;
3842 u16 modes_buf_offset;
/* Mode-dependent rule groups are evaluated only when the current
 * chip modes match.
 */
3845 eval_mode = GET_FIELD(cond_hdr->mode.data,
3846 DBG_MODE_HDR_EVAL_MODE) > 0;
3849 GET_FIELD(cond_hdr->mode.data,
3850 DBG_MODE_HDR_MODES_BUF_OFFSET);
3851 mode_match = qed_is_mode_match(p_hwfn,
3856 const struct dbg_idle_chk_rule *rule =
3857 (const struct dbg_idle_chk_rule *)((u32 *)
3860 u32 num_input_rules =
3861 cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
3863 qed_idle_chk_dump_rule_entries(p_hwfn,
3870 &curr_failing_rules);
3871 num_failing_rules += curr_failing_rules;
3874 input_offset += cond_hdr->data_size;
3877 /* Overwrite num_rules parameter */
3879 qed_dump_num_param(dump_buf + num_failing_rules_offset,
3880 dump, "num_rules", num_failing_rules);
3882 /* Dump last section */
3883 offset += qed_dump_last_section(dump_buf, offset, dump);
3888 /* Finds the meta data image in NVRAM */
3889 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
3890 struct qed_ptt *p_ptt,
3892 u32 *nvram_offset_bytes,
3893 u32 *nvram_size_bytes)
3895 u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
3896 struct mcp_file_att file_att;
3899 /* Call NVRAM get file command */
3900 nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
3902 DRV_MSG_CODE_NVM_GET_FILE_ATT,
3906 &ret_txn_size, (u32 *)&file_att);
3908 /* Check response */
/* Both the command result and the MFW response code must be OK */
3910 (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3911 return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
3913 /* Update return values */
3914 *nvram_offset_bytes = file_att.nvm_start_addr;
3915 *nvram_size_bytes = file_att.len;
3919 "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
3920 image_type, *nvram_offset_bytes, *nvram_size_bytes);
3922 /* Check alignment */
/* Image size must be dword-aligned for the dword-based readers */
3923 if (*nvram_size_bytes & 0x3)
3924 return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
3926 return DBG_STATUS_OK;
3929 /* Reads data from NVRAM */
3930 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
3931 struct qed_ptt *p_ptt,
3932 u32 nvram_offset_bytes,
3933 u32 nvram_size_bytes, u32 *ret_buf)
3935 u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
/* Signed so the > 0 loop condition terminates even if the MFW returns
 * more bytes than requested.
 */
3936 s32 bytes_left = nvram_size_bytes;
3937 u32 read_offset = 0, param = 0;
3941 "nvram_read: reading image of size %d bytes from NVRAM\n",
/* Read in chunks of at most MCP_DRV_NVM_BUF_LEN bytes */
3947 MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
3949 /* Call NVRAM read command */
3950 SET_MFW_FIELD(param,
3951 DRV_MB_PARAM_NVM_OFFSET,
3952 nvram_offset_bytes + read_offset);
3953 SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
3954 if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3955 DRV_MSG_CODE_NVM_READ_NVRAM, param,
3957 &ret_mcp_param, &ret_read_size,
3958 (u32 *)((u8 *)ret_buf + read_offset)))
3959 return DBG_STATUS_NVRAM_READ_FAILED;
3961 /* Check response */
3962 if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
3963 return DBG_STATUS_NVRAM_READ_FAILED;
3965 /* Update read offset */
/* Advance by the actual number of bytes the MFW returned */
3966 read_offset += ret_read_size;
3967 bytes_left -= ret_read_size;
3968 } while (bytes_left > 0);
3970 return DBG_STATUS_OK;
3973 /* Get info on the MCP Trace data in the scratchpad:
3974 * - trace_data_grc_addr (OUT): trace data GRC address in bytes
3975 * - trace_data_size (OUT): trace data size in bytes (without the header)
3977 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
3978 struct qed_ptt *p_ptt,
3979 u32 *trace_data_grc_addr,
3980 u32 *trace_data_size)
3982 u32 spad_trace_offsize, signature;
3984 /* Read trace section offsize structure from MCP scratchpad */
3985 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
3987 /* Extract trace section address from offsize (in scratchpad) */
3988 *trace_data_grc_addr =
3989 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
3991 /* Read signature from MCP trace section */
/* The signature validates that the scratchpad actually holds a trace */
3992 signature = qed_rd(p_hwfn, p_ptt,
3993 *trace_data_grc_addr +
3994 offsetof(struct mcp_trace, signature));
3996 if (signature != MFW_TRACE_SIGNATURE)
3997 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
3999 /* Read trace size from MCP trace section */
4000 *trace_data_size = qed_rd(p_hwfn,
4002 *trace_data_grc_addr +
4003 offsetof(struct mcp_trace, size));
4005 return DBG_STATUS_OK;
4008 /* Reads MCP trace meta data image from NVRAM
4009 * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4010 * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4011 * loaded from file).
4012 * - trace_meta_size (OUT): size in bytes of the trace meta data.
4014 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4015 struct qed_ptt *p_ptt,
4016 u32 trace_data_size_bytes,
4017 u32 *running_bundle_id,
4018 u32 *trace_meta_offset,
4019 u32 *trace_meta_size)
4021 u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4023 /* Read MCP trace section offsize structure from MCP scratchpad */
4024 spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4026 /* Find running bundle ID */
/* The running bundle ID word sits right after the trace data in the
 * scratchpad trace section.
 */
4028 MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4029 QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4030 *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
/* Only bundle IDs 0 and 1 are valid */
4031 if (*running_bundle_id > 1)
4032 return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4034 /* Find image in NVRAM */
/* Bundle ID selects which of the two MFW trace images to look up */
4036 (*running_bundle_id ==
4037 DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4038 return qed_find_nvram_image(p_hwfn,
4041 trace_meta_offset, trace_meta_size);
/* Reads the MCP Trace meta data from NVRAM into the specified buffer.
 * The meta image layout (as parsed below) is: a leading NVM_MAGIC_VALUE
 * signature, a module count byte, a list of length-prefixed modules, and a
 * trailing NVM_MAGIC_VALUE signature. Both signatures are validated;
 * DBG_STATUS_INVALID_TRACE_SIGNATURE is returned on mismatch.
 */
static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
					       struct qed_ptt *p_ptt,
					       u32 nvram_offset_in_bytes,
					       u32 size_in_bytes, u32 *buf)
	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
	enum dbg_status status;

	/* Read meta data from NVRAM */
	status = qed_nvram_read(p_hwfn,
				nvram_offset_in_bytes, size_in_bytes, buf);
	if (status != DBG_STATUS_OK)

	/* Extract and check first signature (unaligned read - the buffer is
	 * walked byte-wise).
	 */
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(signature);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	/* Extract number of modules */
	modules_num = *(byte_buf++);

	/* Skip all modules (each is a length byte followed by the module
	 * string) to reach the trailing signature.
	 */
	for (i = 0; i < modules_num; i++) {
		module_len = *(byte_buf++);
		byte_buf += module_len;

	/* Extract and check second signature */
	signature = qed_read_unaligned_dword(byte_buf);
	byte_buf += sizeof(signature);
	if (signature != NVM_MAGIC_VALUE)
		return DBG_STATUS_INVALID_TRACE_SIGNATURE;

	return DBG_STATUS_OK;
/* Dump MCP Trace.
 * Flow: locate the trace data in the MCP scratchpad, dump global params,
 * halt the MCP (best effort) so the scratchpad read is consistent, copy the
 * trace data section, resume the MCP, then locate and copy the trace meta
 * image from NVRAM (or use the user-supplied meta size parameter).
 * When dump is false only sizes are computed (offset still advances).
 */
static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  bool dump, u32 *num_dumped_dwords)
	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	/* MCP access is skipped entirely when the NO_MCP GRC param is set */
	use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);

	/* Get trace data info */
	status = qed_mcp_trace_get_data_info(p_hwfn,
					     &trace_data_grc_addr,
					     &trace_data_size_bytes);
	if (status != DBG_STATUS_OK)

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "mcp-trace");

	/* Halt MCP while reading from scratchpad so the read data will be
	 * consistent. if halt fails, MCP trace is taken anyway, with a small
	 * risk that it may be corrupt.
	if (dump && use_mfw) {
		halted = !qed_mcp_halt(p_hwfn, p_ptt);
			DP_NOTICE(p_hwfn, "MCP halt failed!\n");

	/* Find trace data size (header struct + data, rounded up to dwords) */
	trace_data_size_dwords =
	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),

	/* Dump trace data section header and param */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_data", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_data_size_dwords);

	/* Read trace data from scratchpad into dump buffer */
	offset += qed_grc_dump_addr_range(p_hwfn,
					  BYTES_TO_DWORDS(trace_data_grc_addr),
					  trace_data_size_dwords, false,
					  SPLIT_TYPE_NONE, 0);

	/* Resume MCP (only if halt succeeded) */
	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");

	/* Dump trace meta section header */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "mcp_trace_meta", 1);

	/* If MCP Trace meta size parameter was set, use it.
	 * Otherwise, read trace meta.
	 * trace_meta_size_bytes is dword-aligned.
	trace_meta_size_bytes =
		qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
	if ((!trace_meta_size_bytes || dump) && use_mfw)
		status = qed_mcp_trace_get_meta_info(p_hwfn,
						     trace_data_size_bytes,
						     &trace_meta_offset_bytes,
						     &trace_meta_size_bytes);
	if (status == DBG_STATUS_OK)
		trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);

	/* Dump trace meta size param */
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", trace_meta_size_dwords);

	/* Read trace meta image into dump buffer */
	if (dump && trace_meta_size_dwords)
		status = qed_mcp_trace_read_meta(p_hwfn,
						 trace_meta_offset_bytes,
						 trace_meta_size_bytes,
	if (status == DBG_STATUS_OK)
		offset += trace_meta_size_dwords;

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	/* If no mcp access, indicate that the dump doesn't contain the meta
	 * data (callers treat DBG_STATUS_NVRAM_GET_IMAGE_FAILED as a partial
	 * but usable dump).
	 */
	return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
/* Dump GRC trace (register) FIFO contents into dump_buf. Drains the FIFO
 * element-by-element via DMAE while GRC_REG_TRACE_FIFO_VALID_DATA reports
 * data available, up to REG_FIFO_DEPTH_DWORDS. The section size param is
 * written as 0 first and patched after the actual read.
 */
static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 bool dump, u32 *num_dumped_dwords)
	u32 dwords_read, size_param_offset, offset = 0, addr, len;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "reg-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "reg_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
	 * test how much data is available, except for reading it.
	 * (size-only mode reserves the worst case)
	 */
		offset += REG_FIFO_DEPTH_DWORDS;

	fifo_has_data = qed_rd(p_hwfn, p_ptt,
			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;

	/* Pull available data from fifo. Use DMAE since this is widebus memory
	 * and must be accessed atomically. Test for dwords_read not passing
	 * buffer size since more entries could be added to the buffer as we are
	addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
	len = REG_FIFO_ELEMENT_DWORDS;
	for (dwords_read = 0;
	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
	     dwords_read += REG_FIFO_ELEMENT_DWORDS) {
		offset += qed_grc_dump_addr_range(p_hwfn,
						  true, SPLIT_TYPE_NONE,
		/* Re-check after each element - the FIFO drains as we read */
		fifo_has_data = qed_rd(p_hwfn, p_ptt,
				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;

	/* Patch the real size into the section param written earlier */
	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
/* Dump IGU error-handling FIFO contents into dump_buf. Same drain pattern as
 * qed_reg_fifo_dump, but reads IGU_REG_ERROR_HANDLING_MEMORY while
 * IGU_REG_ERROR_HANDLING_DATA_VALID reports data, up to
 * IGU_FIFO_DEPTH_DWORDS.
 */
static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 bool dump, u32 *num_dumped_dwords)
	u32 dwords_read, size_param_offset, offset = 0, addr, len;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "igu-fifo");

	/* Dump fifo data section header and param. The size param is 0 for
	 * now, and is overwritten after reading the FIFO.
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "igu_fifo_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
	 * test how much data is available, except for reading it.
	 * (size-only mode reserves the worst case)
	 */
		offset += IGU_FIFO_DEPTH_DWORDS;

	fifo_has_data = qed_rd(p_hwfn, p_ptt,
			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;

	/* Pull available data from fifo. Use DMAE since this is widebus memory
	 * and must be accessed atomically. Test for dwords_read not passing
	 * buffer size since more entries could be added to the buffer as we are
	addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
	len = IGU_FIFO_ELEMENT_DWORDS;
	for (dwords_read = 0;
	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
	     dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
		offset += qed_grc_dump_addr_range(p_hwfn,
						  true, SPLIT_TYPE_NONE,
		/* Re-check after each element - the FIFO drains as we read */
		fifo_has_data = qed_rd(p_hwfn, p_ptt,
				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;

	/* Patch the real size into the section param written earlier */
	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
/* Protection Override dump.
 * Reads the number of valid override windows from
 * GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW and copies that many elements from
 * GRC_REG_PROTECTION_OVERRIDE_WINDOW via DMAE. Size-only mode reserves the
 * maximum (PROTECTION_OVERRIDE_DEPTH_DWORDS).
 */
static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
						    u32 *num_dumped_dwords)
	u32 size_param_offset, override_window_dwords, offset = 0, addr;

	*num_dumped_dwords = 0;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "protection-override");

	/* Dump data section header and param. The size param is 0 for now,
	 * and is overwritten after reading the data.
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "protection_override_data", 1);
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);

	/* Size-only mode: reserve the worst-case window count */
		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;

	/* Add override window info to buffer */
	override_window_dwords =
		qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
		PROTECTION_OVERRIDE_ELEMENT_DWORDS;
	if (override_window_dwords) {
		addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
		offset += qed_grc_dump_addr_range(p_hwfn,
						  override_window_dwords,
						  true, SPLIT_TYPE_NONE, 0);
		/* Patch the real size into the section param written earlier */
		qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
				   override_window_dwords);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);

	*num_dumped_dwords = offset;

	return DBG_STATUS_OK;
/* Performs FW Asserts Dump to the specified buffer.
 * Returns the dumped size in dwords.
 * For each Storm not in reset: reads its fw_info, emits a per-Storm
 * "fw_asserts" section, and copies the most recent assert list element from
 * the Storm's fast memory (INT_RAM) via qed_grc_dump_addr_range.
static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct fw_asserts_ram_section *asserts;
	char storm_letter_str[2] = "?";
	struct fw_info fw_info;

	/* Dump global params */
	offset += qed_dump_common_global_params(p_hwfn,
						dump_buf + offset, dump, 1);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "fw-asserts");

	/* Find Storm dump size */
	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
		struct storm_defs *storm = &s_storm_defs[storm_id];
		u32 last_list_idx, addr;

		/* Storms in reset cannot be read - skip */
		if (dev_data->block_in_reset[storm->sem_block_id])

		/* Read FW info for the current Storm */
		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);

		asserts = &fw_info.fw_asserts_section;

		/* Dump FW Asserts section header and params */
		storm_letter_str[0] = storm->letter;
		offset += qed_dump_section_hdr(dump_buf + offset,
					       dump, "fw_asserts", 2);
		offset += qed_dump_str_param(dump_buf + offset,
					     dump, "storm", storm_letter_str);
		offset += qed_dump_num_param(dump_buf + offset,
					     asserts->list_element_dword_size);

		/* Read and dump FW Asserts data.
		 * Size-only mode: just account for one list element.
		 */
			offset += asserts->list_element_dword_size;

		/* Locate the asserts section inside the Storm's INT_RAM and
		 * compute the index of the last written list element.
		 */
		fw_asserts_section_addr = storm->sem_fast_mem_addr +
		    SEM_FAST_REG_INT_RAM +
		    RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
		next_list_idx_addr = fw_asserts_section_addr +
		    DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
		last_list_idx = (next_list_idx > 0 ?
				 asserts->list_num_elements) - 1;
		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
		       asserts->list_dword_offset +
		       last_list_idx * asserts->list_element_dword_size;
			qed_grc_dump_addr_range(p_hwfn, p_ptt,
						asserts->list_element_dword_size,
						false, SPLIT_TYPE_NONE, 0);

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);
/* Dumps the specified ILT pages to the specified buffer.
 * Returns the dumped size in dwords.
 * For each page in [start_page_id, start_page_id + num_pages - 1]:
 * writes either the page ID (dump_page_ids == true) or the full page
 * contents (dump_page_ids == false). Pages without a mapped virtual
 * address are skipped.
static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
				    struct phys_mem_desc *ilt_pages,
	u32 page_id, end_page_id, offset = 0;

	end_page_id = start_page_id + num_pages - 1;

	for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
		struct phys_mem_desc *mem_desc = &ilt_pages[page_id];

		 * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
		/* Skip unmapped shadow pages */
		if (!ilt_pages[page_id].virt_addr)

		if (dump_page_ids) {
			/* Copy page ID to dump buffer */
			*(dump_buf + offset) = page_id;
			/* Copy page memory to dump buffer */
			memcpy(dump_buf + offset,
			       mem_desc->virt_addr, mem_desc->size);
			offset += BYTES_TO_DWORDS(mem_desc->size);
/* Dumps a section containing the dumped ILT pages.
 * Returns the dumped size in dwords.
 * Emits one section ("ilt_page_ids" or "ilt_page_mem", per dump_page_ids)
 * covering the CDUC (connection context) and CDUT (task context) ILT client
 * ranges, for the PF and each of its VFs. The section's size param is
 * written as 0 first and patched afterwards with the real data size.
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
				      u32 valid_conn_pf_pages,
				      u32 valid_conn_vf_pages,
				      struct phys_mem_desc *ilt_pages,
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 pf_start_line, start_page_id, offset = 0;
	u32 cdut_pf_init_pages, cdut_vf_init_pages;
	u32 cdut_pf_work_pages, cdut_vf_work_pages;
	u32 base_data_offset, size_param_offset;
	u32 cdut_pf_pages, cdut_vf_pages;
	const char *section_name;

	section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
	cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
	cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
	cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
	cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
	cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
	cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
	pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;

	qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);

	/* Dump size parameter (0 for now, overwritten with real size later) */
	size_param_offset = offset;
	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
	base_data_offset = offset;

	/* CDUC pages are ordered as follows:
	 * - PF pages - valid section (included in PF connection type mapping)
	 * - PF pages - invalid section (not dumped)
	 * - For each VF in the PF:
	 *   - VF pages - valid section (included in VF connection type mapping)
	 *   - VF pages - invalid section (not dumped)
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
		/* Dump connection PF pages */
		start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   valid_conn_pf_pages,
						   ilt_pages, dump_page_ids);

		/* Dump connection VF pages - one range per VF, each starting
		 * vf_total_lines after the previous.
		 */
		start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,
							   valid_conn_vf_pages,

	/* CDUT pages are ordered as follows:
	 * - PF init pages (not dumped)
	 * - For each VF in the PF:
	 *   - VF init pages (not dumped)
	if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
		/* Dump task PF pages (work pages only - init pages skipped) */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_init_pages - pf_start_line;
		offset += qed_ilt_dump_pages_range(dump_buf + offset,
						   ilt_pages, dump_page_ids);

		/* Dump task VF pages */
		start_page_id = clients[ILT_CLI_CDUT].first.val +
		    cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
		for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
		     i++, start_page_id += cdut_vf_pages)
			offset += qed_ilt_dump_pages_range(dump_buf + offset,

	/* Overwrite size param with the actual dumped data size */
	qed_dump_num_param(dump_buf + size_param_offset,
			   dump, "size", offset - base_data_offset);
/* Performs ILT Dump to the specified buffer.
 * Returns the dumped size in dwords.
 * Layout: 22 global params describing the CDUC/CDUT ILT geometry, a section
 * of PF CIDs per connection type, a section of VF CIDs per connection type,
 * a section of physical memory descriptors for every ILT shadow page, then
 * the ILT page-IDs and page-memory sections, and a closing section.
static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
	u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
	u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
	u32 num_cids_per_page, conn_ctx_size;
	u32 cduc_page_size, cdut_page_size;
	struct phys_mem_desc *ilt_pages;

	/* Page sizes are encoded as a power-of-two exponent in p_size */
	cduc_page_size = 1 <<
	    (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	cdut_page_size = 1 <<
	    (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
	conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
	num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
	ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;

	/* Dump global params - 22 must match number of params below */
	offset += qed_dump_common_global_params(p_hwfn, p_ptt,
						dump_buf + offset, dump, 22);
	offset += qed_dump_str_param(dump_buf + offset,
				     dump, "dump-type", "ilt-dump");
	offset += qed_dump_num_param(dump_buf + offset,
				     "cduc-page-size", cduc_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     "cduc-first-page-id",
				     clients[ILT_CLI_CDUC].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     "cduc-last-page-id",
				     clients[ILT_CLI_CDUC].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     "cduc-num-pf-pages",
				     [ILT_CLI_CDUC].pf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     "cduc-num-vf-pages",
				     [ILT_CLI_CDUC].vf_total_lines);
	offset += qed_dump_num_param(dump_buf + offset,
				     "max-conn-ctx-size",
	offset += qed_dump_num_param(dump_buf + offset,
				     "cdut-page-size", cdut_page_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     "cdut-first-page-id",
				     clients[ILT_CLI_CDUT].first.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     "cdut-last-page-id",
				     clients[ILT_CLI_CDUT].last.val);
	offset += qed_dump_num_param(dump_buf + offset,
				     "cdut-num-pf-init-pages",
				     qed_get_cdut_num_pf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     "cdut-num-vf-init-pages",
				     qed_get_cdut_num_vf_init_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     "cdut-num-pf-work-pages",
				     qed_get_cdut_num_pf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     "cdut-num-vf-work-pages",
				     qed_get_cdut_num_vf_work_pages(p_hwfn));
	offset += qed_dump_num_param(dump_buf + offset,
				     "max-task-ctx-size",
				     p_hwfn->p_cxt_mngr->task_ctx_size);
	offset += qed_dump_num_param(dump_buf + offset,
				     p_hwfn->p_cxt_mngr->task_type_id);
	offset += qed_dump_num_param(dump_buf + offset,
				     "first-vf-id-in-pf",
				     p_hwfn->p_cxt_mngr->first_vf_in_pf);
	offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
					      p_hwfn->p_cxt_mngr->vf_count);
	offset += qed_dump_num_param(dump_buf + offset,
				     "ptr-size-bytes", sizeof(void *));
	offset += qed_dump_num_param(dump_buf + offset,
				     p_hwfn->p_cxt_mngr->pf_start_line);
	offset += qed_dump_num_param(dump_buf + offset,
				     "page-mem-desc-size-dwords",
				     PAGE_MEM_DESC_SIZE_DWORDS);
	offset += qed_dump_num_param(dump_buf + offset,
				     p_hwfn->p_cxt_mngr->ilt_shadow_size);

	/* Additional/Less parameters require matching of number in call to
	 * dump_common_global_params()

	/* Dump section containing number of PF CIDs per connection type.
	 * Also accumulates the total valid PF CID count for page math below.
	 */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_pf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
	for (conn_type = 0, valid_conn_pf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
			*(dump_buf + offset) = num_pf_cids;
		valid_conn_pf_cids += num_pf_cids;

	/* Dump section containing number of VF CIDs per connection type */
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "num_vf_cids_per_conn_type", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     dump, "size", NUM_OF_CONNECTION_TYPES_E4);
	for (conn_type = 0, valid_conn_vf_cids = 0;
	     conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
		    p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
			*(dump_buf + offset) = num_vf_cids;
		valid_conn_vf_cids += num_vf_cids;

	/* Dump section containing physical memory descs for each ILT page */
	num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
	offset += qed_dump_section_hdr(dump_buf + offset,
				       dump, "ilt_page_desc", 1);
	offset += qed_dump_num_param(dump_buf + offset,
				     num_pages * PAGE_MEM_DESC_SIZE_DWORDS);

	/* Copy memory descriptors to dump buffer */
	for (page_id = 0; page_id < num_pages;
	     page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
		memcpy(dump_buf + offset,
		       &ilt_pages[page_id],
		       DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
		/* Size-only mode: just advance past the descriptors */
		offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;

	/* Convert valid CID counts to page counts */
	valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
	valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,

	/* Dump ILT pages IDs */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,

	/* Dump ILT pages memory */
	offset += qed_ilt_dump_pages_section(p_hwfn,
					     valid_conn_pf_pages,
					     valid_conn_vf_pages,

	/* Dump last section */
	offset += qed_dump_last_section(dump_buf, offset, dump);
4811 /***************************** Public Functions *******************************/
/* Registers the debug-data binary image: for each debug buffer type, points
 * the hwfn's debug array at the matching section inside bin_ptr (using the
 * header table at the start of the image). Always returns DBG_STATUS_OK.
 */
enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
				    const u8 * const bin_ptr)
	struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;

	/* Convert binary data to debug arrays */
	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
		qed_set_dbg_bin_buf(p_hwfn,
				    (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
				    buf_hdrs[buf_id].length);

	return DBG_STATUS_OK;
/* Reads FW info into *fw_info from the first Storm that is not in reset.
 * NOTE(review): return semantics inferred from the bool return type -
 * presumably true when a Storm's fw_info was read; confirm against callers.
 */
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, struct fw_info *fw_info)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
		struct storm_defs *storm = &s_storm_defs[storm_id];

		/* Skip Storm if it's in reset */
		if (dev_data->block_in_reset[storm->sem_block_id])

		/* Read FW info for the current Storm */
		qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
/* Sets a single GRC dump parameter, validating it against the min/max range
 * in s_grc_param_defs. If the parameter is a preset (e.g. EXCLUDE_ALL or
 * CRASH), applies that preset's value to every non-persistent parameter
 * instead. Returns DBG_STATUS_INVALID_ARGS on range/preset violations.
 */
enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
				   enum dbg_grc_params grc_param, u32 val)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	enum dbg_status status;

		"dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);

	status = qed_dbg_dev_init(p_hwfn);
	if (status != DBG_STATUS_OK)

	/* Initializes the GRC parameters (if not initialized). Needed in order
	 * to set the default parameter values for the first time.
	qed_dbg_grc_init_params(p_hwfn);

	if (grc_param >= MAX_DBG_GRC_PARAMS)
		return DBG_STATUS_INVALID_ARGS;
	if (val < s_grc_param_defs[grc_param].min ||
	    val > s_grc_param_defs[grc_param].max)
		return DBG_STATUS_INVALID_ARGS;

	if (s_grc_param_defs[grc_param].is_preset) {
		/* Disabling a preset is not allowed. Call
		 * dbg_grc_set_params_default instead.
			return DBG_STATUS_INVALID_ARGS;

		/* Update all params with the preset values */
		for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
			struct grc_param_defs *defs = &s_grc_param_defs[i];

			/* Skip persistent params */
			if (defs->is_persistent)

			/* Find preset value */
			if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
				    defs->exclude_all_preset_val;
			else if (grc_param == DBG_GRC_PARAM_CRASH)
				    defs->crash_preset_val[dev_data->chip_id];
				return DBG_STATUS_INVALID_ARGS;

			qed_grc_set_param(p_hwfn, i, preset_val);

		/* Regular param - set its value */
		qed_grc_set_param(p_hwfn, grc_param, val);

	return DBG_STATUS_OK;
/* Assign default GRC param values.
 * Resets every non-persistent GRC parameter to its per-chip default;
 * persistent parameters are left untouched.
 */
void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;

	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
		if (!s_grc_param_defs[i].is_persistent)
			dev_data->grc.param_val[i] =
			    s_grc_param_defs[i].default_val[dev_data->chip_id];
/* Computes the buffer size (in dwords) needed for a GRC dump by running
 * qed_grc_dump in size-only mode (dump=false). Fails with
 * DBG_STATUS_DBG_ARRAY_NOT_SET if the required debug-data arrays were not
 * registered via qed_dbg_set_bin_ptr.
 */
enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt,
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);

	if (status != DBG_STATUS_OK)

	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a GRC dump into dump_buf. Verifies the caller's buffer is at
 * least the size reported by qed_dbg_grc_get_dump_buf_size, then runs the
 * real dump and restores default GRC params afterwards.
 */
enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 buf_size_in_dwords,
				 u32 *num_dumped_dwords)
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
					       &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for an Idle Check dump.
 * The size is computed once via a dry-run of qed_idle_chk_dump and then
 * cached in dev_data->idle_chk.
 */
enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
	struct idle_chk_data *idle_chk = &dev_data->idle_chk;
	enum dbg_status status;

	status = qed_dbg_dev_init(p_hwfn);
	if (status != DBG_STATUS_OK)

	if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
	    !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
		return DBG_STATUS_DBG_ARRAY_NOT_SET;

	/* Compute and cache the size on first use */
	if (!idle_chk->buf_size_set) {
		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
						       p_ptt, NULL, false);
		idle_chk->buf_size_set = true;

	*buf_size = idle_chk->buf_size;

	return DBG_STATUS_OK;
/* Performs an Idle Check dump into dump_buf. Takes relevant blocks out of
 * reset and refreshes the reset-state cache before dumping; restores
 * default GRC params afterwards.
 */
enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
						    &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	/* Idle Check Dump */
	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
/* Computes the buffer size (in dwords) needed for an MCP Trace dump by
 * running qed_mcp_trace_dump in size-only mode (dump=false).
 */
enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						    struct qed_ptt *p_ptt,
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);

	if (status != DBG_STATUS_OK)

	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an MCP Trace dump into dump_buf. Unlike the other dump wrappers,
 * DBG_STATUS_NVRAM_GET_IMAGE_FAILED from the size query is tolerated - the
 * trace data itself is still dumped even when the NVRAM meta image is
 * unavailable. Restores default GRC params afterwards.
 */
enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       u32 buf_size_in_dwords,
				       u32 *num_dumped_dwords)
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	    qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
						&needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK && status !=
	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	status = qed_mcp_trace_dump(p_hwfn,
				    p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for a reg FIFO dump by
 * running qed_reg_fifo_dump in size-only mode (dump=false).
 */
enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);

	if (status != DBG_STATUS_OK)

	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs a reg FIFO dump into dump_buf after verifying the buffer size;
 * refreshes the block reset-state cache first and restores default GRC
 * params afterwards.
 */
enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
						    &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	status = qed_reg_fifo_dump(p_hwfn,
				   p_ptt, dump_buf, true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for an IGU FIFO dump by
 * running qed_igu_fifo_dump in size-only mode (dump=false).
 */
enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						   struct qed_ptt *p_ptt,
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);

	if (status != DBG_STATUS_OK)

	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
/* Performs an IGU FIFO dump into dump_buf after verifying the buffer size;
 * refreshes the block reset-state cache first and restores default GRC
 * params afterwards.
 */
enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      u32 buf_size_in_dwords,
				      u32 *num_dumped_dwords)
	u32 needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
						    &needed_buf_size_in_dwords);
	if (status != DBG_STATUS_OK)

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	status = qed_igu_fifo_dump(p_hwfn,
				   p_ptt, dump_buf, true, num_dumped_dwords);
	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for a protection override
 * dump by running qed_protection_override_dump in size-only mode.
 */
qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt,
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);

	if (status != DBG_STATUS_OK)

	return qed_protection_override_dump(p_hwfn,
					    p_ptt, NULL, false, buf_size);
/* Performs a protection override dump into dump_buf after verifying the
 * buffer size; refreshes the block reset-state cache first and restores
 * default GRC params afterwards.
 */
enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
						 struct qed_ptt *p_ptt,
						 u32 buf_size_in_dwords,
						 u32 *num_dumped_dwords)
	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	    qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
	if (status != DBG_STATUS_OK)

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	status = qed_protection_override_dump(p_hwfn,
					      true, num_dumped_dwords);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);
/* Computes the buffer size (in dwords) needed for a FW asserts dump by
 * running qed_fw_asserts_dump in size-only mode (dump=false); refreshes the
 * block reset-state cache first.
 */
enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
						     struct qed_ptt *p_ptt,
	enum dbg_status status = qed_dbg_dev_init(p_hwfn);

	if (status != DBG_STATUS_OK)

	/* Update reset state */
	qed_update_blocks_reset_state(p_hwfn, p_ptt);

	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);

	return DBG_STATUS_OK;
/* Performs a FW asserts dump into dump_buf after verifying the buffer size;
 * restores default GRC params afterwards.
 */
enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					u32 buf_size_in_dwords,
					u32 *num_dumped_dwords)
	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
	enum dbg_status status;

	*num_dumped_dwords = 0;

	    qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
	if (status != DBG_STATUS_OK)

	if (buf_size_in_dwords < needed_buf_size_in_dwords)
		return DBG_STATUS_DUMP_BUF_TOO_SMALL;

	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);

	/* Revert GRC params to their default */
	qed_dbg_grc_set_params_default(p_hwfn);

	return DBG_STATUS_OK;
/* Computes the buffer size (in dwords) required for an ILT dump.
 * Uses a dry run of qed_ilt_dump() (NULL buffer, dump=false).
 */
5277 static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5278 struct qed_ptt *p_ptt,
5281 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5285 if (status != DBG_STATUS_OK)
5288 *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
5290 return DBG_STATUS_OK;
/* Performs an ILT dump into the caller-supplied buffer.
 * Verifies the buffer is large enough (DUMP_BUF_TOO_SMALL otherwise),
 * performs the dump, and restores GRC params to their defaults.
 */
5293 static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
5294 struct qed_ptt *p_ptt,
5296 u32 buf_size_in_dwords,
5297 u32 *num_dumped_dwords)
5299 u32 needed_buf_size_in_dwords;
5300 enum dbg_status status;
5302 *num_dumped_dwords = 0;
5304 status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
5306 &needed_buf_size_in_dwords);
5307 if (status != DBG_STATUS_OK)
5310 if (buf_size_in_dwords < needed_buf_size_in_dwords)
5311 return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5313 *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
5315 /* Revert GRC params to their default */
5316 qed_dbg_grc_set_params_default(p_hwfn);
5318 return DBG_STATUS_OK;
/* Reads the attention registers of the specified block and attention type
 * into *results. For each attention register whose mode matches the current
 * chip mode, the status register is read (optionally via the
 * read-and-clear address when clear_status is set); registers with a
 * non-zero status are appended to results->reg_results together with their
 * mask value. Finally the block id, names offset, attention type and number
 * of result registers are recorded in the results header.
 * Requires the MODE_TREE / ATTN_BLOCKS / ATTN_REGS debug arrays to be set.
 */
5321 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5322 struct qed_ptt *p_ptt,
5323 enum block_id block_id,
5324 enum dbg_attn_type attn_type,
5326 struct dbg_attn_block_result *results)
5328 enum dbg_status status = qed_dbg_dev_init(p_hwfn);
5329 u8 reg_idx, num_attn_regs, num_result_regs = 0;
5330 const struct dbg_attn_reg *attn_reg_arr;
5332 if (status != DBG_STATUS_OK)
/* The binary debug arrays must be loaded before attentions can be parsed */
5335 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5336 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5337 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5338 return DBG_STATUS_DBG_ARRAY_NOT_SET;
5340 attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
5342 attn_type, &num_attn_regs);
5344 for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5345 const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5346 struct dbg_attn_reg_result *reg_result;
5347 u32 sts_addr, sts_val;
5348 u16 modes_buf_offset;
/* Skip registers whose evaluated mode does not match this chip/config */
5352 eval_mode = GET_FIELD(reg_data->mode.data,
5353 DBG_MODE_HDR_EVAL_MODE) > 0;
5354 modes_buf_offset = GET_FIELD(reg_data->mode.data,
5355 DBG_MODE_HDR_MODES_BUF_OFFSET);
5356 if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5359 /* Mode match - read attention status register */
5360 sts_addr = DWORDS_TO_BYTES(clear_status ?
5361 reg_data->sts_clr_address :
5362 GET_FIELD(reg_data->data,
5363 DBG_ATTN_REG_STS_ADDRESS));
5364 sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5368 /* Non-zero attention status - add to results */
5369 reg_result = &results->reg_results[num_result_regs];
5370 SET_FIELD(reg_result->data,
5371 DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5372 SET_FIELD(reg_result->data,
5373 DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5374 GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5375 reg_result->block_attn_offset = reg_data->block_attn_offset;
5376 reg_result->sts_val = sts_val;
5377 reg_result->mask_val = qed_rd(p_hwfn,
5380 (reg_data->mask_address));
/* Fill the result header */
5384 results->block_id = (u8)block_id;
5385 results->names_offset =
5386 qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
5387 SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5388 SET_FIELD(results->data,
5389 DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5391 return DBG_STATUS_OK;
5394 /******************************* Data Types **********************************/
5396 /* REG fifo element */
/* Bit-field layout of a REG FIFO entry. Shift values above 31 (PORT,
 * PRIVILEGE, PROTECTION, MASTER, ERROR) indicate the fields are extracted
 * from a 64-bit element.
 */
5397 struct reg_fifo_element {
5399 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
5400 #define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
5401 #define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
5402 #define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
5403 #define REG_FIFO_ELEMENT_PF_SHIFT 24
5404 #define REG_FIFO_ELEMENT_PF_MASK 0xf
5405 #define REG_FIFO_ELEMENT_VF_SHIFT 28
5406 #define REG_FIFO_ELEMENT_VF_MASK 0xff
5407 #define REG_FIFO_ELEMENT_PORT_SHIFT 36
5408 #define REG_FIFO_ELEMENT_PORT_MASK 0x3
5409 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
5410 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
5411 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
5412 #define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
5413 #define REG_FIFO_ELEMENT_MASTER_SHIFT 43
5414 #define REG_FIFO_ELEMENT_MASTER_MASK 0xf
5415 #define REG_FIFO_ELEMENT_ERROR_SHIFT 47
5416 #define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
5419 /* REG fifo error element */
/* Maps a REG FIFO error bit value to its human-readable message
 * (see s_reg_fifo_errors below).
 */
5420 struct reg_fifo_err {
5422 const char *err_msg;
5425 /* IGU fifo element */
/* Bit-field layout of an IGU FIFO entry: dword0 carries the FID / PF flag /
 * source / error type / command address; dword1-2 carry the write-command
 * flag and write data.
 */
5426 struct igu_fifo_element {
5428 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
5429 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
5430 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
5431 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
5432 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
5433 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
5434 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
5435 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
5436 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
5437 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
5440 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
5441 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
5442 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
5443 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
/* Decoded layout of an IGU FIFO write-command's data dword:
 * producer/consumer value, update flag, interrupt enable/disable,
 * segment, timer mask and command type.
 */
5447 struct igu_fifo_wr_data {
5449 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
5450 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
5451 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
5452 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
5453 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
5454 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
5455 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
5456 #define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
5457 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
5458 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
5459 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
5460 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
/* Decoded layout of an IGU FIFO cleanup write-command's data dword. */
5463 struct igu_fifo_cleanup_wr_data {
5465 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
5466 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
5467 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
5468 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
5469 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
5470 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
5471 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
5472 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
5475 /* Protection override element */
/* Bit-field layout of a Protection Override window entry; shift values
 * above 31 (READ, WRITE, protections) indicate a 64-bit element.
 */
5476 struct protection_override_element {
5478 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
5479 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
5480 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
5481 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
5482 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
5483 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
5484 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
5485 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
5486 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
5487 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
5488 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
5489 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
/* Sources of IGU FIFO commands (values parsed from the SOURCE field). */
5492 enum igu_fifo_sources {
/* Classification of IGU command addresses, used by s_igu_fifo_addr_data
 * to describe each address range in the IGU BAR.
 */
5506 enum igu_fifo_addr_types {
5507 IGU_ADDR_TYPE_MSIX_MEM,
5508 IGU_ADDR_TYPE_WRITE_PBA,
5509 IGU_ADDR_TYPE_WRITE_INT_ACK,
5510 IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5511 IGU_ADDR_TYPE_READ_INT,
5512 IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5513 IGU_ADDR_TYPE_RESERVED
/* Describes one IGU command address range and its semantic type. */
5516 struct igu_fifo_addr_data {
5521 enum igu_fifo_addr_types type;
5524 /******************************** Constants **********************************/
/* Maximum length of a single parsed/printed message line */
5526 #define MAX_MSG_LEN 1024
5528 #define MCP_TRACE_MAX_MODULE_LEN 8
5529 #define MCP_TRACE_FORMAT_MAX_PARAMS 3
/* Bit-width of one parameter-size field in an MCP trace format descriptor */
5530 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5531 (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
/* REG FIFO addresses are in dwords; factor converts to bytes */
5533 #define REG_FIFO_ELEMENT_ADDR_FACTOR 4
5534 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
5536 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
5538 /***************************** Constant Arrays *******************************/
5540 /* Status string array */
/* Human-readable messages indexed by enum dbg_status; entry order must
 * match the enum exactly (each entry's enum value is noted above it).
 */
5541 static const char * const s_status_str[] = {
5543 "Operation completed successfully",
5545 /* DBG_STATUS_APP_VERSION_NOT_SET */
5546 "Debug application version wasn't set",
5548 /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5549 "Unsupported debug application version",
5551 /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5552 "The debug block wasn't reset since the last recording",
5554 /* DBG_STATUS_INVALID_ARGS */
5555 "Invalid arguments",
5557 /* DBG_STATUS_OUTPUT_ALREADY_SET */
5558 "The debug output was already set",
5560 /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5561 "Invalid PCI buffer size",
5563 /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5564 "PCI buffer allocation failed",
5566 /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5567 "A PCI buffer wasn't allocated",
5569 /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
5570 "The filter/trigger constraint dword offsets are not enabled for recording",
5573 /* DBG_STATUS_VFC_READ_ERROR */
5574 "Error reading from VFC",
5576 /* DBG_STATUS_STORM_ALREADY_ENABLED */
5577 "The Storm was already enabled",
5579 /* DBG_STATUS_STORM_NOT_ENABLED */
5580 "The specified Storm wasn't enabled",
5582 /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5583 "The block was already enabled",
5585 /* DBG_STATUS_BLOCK_NOT_ENABLED */
5586 "The specified block wasn't enabled",
5588 /* DBG_STATUS_NO_INPUT_ENABLED */
5589 "No input was enabled for recording",
5591 /* DBG_STATUS_NO_FILTER_TRIGGER_256B */
5592 "Filters and triggers are not allowed in E4 256-bit mode",
5594 /* DBG_STATUS_FILTER_ALREADY_ENABLED */
5595 "The filter was already enabled",
5597 /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5598 "The trigger was already enabled",
5600 /* DBG_STATUS_TRIGGER_NOT_ENABLED */
5601 "The trigger wasn't enabled",
5603 /* DBG_STATUS_CANT_ADD_CONSTRAINT */
5604 "A constraint can be added only after a filter was enabled or a trigger state was added",
5606 /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5607 "Cannot add more than 3 trigger states",
5609 /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5610 "Cannot add more than 4 constraints per filter or trigger state",
5612 /* DBG_STATUS_RECORDING_NOT_STARTED */
5613 "The recording wasn't started",
5615 /* DBG_STATUS_DATA_DIDNT_TRIGGER */
5616 "A trigger was configured, but it didn't trigger",
5618 /* DBG_STATUS_NO_DATA_RECORDED */
5619 "No data was recorded",
5621 /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5622 "Dump buffer is too small",
5624 /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5625 "Dumped data is not aligned to chunks",
5627 /* DBG_STATUS_UNKNOWN_CHIP */
5630 /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5631 "Failed allocating virtual memory",
5633 /* DBG_STATUS_BLOCK_IN_RESET */
5634 "The input block is in reset",
5636 /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5637 "Invalid MCP trace signature found in NVRAM",
5639 /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5640 "Invalid bundle ID found in NVRAM",
5642 /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5643 "Failed getting NVRAM image",
5645 /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5646 "NVRAM image is not dword-aligned",
5648 /* DBG_STATUS_NVRAM_READ_FAILED */
5649 "Failed reading from NVRAM",
5651 /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5652 "Idle check parsing failed",
5654 /* DBG_STATUS_MCP_TRACE_BAD_DATA */
5655 "MCP Trace data is corrupt",
5657 /* DBG_STATUS_MCP_TRACE_NO_META */
5658 "Dump doesn't contain meta data - it must be provided in image file",
5660 /* DBG_STATUS_MCP_COULD_NOT_HALT */
5661 "Failed to halt MCP",
5663 /* DBG_STATUS_MCP_COULD_NOT_RESUME */
5664 "Failed to resume MCP after halt",
5666 /* DBG_STATUS_RESERVED0 */
5669 /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5670 "Failed to empty SEMI sync FIFO",
5672 /* DBG_STATUS_IGU_FIFO_BAD_DATA */
5673 "IGU FIFO data is corrupt",
5675 /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5676 "MCP failed to mask parities",
5678 /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5679 "FW Asserts parsing failed",
5681 /* DBG_STATUS_REG_FIFO_BAD_DATA */
5682 "GRC FIFO data is corrupt",
5684 /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5685 "Protection Override data is corrupt",
5687 /* DBG_STATUS_DBG_ARRAY_NOT_SET */
5688 "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5690 /* DBG_STATUS_RESERVED1 */
5693 /* DBG_STATUS_NON_MATCHING_LINES */
5694 "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
5696 /* DBG_STATUS_INSUFFICIENT_HW_IDS */
5697 "Insufficient HW IDs. Try to record less Storms/blocks",
5699 /* DBG_STATUS_DBG_BUS_IN_USE */
5700 "The debug bus is in use",
5702 /* DBG_STATUS_INVALID_STORM_DBG_MODE */
5703 "The storm debug mode is not supported in the current chip",
5705 /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
5706 "Other engine is supported only in BB",
5708 /* DBG_STATUS_FILTER_SINGLE_HW_ID */
5709 "The configured filter mode requires a single Storm/block input",
5711 /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
5712 "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
5714 /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
5715 "When triggering on Storm data, the Storm to trigger on must be specified"
5718 /* Idle check severity names array */
/* The following lookup tables translate raw dump field values into the
 * human-readable strings used by the parsers below. Each table's order
 * must match the corresponding field encoding.
 */
5719 static const char * const s_idle_chk_severity_str[] = {
5721 "Error if no traffic",
5725 /* MCP Trace level names array */
5726 static const char * const s_mcp_trace_level_str[] = {
5732 /* Access type names array */
5733 static const char * const s_access_strs[] = {
5738 /* Privilege type names array */
5739 static const char * const s_privilege_strs[] = {
5746 /* Protection type names array */
5747 static const char * const s_protection_strs[] = {
5758 /* Master type names array */
5759 static const char * const s_master_strs[] = {
5778 /* REG FIFO error messages array */
/* Keyed by error bit value (see struct reg_fifo_err) */
5779 static struct reg_fifo_err s_reg_fifo_errors[] = {
5781 {2, "address doesn't belong to any block"},
5782 {4, "reserved address in block or write to read-only address"},
5783 {8, "privilege/protection mismatch"},
5784 {16, "path isolation error"},
5788 /* IGU FIFO sources array */
5789 static const char * const s_igu_fifo_source_strs[] = {
5803 /* IGU FIFO error messages */
5804 static const char * const s_igu_fifo_error_strs[] = {
5807 "function disabled",
5808 "VF sent command to attention address",
5809 "host sent prod update command",
5810 "read of during interrupt register while in MIMD mode",
5811 "access to PXP BAR reserved address",
5812 "producer update command to attention index",
5814 "SB index not valid",
5815 "SB relative index and FID not found",
5817 "command with error flag asserted (PCI error or CAU discard)",
5818 "VF sent cleanup and RF cleanup is disabled",
5819 "cleanup command on type bigger than 4"
5822 /* IGU FIFO address data */
/* IGU BAR command-address map: {first addr, last addr, PF description,
 * VF description (NULL = same as PF), address type}.
 */
5823 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5824 {0x0, 0x101, "MSI-X Memory", NULL,
5825 IGU_ADDR_TYPE_MSIX_MEM},
5826 {0x102, 0x1ff, "reserved", NULL,
5827 IGU_ADDR_TYPE_RESERVED},
5828 {0x200, 0x200, "Write PBA[0:63]", NULL,
5829 IGU_ADDR_TYPE_WRITE_PBA},
5830 {0x201, 0x201, "Write PBA[64:127]", "reserved",
5831 IGU_ADDR_TYPE_WRITE_PBA},
5832 {0x202, 0x202, "Write PBA[128]", "reserved",
5833 IGU_ADDR_TYPE_WRITE_PBA},
5834 {0x203, 0x3ff, "reserved", NULL,
5835 IGU_ADDR_TYPE_RESERVED},
5836 {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5837 IGU_ADDR_TYPE_WRITE_INT_ACK},
5838 {0x5f0, 0x5f0, "Attention bits update", NULL,
5839 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5840 {0x5f1, 0x5f1, "Attention bits set", NULL,
5841 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5842 {0x5f2, 0x5f2, "Attention bits clear", NULL,
5843 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5844 {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5845 IGU_ADDR_TYPE_READ_INT},
5846 {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5847 IGU_ADDR_TYPE_READ_INT},
5848 {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
5849 IGU_ADDR_TYPE_READ_INT},
5850 {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
5851 IGU_ADDR_TYPE_READ_INT},
5852 {0x5f7, 0x5ff, "reserved", NULL,
5853 IGU_ADDR_TYPE_RESERVED},
5854 {0x600, 0x7ff, "Producer update", NULL,
5855 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
5858 /******************************** Variables **********************************/
5860 /* Temporary buffer, used for print size calculations */
/* Sink target for qed_get_buf_ptr() when the caller passes a NULL results
 * buffer (size-only dry runs); never read back.
 */
5861 static char s_temp_buf[MAX_MSG_LEN];
5863 /**************************** Private Functions ******************************/
/* Cyclic addition: returns (a + b) modulo size (wraps around buffer end). */
5865 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
5867 return (a + b) % size;
/* Cyclic subtraction: returns (a - b) modulo size. "size" is added first
 * so the unsigned arithmetic never wraps below zero (requires b <= size).
 */
5870 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
5872 return (size + a - b) % size;
5875 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
5876 * bytes) and returns them as a dword value. the specified buffer offset is
5879 static u32 qed_read_from_cyclic_buf(void *buf,
5881 u32 buf_size, u8 num_bytes_to_read)
5883 u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
5886 val_ptr = (u8 *)&val;
5888 /* Assume running on a LITTLE ENDIAN and the buffer is network order
5889 * (BIG ENDIAN), as high order bytes are placed in lower memory address.
5891 for (i = 0; i < num_bytes_to_read; i++) {
/* *offset wraps around the cyclic buffer end via qed_cyclic_add() */
5892 val_ptr[i] = bytes_buf[*offset];
5893 *offset = qed_cyclic_add(*offset, 1, buf_size);
5899 /* Reads and returns the next byte from the specified buffer.
5900 * The specified buffer offset is updated.
5902 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
5904 return ((u8 *)buf)[(*offset)++];
5907 /* Reads and returns the next dword from the specified buffer.
5908 * The specified buffer offset is updated.
/* NOTE(review): the dword is read via a direct u32 access at a byte
 * offset — assumes *offset is dword-aligned here; verify callers.
 */
5910 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
5912 u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
5919 /* Reads the next string from the specified buffer, and copies it to the
5920 * specified pointer. The specified buffer offset is updated.
5922 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
5924 const char *source_str = &((const char *)buf)[*offset];
/* strncpy alone does not guarantee NUL termination; the explicit
 * terminator write below covers the truncation case.
 */
5926 strncpy(dest, source_str, size);
5927 dest[size - 1] = '\0';
5931 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
5932 * If the specified buffer is NULL, a temporary buffer pointer is returned.
/* The s_temp_buf fallback lets callers run the same sprintf code path for
 * size-only calculations without allocating a results buffer.
 */
5934 static char *qed_get_buf_ptr(void *buf, u32 offset)
5936 return buf ? (char *)buf + offset : s_temp_buf;
5939 /* Reads a param from the specified buffer. Returns the number of dwords read.
5940 * If the returned str_param is NULL, the param is numeric and its value is
5941 * returned in num_param.
5942 * Otherwise, the param is a string and its pointer is returned in str_param.
5944 static u32 qed_read_param(u32 *dump_buf,
5945 const char **param_name,
5946 const char **param_str_val, u32 *param_num_val)
5948 char *char_buf = (char *)dump_buf;
5951 /* Extract param name */
5952 *param_name = char_buf;
5953 offset += strlen(*param_name) + 1;
5955 /* Check param type */
/* Non-zero type byte: string param, its value follows in-line */
5956 if (*(char_buf + offset++)) {
5958 *param_str_val = char_buf + offset;
5960 offset += strlen(*param_str_val) + 1;
/* Align offset to the next dword boundary */
5962 offset += (4 - (offset & 0x3));
5965 *param_str_val = NULL;
5967 offset += (4 - (offset & 0x3));
5968 *param_num_val = *(u32 *)(char_buf + offset);
/* Convert the consumed byte count to dwords */
5972 return (u32)offset / 4;
5975 /* Reads a section header from the specified buffer.
5976 * Returns the number of dwords read.
5978 static u32 qed_read_section_hdr(u32 *dump_buf,
5979 const char **section_name,
5980 u32 *num_section_params)
5982 const char *param_str_val;
/* A section header is encoded as a regular param whose name is the
 * section name; the string value (if any) is ignored here.
 */
5984 return qed_read_param(dump_buf,
5985 section_name, &param_str_val, num_section_params);
5988 /* Reads section params from the specified buffer and prints them to the results
5989 * buffer. Returns the number of dwords read.
5991 static u32 qed_print_section_params(u32 *dump_buf,
5992 u32 num_section_params,
5993 char *results_buf, u32 *num_chars_printed)
5995 u32 i, dump_offset = 0, results_offset = 0;
5997 for (i = 0; i < num_section_params; i++) {
5998 const char *param_name, *param_str_val;
5999 u32 param_num_val = 0;
6001 dump_offset += qed_read_param(dump_buf + dump_offset,
6003 &param_str_val, &param_num_val);
/* String params are printed as "name: value"; numeric params likewise,
 * except the raw fw-timestamp which is skipped.
 */
6007 sprintf(qed_get_buf_ptr(results_buf,
6009 "%s: %s\n", param_name, param_str_val);
6010 else if (strcmp(param_name, "fw-timestamp"))
6012 sprintf(qed_get_buf_ptr(results_buf,
6014 "%s: %d\n", param_name, param_num_val);
6017 results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6020 *num_chars_printed = results_offset;
6025 /* Returns the block name that matches the specified block ID,
6026 * or NULL if not found.
6028 static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
6029 enum block_id block_id)
/* block_id indexes directly into the BLOCKS_USER_DATA binary array */
6031 const struct dbg_block_user *block =
6032 (const struct dbg_block_user *)
6033 p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
6035 return (const char *)block->name;
/* Returns the per-hwfn debug-tools user data (stored in dbg_user_info). */
6038 static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
6041 return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
6044 /* Parses the idle check rules and returns the number of characters printed.
6045 * In case of parsing error, returns 0.
/* For each dumped rule: looks up its parsing data and string in the binary
 * debug arrays, validates the severity, updates the error/warning counters,
 * prints "severity: message" followed by the dumped register/memory values.
 * Output goes through qed_get_buf_ptr(), so a NULL results_buf yields a
 * size-only pass.
 */
6047 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
6051 bool print_fw_idle_chk,
6053 u32 *num_errors, u32 *num_warnings)
6055 /* Offset in results_buf in bytes */
6056 u32 results_offset = 0;
6064 /* Go over dumped results */
6065 for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6067 const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6068 struct dbg_idle_chk_result_hdr *hdr;
6069 const char *parsing_str, *lsi_msg;
6070 u32 parsing_str_offset;
6074 hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6076 (const struct dbg_idle_chk_rule_parsing_data *)
6077 p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
6079 parsing_str_offset =
6080 GET_FIELD(rule_parsing_data->data,
6081 DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6083 GET_FIELD(rule_parsing_data->data,
6084 DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6085 parsing_str = (const char *)
6086 p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
6088 lsi_msg = parsing_str;
6091 if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6094 /* Skip rule header */
6095 dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6097 /* Update errors/warnings count */
6098 if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6099 hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6104 /* Print rule severity */
6106 sprintf(qed_get_buf_ptr(results_buf,
6107 results_offset), "%s: ",
6108 s_idle_chk_severity_str[hdr->severity]);
6110 /* Print rule message */
/* The FW message (if present) precedes the LSI message in the
 * parsing-strings blob; advance past it when selecting which to print.
 */
6112 parsing_str += strlen(parsing_str) + 1;
6114 sprintf(qed_get_buf_ptr(results_buf,
6115 results_offset), "%s.",
6117 print_fw_idle_chk ? parsing_str : lsi_msg);
6118 parsing_str += strlen(parsing_str) + 1;
6120 /* Print register values */
6122 sprintf(qed_get_buf_ptr(results_buf,
6123 results_offset), " Registers:");
6125 i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6127 struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6132 (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6133 is_mem = GET_FIELD(reg_hdr->data,
6134 DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6135 reg_id = GET_FIELD(reg_hdr->data,
6136 DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6138 /* Skip reg header */
6139 dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6141 /* Skip register names until the required reg_id is
6144 for (; reg_id > curr_reg_id;
6146 parsing_str += strlen(parsing_str) + 1);
6149 sprintf(qed_get_buf_ptr(results_buf,
6150 results_offset), " %s",
/* Condition registers that are memories also print the entry index */
6152 if (i < hdr->num_dumped_cond_regs && is_mem)
6154 sprintf(qed_get_buf_ptr(results_buf,
6156 "[%d]", hdr->mem_entry_id +
6157 reg_hdr->start_entry);
6159 sprintf(qed_get_buf_ptr(results_buf,
6160 results_offset), "=");
6161 for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6163 sprintf(qed_get_buf_ptr(results_buf,
6166 if (j < reg_hdr->size - 1)
6168 sprintf(qed_get_buf_ptr
6170 results_offset), ",");
6175 sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6178 /* Check if end of dump buffer was exceeded */
6179 if (dump_buf > dump_buf_end)
6182 return results_offset;
6185 /* Parses an idle check dump buffer.
6186 * If result_buf is not NULL, the idle check results are printed to it.
6187 * In any case, the required results buffer size is assigned to
6188 * parsed_results_bytes.
6189 * The parsing status is returned.
6191 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
6193 u32 num_dumped_dwords,
6195 u32 *parsed_results_bytes,
6199 const char *section_name, *param_name, *param_str_val;
6200 u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6201 u32 num_section_params = 0, num_rules;
6203 /* Offset in results_buf in bytes */
6204 u32 results_offset = 0;
6206 *parsed_results_bytes = 0;
/* Parsing requires the parsing-strings and idle-check binary arrays */
6210 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6211 !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6212 return DBG_STATUS_DBG_ARRAY_NOT_SET;
6214 /* Read global_params section */
6215 dump_buf += qed_read_section_hdr(dump_buf,
6216 &section_name, &num_section_params);
6217 if (strcmp(section_name, "global_params"))
6218 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6220 /* Print global params */
6221 dump_buf += qed_print_section_params(dump_buf,
6223 results_buf, &results_offset);
6225 /* Read idle_chk section */
6226 dump_buf += qed_read_section_hdr(dump_buf,
6227 &section_name, &num_section_params);
6228 if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6229 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6230 dump_buf += qed_read_param(dump_buf,
6231 &param_name, &param_str_val, &num_rules);
6232 if (strcmp(param_name, "num_rules"))
6233 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6236 u32 rules_print_size;
6238 /* Print FW output */
6240 sprintf(qed_get_buf_ptr(results_buf,
6242 "FW_IDLE_CHECK:\n");
6244 qed_parse_idle_chk_dump_rules(p_hwfn,
6255 results_offset += rules_print_size;
6256 if (!rules_print_size)
6257 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6259 /* Print LSI output */
6261 sprintf(qed_get_buf_ptr(results_buf,
6263 "\nLSI_IDLE_CHECK:\n");
6265 qed_parse_idle_chk_dump_rules(p_hwfn,
6276 results_offset += rules_print_size;
6277 if (!rules_print_size)
6278 return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6281 /* Print errors/warnings count */
6284 sprintf(qed_get_buf_ptr(results_buf,
6286 "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6287 *num_errors, *num_warnings);
6288 else if (*num_warnings)
6290 sprintf(qed_get_buf_ptr(results_buf,
6292 "\nIdle Check completed successfully (with %d warnings)\n",
6296 sprintf(qed_get_buf_ptr(results_buf,
6298 "\nIdle Check completed successfully\n");
6300 /* Add 1 for string NULL termination */
6301 *parsed_results_bytes = results_offset + 1;
6303 return DBG_STATUS_OK;
6306 /* Allocates and fills MCP Trace meta data based on the specified meta data
6308 * Returns debug status code.
/* Layout of meta_buf: signature dword, module count byte + length-prefixed
 * module name strings, second signature dword, format count dword +
 * per-format data dword and format string. Any previously loaded meta is
 * freed first. On allocation failure the counters are trimmed so the free
 * path releases only what was allocated.
 */
6310 static enum dbg_status
6311 qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
6312 const u32 *meta_buf)
6314 struct dbg_tools_user_data *dev_user_data;
6315 u32 offset = 0, signature, i;
6316 struct mcp_trace_meta *meta;
6319 dev_user_data = qed_dbg_get_user_data(p_hwfn);
6320 meta = &dev_user_data->mcp_trace_meta;
6321 meta_buf_bytes = (u8 *)meta_buf;
6323 /* Free the previous meta before loading a new one. */
6324 if (meta->is_allocated)
6325 qed_mcp_trace_free_meta_data(p_hwfn);
6327 memset(meta, 0, sizeof(*meta));
6329 /* Read first signature */
6330 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6331 if (signature != NVM_MAGIC_VALUE)
6332 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6334 /* Read no. of modules and allocate memory for their pointers */
6335 meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6336 meta->modules = kcalloc(meta->modules_num, sizeof(char *),
6339 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6341 /* Allocate and read all module strings */
6342 for (i = 0; i < meta->modules_num; i++) {
6343 u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6345 *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6346 if (!(*(meta->modules + i))) {
6347 /* Update number of modules to be released */
/* NOTE(review): modules[0..i-1] were allocated, so setting
 * modules_num to i - 1 appears to leak modules[i-1] on the free
 * path — verify against qed_mcp_trace_free_meta_data().
 */
6348 meta->modules_num = i ? i - 1 : 0;
6349 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6352 qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6353 *(meta->modules + i));
/* Truncate over-long module names to the fixed display width */
6354 if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6355 (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6358 /* Read second signature */
6359 signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6360 if (signature != NVM_MAGIC_VALUE)
6361 return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6363 /* Read number of formats and allocate memory for all formats */
6364 meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6365 meta->formats = kcalloc(meta->formats_num,
6366 sizeof(struct mcp_trace_format),
6369 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6371 /* Allocate and read all strings */
6372 for (i = 0; i < meta->formats_num; i++) {
6373 struct mcp_trace_format *format_ptr = &meta->formats[i];
6376 format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6378 format_len = GET_MFW_FIELD(format_ptr->data,
6379 MCP_TRACE_FORMAT_LEN);
6380 format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6381 if (!format_ptr->format_str) {
6382 /* Update number of modules to be released */
/* NOTE(review): same possible off-by-one as the modules loop
 * above — formats[i-1] may be leaked; verify the free path.
 */
6383 meta->formats_num = i ? i - 1 : 0;
6384 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6387 qed_read_str_from_buf(meta_buf_bytes,
6389 format_len, format_ptr->format_str);
6392 meta->is_allocated = true;
6393 return DBG_STATUS_OK;
6396 /* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results
6397 * are printed to it. The parsing status is returned.
6399 * trace_buf - MCP trace cyclic buffer
6400 * trace_buf_size - MCP trace cyclic buffer size in bytes
6401 * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
6403 * data_size - size in bytes of data to parse.
6404 * parsed_buf - destination buffer for parsed data.
6405 * parsed_results_bytes - size of parsed data in bytes.
6407 static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
6413 u32 *parsed_results_bytes)
6415 struct dbg_tools_user_data *dev_user_data;
6416 struct mcp_trace_meta *meta;
6417 u32 param_mask, param_shift;
6418 enum dbg_status status;
6420 dev_user_data = qed_dbg_get_user_data(p_hwfn);
6421 meta = &dev_user_data->mcp_trace_meta;
6422 *parsed_results_bytes = 0;
/* Meta data must have been loaded via qed_mcp_trace_alloc_meta_data() */
6424 if (!meta->is_allocated)
6425 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6427 status = DBG_STATUS_OK;
6430 struct mcp_trace_format *format_ptr;
6431 u8 format_level, format_module;
6432 u32 params[3] = { 0, 0, 0 };
6433 u32 header, format_idx, i;
6435 if (data_size < MFW_TRACE_ENTRY_SIZE)
6436 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6438 header = qed_read_from_cyclic_buf(trace_buf,
6441 MFW_TRACE_ENTRY_SIZE);
6442 data_size -= MFW_TRACE_ENTRY_SIZE;
6443 format_idx = header & MFW_TRACE_EVENTID_MASK;
6445 /* Skip message if its index doesn't exist in the meta data */
6446 if (format_idx >= meta->formats_num) {
6447 u8 format_size = (u8)GET_MFW_FIELD(header,
6448 MFW_TRACE_PRM_SIZE);
6450 if (data_size < format_size)
6451 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6453 data_offset = qed_cyclic_add(data_offset,
6456 data_size -= format_size;
6460 format_ptr = &meta->formats[format_idx];
/* Extract up to 3 parameters; each size field is 2 bits wide and the
 * mask/shift advance by MCP_TRACE_FORMAT_PARAM_WIDTH per iteration.
 */
6463 param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6464 MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
6465 i < MCP_TRACE_FORMAT_MAX_PARAMS;
6466 i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6467 param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6468 /* Extract param size (0..3) */
6469 u8 param_size = (u8)((format_ptr->data & param_mask) >>
6472 /* If the param size is zero, there are no other
6478 /* Size is encoded using 2 bits, where 3 is used to
6481 if (param_size == 3)
6484 if (data_size < param_size)
6485 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6487 params[i] = qed_read_from_cyclic_buf(trace_buf,
6491 data_size -= param_size;
6494 format_level = (u8)GET_MFW_FIELD(format_ptr->data,
6495 MCP_TRACE_FORMAT_LEVEL);
6496 format_module = (u8)GET_MFW_FIELD(format_ptr->data,
6497 MCP_TRACE_FORMAT_MODULE);
6498 if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
6499 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6501 /* Print current message to results buffer */
/* NOTE(review): format_level is bounds-checked above, but
 * format_module is not checked against meta->modules_num before
 * indexing meta->modules — verify corrupt meta cannot reach here.
 */
6502 *parsed_results_bytes +=
6503 sprintf(qed_get_buf_ptr(parsed_buf,
6504 *parsed_results_bytes),
6506 s_mcp_trace_level_str[format_level],
6507 meta->modules[format_module]);
6508 *parsed_results_bytes +=
6509 sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
6510 format_ptr->format_str,
6511 params[0], params[1], params[2]);
6514 /* Add string NULL terminator */
6515 (*parsed_results_bytes)++;
6520 /* Parses an MCP Trace dump buffer.
6521 * If result_buf is not NULL, the MCP Trace results are printed to it.
6522 * In any case, the required results buffer size is assigned to
6523 * parsed_results_bytes.
6524 * The parsing status is returned.
/* free_meta_data controls whether the meta data allocated here is released
 * before returning (false keeps it for continued parsing — see
 * qed_print_mcp_trace_results_cont).
 * NOTE(review): the "§"/"¶" characters below are a mis-encoding of
 * "&s..."/"&p..." address-of expressions in the extraction; the on-disk
 * source passes &section_name / &param_name etc.
 */
6526 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6529 u32 *parsed_results_bytes,
6530 bool free_meta_data)
6532 const char *section_name, *param_name, *param_str_val;
6533 u32 data_size, trace_data_dwords, trace_meta_dwords;
6534 u32 offset, results_offset, results_buf_bytes;
6535 u32 param_num_val, num_section_params;
6536 struct mcp_trace *trace;
6537 enum dbg_status status;
6538 const u32 *meta_buf;
6541 *parsed_results_bytes = 0;
6543 /* Read global_params section */
6544 dump_buf += qed_read_section_hdr(dump_buf,
6545 §ion_name, &num_section_params);
6546 if (strcmp(section_name, "global_params"))
6547 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6549 /* Print global params */
6550 dump_buf += qed_print_section_params(dump_buf,
6552 results_buf, &results_offset);
6554 /* Read trace_data section */
6555 dump_buf += qed_read_section_hdr(dump_buf,
6556 §ion_name, &num_section_params);
6557 if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6558 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6559 dump_buf += qed_read_param(dump_buf,
6560 ¶m_name, ¶m_str_val, ¶m_num_val);
6561 if (strcmp(param_name, "size"))
6562 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6563 trace_data_dwords = param_num_val;
6565 /* Prepare trace info */
6566 trace = (struct mcp_trace *)dump_buf;
/* Validate the embedded mcp_trace header before trusting its fields */
6567 if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
6568 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6570 trace_buf = (u8 *)dump_buf + sizeof(*trace);
6571 offset = trace->trace_oldest;
6572 data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
6573 dump_buf += trace_data_dwords;
6575 /* Read meta_data section */
6576 dump_buf += qed_read_section_hdr(dump_buf,
6577 §ion_name, &num_section_params);
6578 if (strcmp(section_name, "mcp_trace_meta"))
6579 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6580 dump_buf += qed_read_param(dump_buf,
6581 ¶m_name, ¶m_str_val, ¶m_num_val);
6582 if (strcmp(param_name, "size"))
6583 return DBG_STATUS_MCP_TRACE_BAD_DATA;
6584 trace_meta_dwords = param_num_val;
6586 /* Choose meta data buffer */
6587 if (!trace_meta_dwords) {
6588 /* Dump doesn't include meta data */
6589 struct dbg_tools_user_data *dev_user_data =
6590 qed_dbg_get_user_data(p_hwfn);
6592 if (!dev_user_data->mcp_trace_user_meta_buf)
6593 return DBG_STATUS_MCP_TRACE_NO_META;
6595 meta_buf = dev_user_data->mcp_trace_user_meta_buf;
6597 /* Dump includes meta data */
6598 meta_buf = dump_buf;
6601 /* Allocate meta data memory */
6602 status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
6603 if (status != DBG_STATUS_OK)
6606 status = qed_parse_mcp_trace_buf(p_hwfn,
6612 results_buf + results_offset :
6614 &results_buf_bytes);
6615 if (status != DBG_STATUS_OK)
6619 qed_mcp_trace_free_meta_data(p_hwfn);
6621 *parsed_results_bytes = results_offset + results_buf_bytes;
6623 return DBG_STATUS_OK;
6626 /* Parses a Reg FIFO dump buffer.
6627 * If result_buf is not NULL, the Reg FIFO results are printed to it.
6628 * In any case, the required results buffer size is assigned to
6629 * parsed_results_bytes.
6630 * The parsing status is returned.
6632 static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
6634 u32 *parsed_results_bytes)
6636 const char *section_name, *param_name, *param_str_val;
6637 u32 param_num_val, num_section_params, num_elements;
6638 struct reg_fifo_element *elements;
6639 u8 i, j, err_code, vf_val;
6640 u32 results_offset = 0;
6643 /* Read global_params section */
6644 dump_buf += qed_read_section_hdr(dump_buf,
6645 §ion_name, &num_section_params);
6646 if (strcmp(section_name, "global_params"))
6647 return DBG_STATUS_REG_FIFO_BAD_DATA;
6649 /* Print global params */
6650 dump_buf += qed_print_section_params(dump_buf,
6652 results_buf, &results_offset);
6654 /* Read reg_fifo_data section */
6655 dump_buf += qed_read_section_hdr(dump_buf,
6656 §ion_name, &num_section_params);
6657 if (strcmp(section_name, "reg_fifo_data"))
6658 return DBG_STATUS_REG_FIFO_BAD_DATA;
6659 dump_buf += qed_read_param(dump_buf,
6660 ¶m_name, ¶m_str_val, ¶m_num_val);
6661 if (strcmp(param_name, "size"))
6662 return DBG_STATUS_REG_FIFO_BAD_DATA;
/* Size must be a whole number of FIFO elements */
6663 if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6664 return DBG_STATUS_REG_FIFO_BAD_DATA;
6665 num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6666 elements = (struct reg_fifo_element *)dump_buf;
6668 /* Decode elements */
/* NOTE(review): i is u8 while num_elements is u32 — if a dump ever holds
 * more than 255 elements this loop would not terminate correctly; confirm
 * the FIFO depth bounds this.
 */
6669 for (i = 0; i < num_elements; i++) {
6670 const char *err_msg = NULL;
6672 /* Discover if element belongs to a VF or a PF */
6673 vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6674 if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6675 sprintf(vf_str, "%s", "N/A");
6677 sprintf(vf_str, "%d", vf_val);
6679 /* Find error message */
6680 err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
6681 for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
6682 if (err_code == s_reg_fifo_errors[j].err_code)
6683 err_msg = s_reg_fifo_errors[j].err_msg;
6685 /* Add parsed element to parsed buffer */
6687 sprintf(qed_get_buf_ptr(results_buf,
6689 "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
6691 (u32)GET_FIELD(elements[i].data,
6692 REG_FIFO_ELEMENT_ADDRESS) *
6693 REG_FIFO_ELEMENT_ADDR_FACTOR,
6694 s_access_strs[GET_FIELD(elements[i].data,
6695 REG_FIFO_ELEMENT_ACCESS)],
6696 (u32)GET_FIELD(elements[i].data,
6697 REG_FIFO_ELEMENT_PF),
6699 (u32)GET_FIELD(elements[i].data,
6700 REG_FIFO_ELEMENT_PORT),
6701 s_privilege_strs[GET_FIELD(elements[i].data,
6702 REG_FIFO_ELEMENT_PRIVILEGE)],
6703 s_protection_strs[GET_FIELD(elements[i].data,
6704 REG_FIFO_ELEMENT_PROTECTION)],
6705 s_master_strs[GET_FIELD(elements[i].data,
6706 REG_FIFO_ELEMENT_MASTER)],
6707 err_msg ? err_msg : "unknown error code");
6710 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6712 "fifo contained %d elements", num_elements);
6714 /* Add 1 for string NULL termination */
6715 *parsed_results_bytes = results_offset + 1;
6717 return DBG_STATUS_OK;
/* Decodes a single IGU FIFO element into a human-readable line appended at
 * *results_offset in results_buf. Returns DBG_STATUS_IGU_FIFO_BAD_DATA if the
 * element's source, error type or command address is unknown.
 */
6720 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6723 u32 *results_offset)
6725 const struct igu_fifo_addr_data *found_addr = NULL;
6726 u8 source, err_type, i, is_cleanup;
6727 char parsed_addr_data[32];
6728 char parsed_wr_data[256];
6729 u32 wr_data, prod_cons;
6730 bool is_wr_cmd, is_pf;
6734 /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6737 dword12 = ((u64)element->dword2 << 32) | element->dword1;
6738 is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6739 is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6740 cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6741 source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6742 err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
/* Reject indices that would overrun the lookup string tables */
6744 if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6745 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6746 if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6747 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6749 /* Find address data */
6750 for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6751 const struct igu_fifo_addr_data *curr_addr =
6752 &s_igu_fifo_addr_data[i];
6754 if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6755 curr_addr->end_addr)
6756 found_addr = curr_addr;
6760 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6762 /* Prepare parsed address data */
6763 switch (found_addr->type) {
6764 case IGU_ADDR_TYPE_MSIX_MEM:
6765 sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6767 case IGU_ADDR_TYPE_WRITE_INT_ACK:
6768 case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6769 sprintf(parsed_addr_data,
6770 " SB = 0x%x", cmd_addr - found_addr->start_addr);
6773 parsed_addr_data[0] = '\0';
6777 parsed_wr_data[0] = '\0';
6781 /* Prepare parsed write data */
6782 wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6783 prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
6784 is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
6786 if (source == IGU_SRC_ATTN) {
6787 sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
6790 u8 cleanup_val, cleanup_type;
6794 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6797 IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6799 sprintf(parsed_wr_data,
6800 "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
6801 cleanup_val ? "set" : "clear",
6804 u8 update_flag, en_dis_int_for_sb, segment;
6807 update_flag = GET_FIELD(wr_data,
6808 IGU_FIFO_WR_DATA_UPDATE_FLAG);
6811 IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6812 segment = GET_FIELD(wr_data,
6813 IGU_FIFO_WR_DATA_SEGMENT);
6814 timer_mask = GET_FIELD(wr_data,
6815 IGU_FIFO_WR_DATA_TIMER_MASK);
6817 sprintf(parsed_wr_data,
6818 "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
6820 update_flag ? "update" : "nop",
6822 (en_dis_int_for_sb == 1 ? "disable" : "nop") :
6824 segment ? "attn" : "regular",
6829 /* Add parsed element to parsed buffer */
6830 *results_offset += sprintf(qed_get_buf_ptr(results_buf,
6832 "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
6833 element->dword2, element->dword1,
6835 is_pf ? "pf" : "vf",
6836 GET_FIELD(element->dword0,
6837 IGU_FIFO_ELEMENT_DWORD0_FID),
6838 s_igu_fifo_source_strs[source],
6839 is_wr_cmd ? "wr" : "rd",
6841 (!is_pf && found_addr->vf_desc)
6842 ? found_addr->vf_desc
6846 s_igu_fifo_error_strs[err_type]);
6848 return DBG_STATUS_OK;
6851 /* Parses an IGU FIFO dump buffer.
6852 * If result_buf is not NULL, the IGU FIFO results are printed to it.
6853 * In any case, the required results buffer size is assigned to
6854 * parsed_results_bytes.
6855 * The parsing status is returned.
6857 static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
6859 u32 *parsed_results_bytes)
6861 const char *section_name, *param_name, *param_str_val;
6862 u32 param_num_val, num_section_params, num_elements;
6863 struct igu_fifo_element *elements;
6864 enum dbg_status status;
6865 u32 results_offset = 0;
6868 /* Read global_params section */
6869 dump_buf += qed_read_section_hdr(dump_buf,
6870 §ion_name, &num_section_params);
6871 if (strcmp(section_name, "global_params"))
6872 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6874 /* Print global params */
6875 dump_buf += qed_print_section_params(dump_buf,
6877 results_buf, &results_offset);
6879 /* Read igu_fifo_data section */
6880 dump_buf += qed_read_section_hdr(dump_buf,
6881 §ion_name, &num_section_params);
6882 if (strcmp(section_name, "igu_fifo_data"))
6883 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6884 dump_buf += qed_read_param(dump_buf,
6885 ¶m_name, ¶m_str_val, ¶m_num_val);
6886 if (strcmp(param_name, "size"))
6887 return DBG_STATUS_IGU_FIFO_BAD_DATA;
/* Size must be a whole number of IGU FIFO elements */
6888 if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
6889 return DBG_STATUS_IGU_FIFO_BAD_DATA;
6890 num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
6891 elements = (struct igu_fifo_element *)dump_buf;
6893 /* Decode elements */
6894 for (i = 0; i < num_elements; i++) {
6895 status = qed_parse_igu_fifo_element(&elements[i],
6898 if (status != DBG_STATUS_OK)
/* Bad-data status from any element aborts the whole parse */
6902 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6904 "fifo contained %d elements", num_elements);
6906 /* Add 1 for string NULL termination */
6907 *parsed_results_bytes = results_offset + 1;
6909 return DBG_STATUS_OK;
/* Parses a Protection Override dump buffer into readable text.
 * If results_buf is not NULL the decoded windows are printed to it; in any
 * case the required buffer size is written to *parsed_results_bytes.
 */
6912 static enum dbg_status
6913 qed_parse_protection_override_dump(u32 *dump_buf,
6915 u32 *parsed_results_bytes)
6917 const char *section_name, *param_name, *param_str_val;
6918 u32 param_num_val, num_section_params, num_elements;
6919 struct protection_override_element *elements;
6920 u32 results_offset = 0;
6923 /* Read global_params section */
6924 dump_buf += qed_read_section_hdr(dump_buf,
6925 §ion_name, &num_section_params);
6926 if (strcmp(section_name, "global_params"))
6927 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6929 /* Print global params */
6930 dump_buf += qed_print_section_params(dump_buf,
6932 results_buf, &results_offset);
6934 /* Read protection_override_data section */
6935 dump_buf += qed_read_section_hdr(dump_buf,
6936 §ion_name, &num_section_params);
6937 if (strcmp(section_name, "protection_override_data"))
6938 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6939 dump_buf += qed_read_param(dump_buf,
6940 ¶m_name, ¶m_str_val, ¶m_num_val);
6941 if (strcmp(param_name, "size"))
6942 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
/* Size must be a whole number of protection override elements */
6943 if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
6944 return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
6945 num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
6946 elements = (struct protection_override_element *)dump_buf;
6948 /* Decode elements */
6949 for (i = 0; i < num_elements; i++) {
6950 u32 address = GET_FIELD(elements[i].data,
6951 PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
6952 PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
6955 sprintf(qed_get_buf_ptr(results_buf,
6957 "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
6959 (u32)GET_FIELD(elements[i].data,
6960 PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
6961 (u32)GET_FIELD(elements[i].data,
6962 PROTECTION_OVERRIDE_ELEMENT_READ),
6963 (u32)GET_FIELD(elements[i].data,
6964 PROTECTION_OVERRIDE_ELEMENT_WRITE),
6965 s_protection_strs[GET_FIELD(elements[i].data,
6966 PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
6967 s_protection_strs[GET_FIELD(elements[i].data,
6968 PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
6971 results_offset += sprintf(qed_get_buf_ptr(results_buf,
6973 "protection override contained %d elements",
6976 /* Add 1 for string NULL termination */
6977 *parsed_results_bytes = results_offset + 1;
6979 return DBG_STATUS_OK;
6982 /* Parses a FW Asserts dump buffer.
6983 * If result_buf is not NULL, the FW Asserts results are printed to it.
6984 * In any case, the required results buffer size is assigned to
6985 * parsed_results_bytes.
6986 * The parsing status is returned.
6988 static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
6990 u32 *parsed_results_bytes)
6992 u32 num_section_params, param_num_val, i, results_offset = 0;
6993 const char *param_name, *param_str_val, *section_name;
6994 bool last_section_found = false;
6996 *parsed_results_bytes = 0;
6998 /* Read global_params section */
6999 dump_buf += qed_read_section_hdr(dump_buf,
7000 §ion_name, &num_section_params);
7001 if (strcmp(section_name, "global_params"))
7002 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7004 /* Print global params */
7005 dump_buf += qed_print_section_params(dump_buf,
7007 results_buf, &results_offset);
/* Iterate "fw_asserts" sections (one per storm) until "last" is seen */
7009 while (!last_section_found) {
7010 dump_buf += qed_read_section_hdr(dump_buf,
7012 &num_section_params);
7013 if (!strcmp(section_name, "fw_asserts")) {
7014 /* Extract params */
7015 const char *storm_letter = NULL;
7016 u32 storm_dump_size = 0;
7018 for (i = 0; i < num_section_params; i++) {
7019 dump_buf += qed_read_param(dump_buf,
7023 if (!strcmp(param_name, "storm"))
7024 storm_letter = param_str_val;
7025 else if (!strcmp(param_name, "size"))
7026 storm_dump_size = param_num_val;
7029 DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
/* Both "storm" and a non-zero "size" param are mandatory */
7032 if (!storm_letter || !storm_dump_size)
7033 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7037 sprintf(qed_get_buf_ptr(results_buf,
7039 "\n%sSTORM_ASSERT: size=%d\n",
7040 storm_letter, storm_dump_size);
7041 for (i = 0; i < storm_dump_size; i++, dump_buf++)
7043 sprintf(qed_get_buf_ptr(results_buf,
7045 "%08x\n", *dump_buf);
7046 } else if (!strcmp(section_name, "last")) {
7047 last_section_found = true;
7049 return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7053 /* Add 1 for string NULL termination */
7054 *parsed_results_bytes = results_offset + 1;
7056 return DBG_STATUS_OK;
7059 /***************************** Public Functions *******************************/
/* Registers the debug binary data blob: splits it into per-type debug arrays
 * using the bin_buffer_hdr table at its start. bin_ptr must remain valid for
 * the lifetime of the debug session (only offsets into it are stored).
 */
7061 enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
7062 const u8 * const bin_ptr)
7064 struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
7067 /* Convert binary data to debug arrays */
7068 for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
7069 qed_set_dbg_bin_buf(p_hwfn,
7070 (enum bin_dbg_buffer_type)buf_id,
7071 (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
7072 buf_hdrs[buf_id].length);
7074 return DBG_STATUS_OK;
/* Allocates the zero-initialized per-hwfn debug user-data block.
 * Returns DBG_STATUS_VIRT_MEM_ALLOC_FAILED on allocation failure.
 */
7077 enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
7078 void **user_data_ptr)
7080 *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
7082 if (!(*user_data_ptr))
7083 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7085 return DBG_STATUS_OK;
/* Maps a dbg_status code to its printable string (bounds-checked). */
7088 const char *qed_dbg_get_status_str(enum dbg_status status)
7091 MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
/* Computes the buffer size needed for parsed idle-check results by running
 * the parser with a NULL destination; error/warning counts are discarded.
 */
7094 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7096 u32 num_dumped_dwords,
7097 u32 *results_buf_size)
7099 u32 num_errors, num_warnings;
7101 return qed_parse_idle_chk_dump(p_hwfn,
7106 &num_errors, &num_warnings);
/* Prints parsed idle-check results into results_buf and reports the number
 * of errors and warnings found.
 */
7109 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7111 u32 num_dumped_dwords,
7116 u32 parsed_buf_size;
7118 return qed_parse_idle_chk_dump(p_hwfn,
7123 num_errors, num_warnings);
/* Stores a user-supplied MCP trace meta-data buffer, used when a trace dump
 * does not embed its own meta data.
 */
7126 void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
7127 const u32 *meta_buf)
7129 struct dbg_tools_user_data *dev_user_data =
7130 qed_dbg_get_user_data(p_hwfn);
7132 dev_user_data->mcp_trace_user_meta_buf = meta_buf;
/* Sizing pass for MCP trace parsing: NULL results buffer, meta data freed. */
7135 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7137 u32 num_dumped_dwords,
7138 u32 *results_buf_size)
7140 return qed_parse_mcp_trace_dump(p_hwfn,
7141 dump_buf, NULL, results_buf_size, true);
/* Parses an MCP trace dump into results_buf; meta data is freed afterwards
 * (free_meta_data = true).
 */
7144 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7146 u32 num_dumped_dwords,
7149 u32 parsed_buf_size;
7151 return qed_parse_mcp_trace_dump(p_hwfn,
7153 results_buf, &parsed_buf_size, true);
/* Same as qed_print_mcp_trace_results but keeps the meta data allocated
 * (free_meta_data = false) so subsequent lines can be parsed incrementally.
 */
7156 enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
7160 u32 parsed_buf_size;
7162 return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
7163 &parsed_buf_size, false);
/* Parses raw MCP trace bytes (no dump framing) directly into results_buf;
 * requires meta data to have been allocated already.
 */
7166 enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
7168 u32 num_dumped_bytes,
7171 u32 parsed_results_bytes;
7173 return qed_parse_mcp_trace_buf(p_hwfn,
7178 results_buf, &parsed_results_bytes);
7181 /* Frees the specified MCP Trace meta data */
/* Releases the per-module name strings, the per-format strings and their
 * containing arrays, then marks the meta data as unallocated. Safe to call
 * when nothing is allocated (early return on !is_allocated).
 */
7182 void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
7184 struct dbg_tools_user_data *dev_user_data;
7185 struct mcp_trace_meta *meta;
7188 dev_user_data = qed_dbg_get_user_data(p_hwfn);
7189 meta = &dev_user_data->mcp_trace_meta;
7190 if (!meta->is_allocated)
7193 /* Release modules */
7194 if (meta->modules) {
7195 for (i = 0; i < meta->modules_num; i++)
7196 kfree(meta->modules[i]);
7197 kfree(meta->modules);
7200 /* Release formats */
7201 if (meta->formats) {
7202 for (i = 0; i < meta->formats_num; i++)
7203 kfree(meta->formats[i].format_str);
7204 kfree(meta->formats);
7207 meta->is_allocated = false;
/* Sizing pass for reg FIFO parsing (NULL results buffer). */
7210 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7212 u32 num_dumped_dwords,
7213 u32 *results_buf_size)
7215 return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
/* Parses a reg FIFO dump into results_buf; parsed size is discarded. */
7218 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7220 u32 num_dumped_dwords,
7223 u32 parsed_buf_size;
7225 return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
/* Sizing pass for IGU FIFO parsing (NULL results buffer). */
7228 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7230 u32 num_dumped_dwords,
7231 u32 *results_buf_size)
7233 return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
/* Parses an IGU FIFO dump into results_buf; parsed size is discarded. */
7236 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7238 u32 num_dumped_dwords,
7241 u32 parsed_buf_size;
7243 return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
/* Sizing pass for protection override parsing (NULL results buffer). */
7247 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7249 u32 num_dumped_dwords,
7250 u32 *results_buf_size)
7252 return qed_parse_protection_override_dump(dump_buf,
7253 NULL, results_buf_size);
/* Parses a protection override dump into results_buf. */
7256 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7258 u32 num_dumped_dwords,
7261 u32 parsed_buf_size;
7263 return qed_parse_protection_override_dump(dump_buf,
/* Sizing pass for FW asserts parsing (NULL results buffer). */
7268 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7270 u32 num_dumped_dwords,
7271 u32 *results_buf_size)
7273 return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
/* Parses a FW asserts dump into results_buf; parsed size is discarded. */
7276 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7278 u32 num_dumped_dwords,
7281 u32 parsed_buf_size;
7283 return qed_parse_fw_asserts_dump(dump_buf,
7284 results_buf, &parsed_buf_size);
/* Walks an attention block result and logs a line for every set (non-masked
 * or masked) attention status bit, resolving names via the debug binary
 * arrays. Requires the ATTN_INDEXES / ATTN_NAME_OFFSETS / PARSING_STRINGS
 * arrays to have been set (see qed_dbg_user_set_bin_ptr).
 */
7287 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7288 struct dbg_attn_block_result *results)
7290 const u32 *block_attn_name_offsets;
7291 const char *attn_name_base;
7292 const char *block_name;
7293 enum dbg_attn_type attn_type;
7296 num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7297 attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7298 block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
7300 return DBG_STATUS_INVALID_ARGS;
7302 if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7303 !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7304 !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7305 return DBG_STATUS_DBG_ARRAY_NOT_SET;
7307 block_attn_name_offsets =
7308 (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
7309 results->names_offset;
7311 attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
7313 /* Go over registers with a non-zero attention status */
7314 for (i = 0; i < num_regs; i++) {
7315 struct dbg_attn_bit_mapping *bit_mapping;
7316 struct dbg_attn_reg_result *reg_result;
7317 u8 num_reg_attn, bit_idx = 0;
7319 reg_result = &results->reg_results[i];
7320 num_reg_attn = GET_FIELD(reg_result->data,
7321 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7322 bit_mapping = (struct dbg_attn_bit_mapping *)
7323 p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
7324 reg_result->block_attn_offset;
7326 /* Go over attention status bits */
7327 for (j = 0; j < num_reg_attn; j++, bit_idx++) {
7328 u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
7329 DBG_ATTN_BIT_MAPPING_VAL);
7330 const char *attn_name, *attn_type_str, *masked_str;
7331 u32 attn_name_offset;
7334 /* Check if bit mask should be advanced (due to unused
7337 if (GET_FIELD(bit_mapping[j].data,
7338 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7339 bit_idx += (u8)attn_idx_val;
7343 /* Check current bit index */
7344 if (!(reg_result->sts_val & BIT(bit_idx)))
7347 /* An attention bit with value=1 was found
7348 * Find attention name
7351 block_attn_name_offsets[attn_idx_val];
7352 attn_name = attn_name_base + attn_name_offset;
7355 ATTN_TYPE_INTERRUPT ? "Interrupt" :
7357 masked_str = reg_result->mask_val & BIT(bit_idx) ?
7359 sts_addr = GET_FIELD(reg_result->data,
7360 DBG_ATTN_REG_RESULT_STS_ADDRESS);
7362 "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7363 block_name, attn_type_str, attn_name,
7364 sts_addr * 4, bit_idx, masked_str);
7368 return DBG_STATUS_OK;
/* Serializes debug feature collection across callers */
7371 static DEFINE_MUTEX(qed_dbg_lock);
7373 /* Wrapper for unifying the idle_chk and mcp_trace api */
/* Adapts qed_print_idle_chk_results to the common print_results signature
 * used by the feature lookup table (error/warning counts are discarded).
 */
7374 static enum dbg_status
7375 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7377 u32 num_dumped_dwords,
7380 u32 num_errors, num_warnnings;
7382 return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7383 results_buf, &num_errors,
7387 /* Feature meta data lookup table */
/* One entry per qed_dbg_features value, in enum order: name plus the four
 * stage callbacks (size query, dump, optional pretty-print, optional
 * results-size query). grc and ilt have no formatting stage (NULL, NULL).
 */
7390 enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7391 struct qed_ptt *p_ptt, u32 *size);
7392 enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7393 struct qed_ptt *p_ptt, u32 *dump_buf,
7394 u32 buf_size, u32 *dumped_dwords);
7395 enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7396 u32 *dump_buf, u32 num_dumped_dwords,
7398 enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7400 u32 num_dumped_dwords,
7401 u32 *results_buf_size);
7402 } qed_features_lookup[] = {
7404 "grc", qed_dbg_grc_get_dump_buf_size,
7405 qed_dbg_grc_dump, NULL, NULL}, {
7407 qed_dbg_idle_chk_get_dump_buf_size,
7408 qed_dbg_idle_chk_dump,
7409 qed_print_idle_chk_results_wrapper,
7410 qed_get_idle_chk_results_buf_size}, {
7412 qed_dbg_mcp_trace_get_dump_buf_size,
7413 qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7414 qed_get_mcp_trace_results_buf_size}, {
7416 qed_dbg_reg_fifo_get_dump_buf_size,
7417 qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7418 qed_get_reg_fifo_results_buf_size}, {
7420 qed_dbg_igu_fifo_get_dump_buf_size,
7421 qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7422 qed_get_igu_fifo_results_buf_size}, {
7423 "protection_override",
7424 qed_dbg_protection_override_get_dump_buf_size,
7425 qed_dbg_protection_override_dump,
7426 qed_print_protection_override_results,
7427 qed_get_protection_override_results_buf_size}, {
7429 qed_dbg_fw_asserts_get_dump_buf_size,
7430 qed_dbg_fw_asserts_dump,
7431 qed_print_fw_asserts_results,
7432 qed_get_fw_asserts_results_buf_size}, {
7434 qed_dbg_ilt_get_dump_buf_size,
7435 qed_dbg_ilt_dump, NULL, NULL},};
/* Dumps formatted feature text to the kernel log in 80-character chunks
 * (pr_notice for the first chunk, pr_cont for the rest).
 */
7437 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7439 u32 i, precision = 80;
7444 pr_notice("\n%.*s", precision, p_text_buf);
7445 for (i = precision; i < text_size; i += precision)
7446 pr_cont("%.*s", precision, p_text_buf + i);
7450 #define QED_RESULTS_BUF_MIN_SIZE 16
7451 /* Generic function for decoding debug feature info */
/* Converts a raw dump held in feature->dump_buf into formatted text:
 * queries the required size, allocates a dword-aligned text buffer, runs the
 * feature's print_results callback, then replaces the raw dump buffer with
 * the text buffer (freeing the original). Features without a
 * results_buf_size callback are left as raw binary.
 */
7452 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7453 enum qed_dbg_features feature_idx)
7455 struct qed_dbg_feature *feature =
7456 &p_hwfn->cdev->dbg_params.features[feature_idx];
7457 u32 text_size_bytes, null_char_pos, i;
7461 /* Check if feature supports formatting capability */
7462 if (!qed_features_lookup[feature_idx].results_buf_size)
7463 return DBG_STATUS_OK;
7465 /* Obtain size of formatted output */
7466 rc = qed_features_lookup[feature_idx].
7467 results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7468 feature->dumped_dwords, &text_size_bytes);
7469 if (rc != DBG_STATUS_OK)
7472 /* Make sure that the allocated size is a multiple of dword (4 bytes) */
7473 null_char_pos = text_size_bytes - 1;
7474 text_size_bytes = (text_size_bytes + 3) & ~0x3;
7476 if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7477 DP_NOTICE(p_hwfn->cdev,
7478 "formatted size of feature was too small %d. Aborting\n",
7480 return DBG_STATUS_INVALID_ARGS;
7483 /* Allocate temp text buf */
7484 text_buf = vzalloc(text_size_bytes);
7486 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7488 /* Decode feature opcodes to string on temp buf */
7489 rc = qed_features_lookup[feature_idx].
7490 print_results(p_hwfn, (u32 *)feature->dump_buf,
7491 feature->dumped_dwords, text_buf);
7492 if (rc != DBG_STATUS_OK) {
7497 /* Replace the original null character with a '\n' character.
7498 * The bytes that were added as a result of the dword alignment are also
7499 * padded with '\n' characters.
7501 for (i = null_char_pos; i < text_size_bytes; i++)
7504 /* Dump printable feature to log */
7505 if (p_hwfn->cdev->dbg_params.print_data)
7506 qed_dbg_print_feature(text_buf, text_size_bytes);
7508 /* Free the old dump_buf and point the dump_buf to the newly allocagted
7509 * and formatted text buffer.
7511 vfree(feature->dump_buf);
7512 feature->dump_buf = text_buf;
7513 feature->buf_size = text_size_bytes;
7514 feature->dumped_dwords = text_size_bytes / 4;
7518 #define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
7520 /* Generic function for performing the dump of a debug feature. */
/* Frees any stale dump buffer, queries the required size, allocates a fresh
 * buffer, performs the dump and formats it. A failing MFW
 * (DBG_STATUS_NVRAM_GET_IMAGE_FAILED) is tolerated: the raw binary dump is
 * kept and DBG_STATUS_OK is returned so callers still get usable data.
 */
7521 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7522 struct qed_ptt *p_ptt,
7523 enum qed_dbg_features feature_idx)
7525 struct qed_dbg_feature *feature =
7526 &p_hwfn->cdev->dbg_params.features[feature_idx];
7527 u32 buf_size_dwords;
7530 DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7531 qed_features_lookup[feature_idx].name);
7533 /* Dump_buf was already allocated need to free (this can happen if dump
7534 * was called but file was never read).
7535 * We can't use the buffer as is since size may have changed.
7537 if (feature->dump_buf) {
7538 vfree(feature->dump_buf);
7539 feature->dump_buf = NULL;
7542 /* Get buffer size from hsi, allocate accordingly, and perform the
7545 rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7547 if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7550 if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
7551 feature->buf_size = 0;
7552 DP_NOTICE(p_hwfn->cdev,
7553 "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
7554 qed_features_lookup[feature_idx].name,
7555 buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
7557 return DBG_STATUS_OK;
7560 feature->buf_size = buf_size_dwords * sizeof(u32);
7561 feature->dump_buf = vmalloc(feature->buf_size);
7562 if (!feature->dump_buf)
7563 return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7565 rc = qed_features_lookup[feature_idx].
7566 perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7567 feature->buf_size / sizeof(u32),
7568 &feature->dumped_dwords);
7570 /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7571 * In this case the buffer holds valid binary data, but we wont able
7572 * to parse it (since parsing relies on data in NVRAM which is only
7573 * accessible when MFW is responsive). skip the formatting but return
7574 * success so that binary data is provided.
7576 if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7577 return DBG_STATUS_OK;
7579 if (rc != DBG_STATUS_OK)
7583 rc = format_feature(p_hwfn, feature_idx);
/* Collects a GRC dump into the caller's buffer. */
7587 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7589 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
/* Returns the buffer size required for a GRC dump. */
7592 int qed_dbg_grc_size(struct qed_dev *cdev)
7594 return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
/* Collects an idle-check dump into the caller's buffer. */
7597 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7599 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
/* Returns the buffer size required for an idle-check dump. */
7603 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7605 return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
/* Collects a reg FIFO dump into the caller's buffer. */
7608 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7610 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
/* Returns the buffer size required for a reg FIFO dump. */
7614 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7616 return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
/* Collects an IGU FIFO dump into the caller's buffer. */
7619 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7621 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
/* Returns the buffer size required for an IGU FIFO dump. */
7625 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7627 return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
/* Queries the MFW for an NVM image's attributes and returns its length in
 * *length. Returns the qed_mcp_get_nvm_image_att() error code on failure.
 */
7630 static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
7631 enum qed_nvm_images image_id, u32 *length)
7633 struct qed_nvm_image_att image_att;
7637 rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
7641 *length = image_att.length;
/* qed_dbg_nvm_image(): read NVRAM image @image_id into @buffer and report
 * the number of bytes written through @num_dumped_bytes (0 on failure paths).
 * The image length is obtained via qed_dbg_nvm_image_length(), rounded up to
 * a dword multiple, and all images except QED_NVM_IMAGE_NVM_META are
 * byte-swapped to big-endian dword-by-dword.
 * NOTE(review): error-check lines and local declarations (rc, len_rounded,
 * i, val) are elided in this view; annotations cover only visible code.
 */
7646 static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
7647 u32 *num_dumped_bytes,
7648 enum qed_nvm_images image_id)
7650 struct qed_hwfn *p_hwfn =
7651 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
/* Report zero bytes until the image is fully fetched. */
7656 *num_dumped_bytes = 0;
7657 rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
7661 DP_NOTICE(p_hwfn->cdev,
7662 "Collecting a debug feature [\"nvram image %d\"]\n",
/* Round the image length up to a whole number of dwords before reading,
 * so the dword-wise swap loop below never touches a partial word.
 */
7665 len_rounded = roundup(len_rounded, sizeof(u32));
7666 rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
7670 /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
7671 if (image_id != QED_NVM_IMAGE_NVM_META)
7672 for (i = 0; i < len_rounded; i += 4) {
7673 val = cpu_to_be32(*(u32 *)(buffer + i));
7674 *(u32 *)(buffer + i) = val;
/* Only now is the buffer valid; expose the rounded byte count. */
7677 *num_dumped_bytes = len_rounded;
7682 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7683 u32 *num_dumped_bytes)
7685 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7689 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7691 return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7694 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7695 u32 *num_dumped_bytes)
7697 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7701 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7703 return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7706 int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7708 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
7711 int qed_dbg_ilt_size(struct qed_dev *cdev)
7713 return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
7716 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7717 u32 *num_dumped_bytes)
7719 return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7723 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7725 return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7728 /* Defines the number of bytes allocated for recording the length of each
 * debugfs feature buffer (the regdump section header).
 */
7731 #define REGDUMP_HEADER_SIZE sizeof(u32)
7732 #define REGDUMP_HEADER_SIZE_SHIFT 0
7733 #define REGDUMP_HEADER_SIZE_MASK 0xffffff
7734 #define REGDUMP_HEADER_FEATURE_SHIFT 24
7735 #define REGDUMP_HEADER_FEATURE_MASK 0x3f
7736 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
7737 #define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
7738 #define REGDUMP_HEADER_ENGINE_SHIFT 31
7739 #define REGDUMP_HEADER_ENGINE_MASK 0x1
7740 #define REGDUMP_MAX_SIZE 0x1000000
7741 #define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
/* Feature identifiers stored in the REGDUMP_HEADER_FEATURE field of each
 * regdump section header (see qed_calc_regdump_header() below).
 * NOTE(review): most enumerators are elided in this view — only
 * PROTECTION_OVERRIDE (= 5) is visible; confirm the full list upstream.
 */
7743 enum debug_print_features {
7749 PROTECTION_OVERRIDE = 5,
7760 static u32 qed_calc_regdump_header(struct qed_dev *cdev,
7761 enum debug_print_features feature,
7762 int engine, u32 feature_size, u8 omit_engine)
7766 SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
7767 if (res != feature_size)
7769 "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
7770 feature, feature_size);
7772 SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
7773 SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
7774 SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
/* qed_dbg_all_data(): collect every debug feature into @buffer as a sequence
 * of sections, each preceded by a header built by qed_calc_regdump_header().
 * Per-engine features (two idle_chks, reg_fifo, igu_fifo,
 * protection_override, fw_asserts, optional ILT, GRC) are gathered for each
 * HW function, then the engine-independent features (mcp_trace and the
 * NVRAM images) once. The caller sizes @buffer via qed_dbg_all_data_size().
 * NOTE(review): many lines (if/else closers, DP_ERR openers, the return
 * statement) are elided in this view; annotations cover visible code only.
 */
7779 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7781 u8 cur_engine, omit_engine = 0, org_engine;
7782 struct qed_hwfn *p_hwfn =
7783 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
7784 struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
7785 int grc_params[MAX_DBG_GRC_PARAMS], i;
7786 u32 offset = 0, feature_size;
/* Snapshot the GRC parameters; they are restored right before the GRC
 * dump at the end of the per-engine loop below.
 */
7789 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
7790 grc_params[i] = dev_data->grc.param_val[i];
/* NOTE(review): the body of this if is elided here — presumably it sets
 * omit_engine for non-CMT (single-engine) devices; confirm upstream.
 */
7792 if (!QED_IS_CMT(cdev))
/* Serialize whole-dump collection against concurrent debug users. */
7795 mutex_lock(&qed_dbg_lock);
7797 org_engine = qed_get_debug_engine(cdev);
7798 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7799 /* Collect idle_chks and grcDump for each hw function */
7800 DP_VERBOSE(cdev, QED_MSG_DEBUG,
7801 "obtaining idle_chk and grcdump for current engine\n");
7802 qed_set_debug_engine(cdev, cur_engine);
7804 /* First idle_chk */
7805 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7806 REGDUMP_HEADER_SIZE, &feature_size);
7808 *(u32 *)((u8 *)buffer + offset) =
7809 qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
7810 feature_size, omit_engine);
7811 offset += (feature_size + REGDUMP_HEADER_SIZE);
7813 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7816 /* Second idle_chk */
7817 rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7818 REGDUMP_HEADER_SIZE, &feature_size);
7820 *(u32 *)((u8 *)buffer + offset) =
7821 qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
7822 feature_size, omit_engine);
7823 offset += (feature_size + REGDUMP_HEADER_SIZE);
7825 DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
/* reg_fifo dump */
7829 rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7830 REGDUMP_HEADER_SIZE, &feature_size);
7832 *(u32 *)((u8 *)buffer + offset) =
7833 qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
7834 feature_size, omit_engine);
7835 offset += (feature_size + REGDUMP_HEADER_SIZE);
7837 DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
/* igu_fifo dump */
7841 rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7842 REGDUMP_HEADER_SIZE, &feature_size);
7844 *(u32 *)((u8 *)buffer + offset) =
7845 qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
7846 feature_size, omit_engine);
7847 offset += (feature_size + REGDUMP_HEADER_SIZE);
7849 DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
7852 /* protection_override dump */
7853 rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7854 REGDUMP_HEADER_SIZE,
7857 *(u32 *)((u8 *)buffer + offset) =
7858 qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
7860 feature_size, omit_engine);
7861 offset += (feature_size + REGDUMP_HEADER_SIZE);
7864 "qed_dbg_protection_override failed. rc = %d\n",
7868 /* fw_asserts dump */
7869 rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7870 REGDUMP_HEADER_SIZE, &feature_size);
7872 *(u32 *)((u8 *)buffer + offset) =
7873 qed_calc_regdump_header(cdev, FW_ASSERTS,
7874 cur_engine, feature_size,
7876 offset += (feature_size + REGDUMP_HEADER_SIZE);
7878 DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
/* ILT dump — skipped when disabled or when it would exceed the cap
 * (see qed_dbg_all_data_size() which sets cdev->disable_ilt_dump).
 */
7882 feature_size = qed_dbg_ilt_size(cdev);
7883 if (!cdev->disable_ilt_dump &&
7884 feature_size < ILT_DUMP_MAX_SIZE) {
7885 rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
7886 REGDUMP_HEADER_SIZE, &feature_size);
7888 *(u32 *)((u8 *)buffer + offset) =
7889 qed_calc_regdump_header(cdev, ILT_DUMP,
7893 offset += feature_size + REGDUMP_HEADER_SIZE;
7895 DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
7900 /* GRC dump - must be last because when mcp stuck it will
7901 * clutter idle_chk, reg_fifo, ...
/* Restore the GRC params saved at function entry before dumping. */
7903 for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
7904 dev_data->grc.param_val[i] = grc_params[i];
7906 rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7907 REGDUMP_HEADER_SIZE, &feature_size);
7909 *(u32 *)((u8 *)buffer + offset) =
7910 qed_calc_regdump_header(cdev, GRC_DUMP,
7912 feature_size, omit_engine);
7913 offset += (feature_size + REGDUMP_HEADER_SIZE);
7915 DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
/* Per-engine collection done; restore the originally selected engine.
 * Everything below is engine-independent (mcp_trace, NVRAM images).
 */
7919 qed_set_debug_engine(cdev, org_engine);
/* mcp_trace dump */
7922 rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7923 REGDUMP_HEADER_SIZE, &feature_size);
7925 *(u32 *)((u8 *)buffer + offset) =
7926 qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
7927 feature_size, omit_engine);
7928 offset += (feature_size + REGDUMP_HEADER_SIZE);
7930 DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
/* NVRAM images: a -ENOENT (image absent) is tolerated silently for
 * each of the four images below; other errors are logged.
 */
7934 rc = qed_dbg_nvm_image(cdev,
7935 (u8 *)buffer + offset +
7936 REGDUMP_HEADER_SIZE, &feature_size,
7937 QED_NVM_IMAGE_NVM_CFG1);
7939 *(u32 *)((u8 *)buffer + offset) =
7940 qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
7941 feature_size, omit_engine);
7942 offset += (feature_size + REGDUMP_HEADER_SIZE);
7943 } else if (rc != -ENOENT) {
7945 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
7946 QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
7950 rc = qed_dbg_nvm_image(cdev,
7951 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
7952 &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
7954 *(u32 *)((u8 *)buffer + offset) =
7955 qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
7956 feature_size, omit_engine);
7957 offset += (feature_size + REGDUMP_HEADER_SIZE);
7958 } else if (rc != -ENOENT) {
7960 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
7961 QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
7966 rc = qed_dbg_nvm_image(cdev,
7967 (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
7968 &feature_size, QED_NVM_IMAGE_NVM_META);
7970 *(u32 *)((u8 *)buffer + offset) =
7971 qed_calc_regdump_header(cdev, NVM_META, cur_engine,
7972 feature_size, omit_engine);
7973 offset += (feature_size + REGDUMP_HEADER_SIZE);
7974 } else if (rc != -ENOENT) {
7976 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
7977 QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
7981 rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
7982 REGDUMP_HEADER_SIZE, &feature_size,
7983 QED_NVM_IMAGE_MDUMP);
7985 *(u32 *)((u8 *)buffer + offset) =
7986 qed_calc_regdump_header(cdev, MDUMP, cur_engine,
7987 feature_size, omit_engine);
7988 offset += (feature_size + REGDUMP_HEADER_SIZE);
7989 } else if (rc != -ENOENT) {
7991 "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
7992 QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
7995 mutex_unlock(&qed_dbg_lock);
/* qed_dbg_all_data_size(): compute the total buffer size in bytes needed by
 * qed_dbg_all_data() — per-engine feature sizes plus a REGDUMP_HEADER_SIZE
 * header per section, plus the engine-independent mcp_trace and NVRAM image
 * sizes. If the total exceeds REGDUMP_MAX_SIZE, ILT dumping is disabled
 * (cdev->disable_ilt_dump = true) and its contribution subtracted.
 * NOTE(review): some closing braces and the return statement are elided in
 * this view.
 */
8000 int qed_dbg_all_data_size(struct qed_dev *cdev)
8002 struct qed_hwfn *p_hwfn =
8003 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8004 u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
8005 u8 cur_engine, org_engine;
8007 cdev->disable_ilt_dump = false;
8008 org_engine = qed_get_debug_engine(cdev);
8009 for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
8010 /* Engine specific */
8011 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8012 "calculating idle_chk and grcdump register length for current engine\n");
8013 qed_set_debug_engine(cdev, cur_engine);
/* Two idle_chk sections are sized because qed_dbg_all_data() dumps
 * idle_chk twice per engine.
 */
8014 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8015 REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
8016 REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
8017 REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
8018 REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
8019 REGDUMP_HEADER_SIZE +
8020 qed_dbg_protection_override_size(cdev) +
8021 REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
/* ILT is counted only when under its per-dump cap; total_ilt_len is
 * kept separately so it can be subtracted if the dump is too large.
 */
8023 ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
8024 if (ilt_len < ILT_DUMP_MAX_SIZE) {
8025 total_ilt_len += ilt_len;
8026 regs_len += ilt_len;
8030 qed_set_debug_engine(cdev, org_engine);
/* Engine common */
8033 regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
8034 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
8036 regs_len += REGDUMP_HEADER_SIZE + image_len;
8037 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
8039 regs_len += REGDUMP_HEADER_SIZE + image_len;
8040 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
8042 regs_len += REGDUMP_HEADER_SIZE + image_len;
8043 qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
8045 regs_len += REGDUMP_HEADER_SIZE + image_len;
8047 if (regs_len > REGDUMP_MAX_SIZE) {
8048 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8049 "Dump exceeds max size 0x%x, disable ILT dump\n",
8051 cdev->disable_ilt_dump = true;
8052 regs_len -= total_ilt_len;
/* qed_dbg_feature(): dump a single debug @feature into @buffer.
 * Acquires a PTT window, runs qed_dbg_dump() to fill the feature's internal
 * dump_buf, then copies it to the caller's @buffer and reports the byte
 * count (dumped_dwords * sizeof(u32)) through @num_dumped_bytes.
 * NOTE(review): PTT-acquire failure handling, the rc declaration, the goto
 * labels and the return statement are elided in this view.
 */
8058 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
8059 enum qed_dbg_features feature, u32 *num_dumped_bytes)
8061 struct qed_hwfn *p_hwfn =
8062 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8063 struct qed_dbg_feature *qed_feature =
8064 &cdev->dbg_params.features[feature];
8065 enum dbg_status dbg_rc;
8066 struct qed_ptt *p_ptt;
/* Map a PTT window for register access during the dump. */
8070 p_ptt = qed_ptt_acquire(p_hwfn);
/* Perform the dump into the feature's internal buffer. */
8075 dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
8076 if (dbg_rc != DBG_STATUS_OK) {
8077 DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
8078 qed_dbg_get_status_str(dbg_rc));
/* On failure, report zero bytes to the caller. */
8079 *num_dumped_bytes = 0;
8084 DP_VERBOSE(cdev, QED_MSG_DEBUG,
8085 "copying debugfs feature to external buffer\n");
8086 memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
8087 *num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
/* Release the PTT window on every exit path. */
8091 qed_ptt_release(p_hwfn, p_ptt);
/* qed_dbg_feature_size(): return the buffer size in bytes required to dump
 * @feature, caching it in qed_feature->buf_size. A size of 0 is returned
 * when the query fails or the feature exceeds MAX_DBG_FEATURE_SIZE_DWORDS
 * (such a feature will not be dumped).
 * NOTE(review): the rc declaration, the PTT-acquire failure check and the
 * tail of the get_size() call are elided in this view.
 */
8095 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
8097 struct qed_hwfn *p_hwfn =
8098 &cdev->hwfns[cdev->dbg_params.engine_for_debug];
8099 struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
8100 struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
8101 u32 buf_size_dwords;
/* Query the feature's required size in dwords from its handler. */
8107 rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
8109 if (rc != DBG_STATUS_OK)
8110 buf_size_dwords = 0;
8112 /* Feature will not be dumped if it exceeds maximum size */
8113 if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
8114 buf_size_dwords = 0;
8116 qed_ptt_release(p_hwfn, p_ptt);
/* Cache and return the size in bytes. */
8117 qed_feature->buf_size = buf_size_dwords * sizeof(u32);
8118 return qed_feature->buf_size;
8121 u8 qed_get_debug_engine(struct qed_dev *cdev)
8123 return cdev->dbg_params.engine_for_debug;
8126 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
8128 DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
8130 cdev->dbg_params.engine_for_debug = engine_number;
8133 void qed_dbg_pf_init(struct qed_dev *cdev)
8135 const u8 *dbg_values = NULL;
8138 /* Debug values are after init values.
8139 * The offset is the first dword of the file.
8141 dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
8143 for_each_hwfn(cdev, i) {
8144 qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8145 qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
8148 /* Set the hwfn to be 0 as default */
8149 cdev->dbg_params.engine_for_debug = 0;
8152 void qed_dbg_pf_exit(struct qed_dev *cdev)
8154 struct qed_dbg_feature *feature = NULL;
8155 enum qed_dbg_features feature_idx;
8157 /* debug features' buffers may be allocated if debug feature was used
8158 * but dump wasn't called
8160 for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
8161 feature = &cdev->dbg_features[feature_idx];
8162 if (feature->dump_buf) {
8163 vfree(feature->dump_buf);
8164 feature->dump_buf = NULL;