xref: /linux/drivers/net/ethernet/qlogic/qed/qed_debug.c (revision e2be04c7f9958dde770eeb8b30e829ca969b37bb)
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/vmalloc.h>
11 #include <linux/crc32.h>
12 #include "qed.h"
13 #include "qed_hsi.h"
14 #include "qed_hw.h"
15 #include "qed_mcp.h"
16 #include "qed_reg_addr.h"
17 
18 /* Memory groups enum */
19 enum mem_groups {
20 	MEM_GROUP_PXP_MEM,
21 	MEM_GROUP_DMAE_MEM,
22 	MEM_GROUP_CM_MEM,
23 	MEM_GROUP_QM_MEM,
24 	MEM_GROUP_TM_MEM,
25 	MEM_GROUP_BRB_RAM,
26 	MEM_GROUP_BRB_MEM,
27 	MEM_GROUP_PRS_MEM,
28 	MEM_GROUP_SDM_MEM,
29 	MEM_GROUP_IOR,
30 	MEM_GROUP_RAM,
31 	MEM_GROUP_BTB_RAM,
32 	MEM_GROUP_RDIF_CTX,
33 	MEM_GROUP_TDIF_CTX,
34 	MEM_GROUP_CFC_MEM,
35 	MEM_GROUP_CONN_CFC_MEM,
36 	MEM_GROUP_TASK_CFC_MEM,
37 	MEM_GROUP_CAU_PI,
38 	MEM_GROUP_CAU_MEM,
39 	MEM_GROUP_PXP_ILT,
40 	MEM_GROUP_PBUF,
41 	MEM_GROUP_MULD_MEM,
42 	MEM_GROUP_BTB_MEM,
43 	MEM_GROUP_IGU_MEM,
44 	MEM_GROUP_IGU_MSIX,
45 	MEM_GROUP_CAU_SB,
46 	MEM_GROUP_BMB_RAM,
47 	MEM_GROUP_BMB_MEM,
48 	MEM_GROUPS_NUM
49 };
50 
51 /* Memory groups names */
52 static const char * const s_mem_group_names[] = {
53 	"PXP_MEM",
54 	"DMAE_MEM",
55 	"CM_MEM",
56 	"QM_MEM",
57 	"TM_MEM",
58 	"BRB_RAM",
59 	"BRB_MEM",
60 	"PRS_MEM",
61 	"SDM_MEM",
62 	"IOR",
63 	"RAM",
64 	"BTB_RAM",
65 	"RDIF_CTX",
66 	"TDIF_CTX",
67 	"CFC_MEM",
68 	"CONN_CFC_MEM",
69 	"TASK_CFC_MEM",
70 	"CAU_PI",
71 	"CAU_MEM",
72 	"PXP_ILT",
73 	"PBUF",
74 	"MULD_MEM",
75 	"BTB_MEM",
76 	"IGU_MEM",
77 	"IGU_MSIX",
78 	"CAU_SB",
79 	"BMB_RAM",
80 	"BMB_MEM",
81 };
82 
83 /* Idle check conditions */
84 
85 static u32 cond5(const u32 *r, const u32 *imm)
86 {
87 	return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
88 }
89 
90 static u32 cond7(const u32 *r, const u32 *imm)
91 {
92 	return ((r[0] >> imm[0]) & imm[1]) != imm[2];
93 }
94 
95 static u32 cond14(const u32 *r, const u32 *imm)
96 {
97 	return (r[0] != imm[0]) && (((r[1] >> imm[1]) & imm[2]) == imm[3]);
98 }
99 
100 static u32 cond6(const u32 *r, const u32 *imm)
101 {
102 	return (r[0] & imm[0]) != imm[1];
103 }
104 
105 static u32 cond9(const u32 *r, const u32 *imm)
106 {
107 	return ((r[0] & imm[0]) >> imm[1]) !=
108 	    (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
109 }
110 
111 static u32 cond10(const u32 *r, const u32 *imm)
112 {
113 	return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
114 }
115 
116 static u32 cond4(const u32 *r, const u32 *imm)
117 {
118 	return (r[0] & ~imm[0]) != imm[1];
119 }
120 
121 static u32 cond0(const u32 *r, const u32 *imm)
122 {
123 	return (r[0] & ~r[1]) != imm[0];
124 }
125 
126 static u32 cond1(const u32 *r, const u32 *imm)
127 {
128 	return r[0] != imm[0];
129 }
130 
131 static u32 cond11(const u32 *r, const u32 *imm)
132 {
133 	return r[0] != r[1] && r[2] == imm[0];
134 }
135 
136 static u32 cond12(const u32 *r, const u32 *imm)
137 {
138 	return r[0] != r[1] && r[2] > imm[0];
139 }
140 
141 static u32 cond3(const u32 *r, const u32 *imm)
142 {
143 	return r[0] != r[1];
144 }
145 
146 static u32 cond13(const u32 *r, const u32 *imm)
147 {
148 	return r[0] & imm[0];
149 }
150 
151 static u32 cond8(const u32 *r, const u32 *imm)
152 {
153 	return r[0] < (r[1] - imm[0]);
154 }
155 
156 static u32 cond2(const u32 *r, const u32 *imm)
157 {
158 	return r[0] > imm[0];
159 }
160 
161 /* Array of Idle Check conditions */
162 static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
163 	cond0,
164 	cond1,
165 	cond2,
166 	cond3,
167 	cond4,
168 	cond5,
169 	cond6,
170 	cond7,
171 	cond8,
172 	cond9,
173 	cond10,
174 	cond11,
175 	cond12,
176 	cond13,
177 	cond14,
178 };
179 
180 /******************************* Data Types **********************************/
181 
182 enum platform_ids {
183 	PLATFORM_ASIC,
184 	PLATFORM_RESERVED,
185 	PLATFORM_RESERVED2,
186 	PLATFORM_RESERVED3,
187 	MAX_PLATFORM_IDS
188 };
189 
190 struct chip_platform_defs {
191 	u8 num_ports;
192 	u8 num_pfs;
193 	u8 num_vfs;
194 };
195 
196 /* Chip constant definitions */
197 struct chip_defs {
198 	const char *name;
199 	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
200 };
201 
202 /* Platform constant definitions */
203 struct platform_defs {
204 	const char *name;
205 	u32 delay_factor;
206 };
207 
208 /* Storm constant definitions.
209  * Addresses are in bytes, sizes are in quad-regs.
210  */
211 struct storm_defs {
212 	char letter;
213 	enum block_id block_id;
214 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
215 	bool has_vfc;
216 	u32 sem_fast_mem_addr;
217 	u32 sem_frame_mode_addr;
218 	u32 sem_slow_enable_addr;
219 	u32 sem_slow_mode_addr;
220 	u32 sem_slow_mode1_conf_addr;
221 	u32 sem_sync_dbg_empty_addr;
222 	u32 sem_slow_dbg_empty_addr;
223 	u32 cm_ctx_wr_addr;
224 	u32 cm_conn_ag_ctx_lid_size;
225 	u32 cm_conn_ag_ctx_rd_addr;
226 	u32 cm_conn_st_ctx_lid_size;
227 	u32 cm_conn_st_ctx_rd_addr;
228 	u32 cm_task_ag_ctx_lid_size;
229 	u32 cm_task_ag_ctx_rd_addr;
230 	u32 cm_task_st_ctx_lid_size;
231 	u32 cm_task_st_ctx_rd_addr;
232 };
233 
234 /* Block constant definitions */
235 struct block_defs {
236 	const char *name;
237 	bool has_dbg_bus[MAX_CHIP_IDS];
238 	bool associated_to_storm;
239 
240 	/* Valid only if associated_to_storm is true */
241 	u32 storm_id;
242 	enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
243 	u32 dbg_select_addr;
244 	u32 dbg_enable_addr;
245 	u32 dbg_shift_addr;
246 	u32 dbg_force_valid_addr;
247 	u32 dbg_force_frame_addr;
248 	bool has_reset_bit;
249 
250 	/* If true, block is taken out of reset before dump */
251 	bool unreset;
252 	enum dbg_reset_regs reset_reg;
253 
254 	/* Bit offset in reset register */
255 	u8 reset_bit_offset;
256 };
257 
258 /* Reset register definitions */
259 struct reset_reg_defs {
260 	u32 addr;
261 	u32 unreset_val;
262 	bool exists[MAX_CHIP_IDS];
263 };
264 
265 struct grc_param_defs {
266 	u32 default_val[MAX_CHIP_IDS];
267 	u32 min;
268 	u32 max;
269 	bool is_preset;
270 	u32 exclude_all_preset_val;
271 	u32 crash_preset_val;
272 };
273 
274 /* Address is in 128b units. Width is in bits. */
275 struct rss_mem_defs {
276 	const char *mem_name;
277 	const char *type_name;
278 	u32 addr;
279 	u32 num_entries[MAX_CHIP_IDS];
280 	u32 entry_width[MAX_CHIP_IDS];
281 };
282 
283 struct vfc_ram_defs {
284 	const char *mem_name;
285 	const char *type_name;
286 	u32 base_row;
287 	u32 num_rows;
288 };
289 
290 struct big_ram_defs {
291 	const char *instance_name;
292 	enum mem_groups mem_group_id;
293 	enum mem_groups ram_mem_group_id;
294 	enum dbg_grc_params grc_param;
295 	u32 addr_reg_addr;
296 	u32 data_reg_addr;
297 	u32 num_of_blocks[MAX_CHIP_IDS];
298 };
299 
300 struct phy_defs {
301 	const char *phy_name;
302 
303 	/* PHY base GRC address */
304 	u32 base_addr;
305 
306 	/* Relative address of indirect TBUS address register (bits 0..7) */
307 	u32 tbus_addr_lo_addr;
308 
309 	/* Relative address of indirect TBUS address register (bits 8..10) */
310 	u32 tbus_addr_hi_addr;
311 
312 	/* Relative address of indirect TBUS data register (bits 0..7) */
313 	u32 tbus_data_lo_addr;
314 
315 	/* Relative address of indirect TBUS data register (bits 8..11) */
316 	u32 tbus_data_hi_addr;
317 };
318 
319 /******************************** Constants **********************************/
320 
321 #define MAX_LCIDS			320
322 #define MAX_LTIDS			320
323 
324 #define NUM_IOR_SETS			2
325 #define IORS_PER_SET			176
326 #define IOR_SET_OFFSET(set_id)		((set_id) * 256)
327 
328 #define BYTES_IN_DWORD			sizeof(u32)
329 
330 /* In the macros below, size and offset are specified in bits */
331 #define CEIL_DWORDS(size)		DIV_ROUND_UP(size, 32)
332 #define FIELD_BIT_OFFSET(type, field)	type ## _ ## field ## _ ## OFFSET
333 #define FIELD_BIT_SIZE(type, field)	type ## _ ## field ## _ ## SIZE
334 #define FIELD_DWORD_OFFSET(type, field) \
335 	 (int)(FIELD_BIT_OFFSET(type, field) / 32)
336 #define FIELD_DWORD_SHIFT(type, field)	(FIELD_BIT_OFFSET(type, field) % 32)
337 #define FIELD_BIT_MASK(type, field) \
338 	(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
339 	 FIELD_DWORD_SHIFT(type, field))
340 
341 #define SET_VAR_FIELD(var, type, field, val) \
342 	do { \
343 		var[FIELD_DWORD_OFFSET(type, field)] &=	\
344 		(~FIELD_BIT_MASK(type, field));	\
345 		var[FIELD_DWORD_OFFSET(type, field)] |= \
346 		(val) << FIELD_DWORD_SHIFT(type, field); \
347 	} while (0)
348 
349 #define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
350 	do { \
351 		for (i = 0; i < (arr_size); i++) \
352 			qed_wr(dev, ptt, addr,	(arr)[i]); \
353 	} while (0)
354 
355 #define ARR_REG_RD(dev, ptt, addr, arr, arr_size) \
356 	do { \
357 		for (i = 0; i < (arr_size); i++) \
358 			(arr)[i] = qed_rd(dev, ptt, addr); \
359 	} while (0)
360 
361 #ifndef DWORDS_TO_BYTES
362 #define DWORDS_TO_BYTES(dwords)		((dwords) * BYTES_IN_DWORD)
363 #endif
364 #ifndef BYTES_TO_DWORDS
365 #define BYTES_TO_DWORDS(bytes)		((bytes) / BYTES_IN_DWORD)
366 #endif
367 
368 /* extra lines include a signature line + optional latency events line */
369 #ifndef NUM_DBG_LINES
370 #define NUM_EXTRA_DBG_LINES(block_desc) \
371 	(1 + ((block_desc)->has_latency_events ? 1 : 0))
372 #define NUM_DBG_LINES(block_desc) \
373 	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
374 #endif
375 
376 #define RAM_LINES_TO_DWORDS(lines)	((lines) * 2)
377 #define RAM_LINES_TO_BYTES(lines) \
378 	DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
379 
380 #define REG_DUMP_LEN_SHIFT		24
381 #define MEM_DUMP_ENTRY_SIZE_DWORDS \
382 	BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
383 
384 #define IDLE_CHK_RULE_SIZE_DWORDS \
385 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
386 
387 #define IDLE_CHK_RESULT_HDR_DWORDS \
388 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
389 
390 #define IDLE_CHK_RESULT_REG_HDR_DWORDS \
391 	BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
392 
393 #define IDLE_CHK_MAX_ENTRIES_SIZE	32
394 
395 /* The sizes and offsets below are specified in bits */
396 #define VFC_CAM_CMD_STRUCT_SIZE		64
397 #define VFC_CAM_CMD_ROW_OFFSET		48
398 #define VFC_CAM_CMD_ROW_SIZE		9
399 #define VFC_CAM_ADDR_STRUCT_SIZE	16
400 #define VFC_CAM_ADDR_OP_OFFSET		0
401 #define VFC_CAM_ADDR_OP_SIZE		4
402 #define VFC_CAM_RESP_STRUCT_SIZE	256
403 #define VFC_RAM_ADDR_STRUCT_SIZE	16
404 #define VFC_RAM_ADDR_OP_OFFSET		0
405 #define VFC_RAM_ADDR_OP_SIZE		2
406 #define VFC_RAM_ADDR_ROW_OFFSET		2
407 #define VFC_RAM_ADDR_ROW_SIZE		10
408 #define VFC_RAM_RESP_STRUCT_SIZE	256
409 
410 #define VFC_CAM_CMD_DWORDS		CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
411 #define VFC_CAM_ADDR_DWORDS		CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
412 #define VFC_CAM_RESP_DWORDS		CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
413 #define VFC_RAM_CMD_DWORDS		VFC_CAM_CMD_DWORDS
414 #define VFC_RAM_ADDR_DWORDS		CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
415 #define VFC_RAM_RESP_DWORDS		CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
416 
417 #define NUM_VFC_RAM_TYPES		4
418 
419 #define VFC_CAM_NUM_ROWS		512
420 
421 #define VFC_OPCODE_CAM_RD		14
422 #define VFC_OPCODE_RAM_RD		0
423 
424 #define NUM_RSS_MEM_TYPES		5
425 
426 #define NUM_BIG_RAM_TYPES		3
427 #define BIG_RAM_BLOCK_SIZE_BYTES	128
428 #define BIG_RAM_BLOCK_SIZE_DWORDS \
429 	BYTES_TO_DWORDS(BIG_RAM_BLOCK_SIZE_BYTES)
430 
431 #define NUM_PHY_TBUS_ADDRESSES		2048
432 #define PHY_DUMP_SIZE_DWORDS		(NUM_PHY_TBUS_ADDRESSES / 2)
433 
434 #define RESET_REG_UNRESET_OFFSET	4
435 
436 #define STALL_DELAY_MS			500
437 
438 #define STATIC_DEBUG_LINE_DWORDS	9
439 
440 #define NUM_COMMON_GLOBAL_PARAMS	8
441 
442 #define FW_IMG_MAIN			1
443 
444 #ifndef REG_FIFO_ELEMENT_DWORDS
445 #define REG_FIFO_ELEMENT_DWORDS		2
446 #endif
447 #define REG_FIFO_DEPTH_ELEMENTS		32
448 #define REG_FIFO_DEPTH_DWORDS \
449 	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
450 
451 #ifndef IGU_FIFO_ELEMENT_DWORDS
452 #define IGU_FIFO_ELEMENT_DWORDS		4
453 #endif
454 #define IGU_FIFO_DEPTH_ELEMENTS		64
455 #define IGU_FIFO_DEPTH_DWORDS \
456 	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
457 
458 #ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
459 #define PROTECTION_OVERRIDE_ELEMENT_DWORDS	2
460 #endif
461 #define PROTECTION_OVERRIDE_DEPTH_ELEMENTS	20
462 #define PROTECTION_OVERRIDE_DEPTH_DWORDS \
463 	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
464 	 PROTECTION_OVERRIDE_ELEMENT_DWORDS)
465 
466 #define MCP_SPAD_TRACE_OFFSIZE_ADDR \
467 	(MCP_REG_SCRATCH + \
468 	 offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
469 
470 #define EMPTY_FW_VERSION_STR		"???_???_???_???"
471 #define EMPTY_FW_IMAGE_STR		"???????????????"
472 
473 /***************************** Constant Arrays *******************************/
474 
475 struct dbg_array {
476 	const u32 *ptr;
477 	u32 size_in_dwords;
478 };
479 
480 /* Debug arrays */
481 static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
482 
483 /* Chip constant definitions array */
484 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
485 	{ "bb",
486 	  {{MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB},
487 	   {0, 0, 0},
488 	   {0, 0, 0},
489 	   {0, 0, 0} } },
490 	{ "ah",
491 	  {{MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2},
492 	   {0, 0, 0},
493 	   {0, 0, 0},
494 	   {0, 0, 0} } }
495 };
496 
497 /* Storm constant definitions array */
498 static struct storm_defs s_storm_defs[] = {
499 	/* Tstorm */
500 	{'T', BLOCK_TSEM,
501 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
502 	 TSEM_REG_FAST_MEMORY,
503 	 TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
504 	 TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
505 	 TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
506 	 TCM_REG_CTX_RBC_ACCS,
507 	 4, TCM_REG_AGG_CON_CTX,
508 	 16, TCM_REG_SM_CON_CTX,
509 	 2, TCM_REG_AGG_TASK_CTX,
510 	 4, TCM_REG_SM_TASK_CTX},
511 
512 	/* Mstorm */
513 	{'M', BLOCK_MSEM,
514 	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
515 	 MSEM_REG_FAST_MEMORY,
516 	 MSEM_REG_DBG_FRAME_MODE_BB_K2, MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
517 	 MSEM_REG_SLOW_DBG_MODE_BB_K2, MSEM_REG_DBG_MODE1_CFG_BB_K2,
518 	 MSEM_REG_SYNC_DBG_EMPTY, MSEM_REG_SLOW_DBG_EMPTY_BB_K2,
519 	 MCM_REG_CTX_RBC_ACCS,
520 	 1, MCM_REG_AGG_CON_CTX,
521 	 10, MCM_REG_SM_CON_CTX,
522 	 2, MCM_REG_AGG_TASK_CTX,
523 	 7, MCM_REG_SM_TASK_CTX},
524 
525 	/* Ustorm */
526 	{'U', BLOCK_USEM,
527 	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
528 	 USEM_REG_FAST_MEMORY,
529 	 USEM_REG_DBG_FRAME_MODE_BB_K2, USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
530 	 USEM_REG_SLOW_DBG_MODE_BB_K2, USEM_REG_DBG_MODE1_CFG_BB_K2,
531 	 USEM_REG_SYNC_DBG_EMPTY, USEM_REG_SLOW_DBG_EMPTY_BB_K2,
532 	 UCM_REG_CTX_RBC_ACCS,
533 	 2, UCM_REG_AGG_CON_CTX,
534 	 13, UCM_REG_SM_CON_CTX,
535 	 3, UCM_REG_AGG_TASK_CTX,
536 	 3, UCM_REG_SM_TASK_CTX},
537 
538 	/* Xstorm */
539 	{'X', BLOCK_XSEM,
540 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
541 	 XSEM_REG_FAST_MEMORY,
542 	 XSEM_REG_DBG_FRAME_MODE_BB_K2, XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
543 	 XSEM_REG_SLOW_DBG_MODE_BB_K2, XSEM_REG_DBG_MODE1_CFG_BB_K2,
544 	 XSEM_REG_SYNC_DBG_EMPTY, XSEM_REG_SLOW_DBG_EMPTY_BB_K2,
545 	 XCM_REG_CTX_RBC_ACCS,
546 	 9, XCM_REG_AGG_CON_CTX,
547 	 15, XCM_REG_SM_CON_CTX,
548 	 0, 0,
549 	 0, 0},
550 
551 	/* Ystorm */
552 	{'Y', BLOCK_YSEM,
553 	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
554 	 YSEM_REG_FAST_MEMORY,
555 	 YSEM_REG_DBG_FRAME_MODE_BB_K2, YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
556 	 YSEM_REG_SLOW_DBG_MODE_BB_K2, YSEM_REG_DBG_MODE1_CFG_BB_K2,
557 	 YSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_SLOW_DBG_EMPTY_BB_K2,
558 	 YCM_REG_CTX_RBC_ACCS,
559 	 2, YCM_REG_AGG_CON_CTX,
560 	 3, YCM_REG_SM_CON_CTX,
561 	 2, YCM_REG_AGG_TASK_CTX,
562 	 12, YCM_REG_SM_TASK_CTX},
563 
564 	/* Pstorm */
565 	{'P', BLOCK_PSEM,
566 	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
567 	 PSEM_REG_FAST_MEMORY,
568 	 PSEM_REG_DBG_FRAME_MODE_BB_K2, PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
569 	 PSEM_REG_SLOW_DBG_MODE_BB_K2, PSEM_REG_DBG_MODE1_CFG_BB_K2,
570 	 PSEM_REG_SYNC_DBG_EMPTY, PSEM_REG_SLOW_DBG_EMPTY_BB_K2,
571 	 PCM_REG_CTX_RBC_ACCS,
572 	 0, 0,
573 	 10, PCM_REG_SM_CON_CTX,
574 	 0, 0,
575 	 0, 0}
576 };
577 
578 /* Block definitions array */
579 
580 static struct block_defs block_grc_defs = {
581 	"grc",
582 	{true, true}, false, 0,
583 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
584 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
585 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
586 	GRC_REG_DBG_FORCE_FRAME,
587 	true, false, DBG_RESET_REG_MISC_PL_UA, 1
588 };
589 
590 static struct block_defs block_miscs_defs = {
591 	"miscs", {false, false}, false, 0,
592 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
593 	0, 0, 0, 0, 0,
594 	false, false, MAX_DBG_RESET_REGS, 0
595 };
596 
597 static struct block_defs block_misc_defs = {
598 	"misc", {false, false}, false, 0,
599 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
600 	0, 0, 0, 0, 0,
601 	false, false, MAX_DBG_RESET_REGS, 0
602 };
603 
604 static struct block_defs block_dbu_defs = {
605 	"dbu", {false, false}, false, 0,
606 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
607 	0, 0, 0, 0, 0,
608 	false, false, MAX_DBG_RESET_REGS, 0
609 };
610 
611 static struct block_defs block_pglue_b_defs = {
612 	"pglue_b",
613 	{true, true}, false, 0,
614 	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
615 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
616 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
617 	PGLUE_B_REG_DBG_FORCE_FRAME,
618 	true, false, DBG_RESET_REG_MISCS_PL_HV, 1
619 };
620 
621 static struct block_defs block_cnig_defs = {
622 	"cnig",
623 	{false, true}, false, 0,
624 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
625 	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
626 	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
627 	CNIG_REG_DBG_FORCE_FRAME_K2,
628 	true, false, DBG_RESET_REG_MISCS_PL_HV, 0
629 };
630 
631 static struct block_defs block_cpmu_defs = {
632 	"cpmu", {false, false}, false, 0,
633 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
634 	0, 0, 0, 0, 0,
635 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
636 };
637 
638 static struct block_defs block_ncsi_defs = {
639 	"ncsi",
640 	{true, true}, false, 0,
641 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
642 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
643 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
644 	NCSI_REG_DBG_FORCE_FRAME,
645 	true, false, DBG_RESET_REG_MISCS_PL_HV, 5
646 };
647 
648 static struct block_defs block_opte_defs = {
649 	"opte", {false, false}, false, 0,
650 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
651 	0, 0, 0, 0, 0,
652 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
653 };
654 
655 static struct block_defs block_bmb_defs = {
656 	"bmb",
657 	{true, true}, false, 0,
658 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
659 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
660 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
661 	BMB_REG_DBG_FORCE_FRAME,
662 	true, false, DBG_RESET_REG_MISCS_PL_UA, 7
663 };
664 
665 static struct block_defs block_pcie_defs = {
666 	"pcie",
667 	{false, true}, false, 0,
668 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
669 	PCIE_REG_DBG_COMMON_SELECT_K2,
670 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
671 	PCIE_REG_DBG_COMMON_SHIFT_K2,
672 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
673 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
674 	false, false, MAX_DBG_RESET_REGS, 0
675 };
676 
677 static struct block_defs block_mcp_defs = {
678 	"mcp", {false, false}, false, 0,
679 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
680 	0, 0, 0, 0, 0,
681 	false, false, MAX_DBG_RESET_REGS, 0
682 };
683 
684 static struct block_defs block_mcp2_defs = {
685 	"mcp2",
686 	{true, true}, false, 0,
687 	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
688 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
689 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
690 	MCP2_REG_DBG_FORCE_FRAME,
691 	false, false, MAX_DBG_RESET_REGS, 0
692 };
693 
694 static struct block_defs block_pswhst_defs = {
695 	"pswhst",
696 	{true, true}, false, 0,
697 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
698 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
699 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
700 	PSWHST_REG_DBG_FORCE_FRAME,
701 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
702 };
703 
704 static struct block_defs block_pswhst2_defs = {
705 	"pswhst2",
706 	{true, true}, false, 0,
707 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
708 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
709 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
710 	PSWHST2_REG_DBG_FORCE_FRAME,
711 	true, false, DBG_RESET_REG_MISC_PL_HV, 0
712 };
713 
714 static struct block_defs block_pswrd_defs = {
715 	"pswrd",
716 	{true, true}, false, 0,
717 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
718 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
719 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
720 	PSWRD_REG_DBG_FORCE_FRAME,
721 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
722 };
723 
724 static struct block_defs block_pswrd2_defs = {
725 	"pswrd2",
726 	{true, true}, false, 0,
727 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
728 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
729 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
730 	PSWRD2_REG_DBG_FORCE_FRAME,
731 	true, false, DBG_RESET_REG_MISC_PL_HV, 2
732 };
733 
734 static struct block_defs block_pswwr_defs = {
735 	"pswwr",
736 	{true, true}, false, 0,
737 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
738 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
739 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
740 	PSWWR_REG_DBG_FORCE_FRAME,
741 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
742 };
743 
744 static struct block_defs block_pswwr2_defs = {
745 	"pswwr2", {false, false}, false, 0,
746 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
747 	0, 0, 0, 0, 0,
748 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
749 };
750 
751 static struct block_defs block_pswrq_defs = {
752 	"pswrq",
753 	{true, true}, false, 0,
754 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
755 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
756 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
757 	PSWRQ_REG_DBG_FORCE_FRAME,
758 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
759 };
760 
761 static struct block_defs block_pswrq2_defs = {
762 	"pswrq2",
763 	{true, true}, false, 0,
764 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
765 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
766 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
767 	PSWRQ2_REG_DBG_FORCE_FRAME,
768 	true, false, DBG_RESET_REG_MISC_PL_HV, 1
769 };
770 
771 static struct block_defs block_pglcs_defs = {
772 	"pglcs",
773 	{false, true}, false, 0,
774 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
775 	PGLCS_REG_DBG_SELECT_K2, PGLCS_REG_DBG_DWORD_ENABLE_K2,
776 	PGLCS_REG_DBG_SHIFT_K2, PGLCS_REG_DBG_FORCE_VALID_K2,
777 	PGLCS_REG_DBG_FORCE_FRAME_K2,
778 	true, false, DBG_RESET_REG_MISCS_PL_HV, 2
779 };
780 
781 static struct block_defs block_ptu_defs = {
782 	"ptu",
783 	{true, true}, false, 0,
784 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
785 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
786 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
787 	PTU_REG_DBG_FORCE_FRAME,
788 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 20
789 };
790 
791 static struct block_defs block_dmae_defs = {
792 	"dmae",
793 	{true, true}, false, 0,
794 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
795 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
796 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
797 	DMAE_REG_DBG_FORCE_FRAME,
798 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 28
799 };
800 
801 static struct block_defs block_tcm_defs = {
802 	"tcm",
803 	{true, true}, true, DBG_TSTORM_ID,
804 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
805 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
806 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
807 	TCM_REG_DBG_FORCE_FRAME,
808 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 5
809 };
810 
811 static struct block_defs block_mcm_defs = {
812 	"mcm",
813 	{true, true}, true, DBG_MSTORM_ID,
814 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
815 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
816 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
817 	MCM_REG_DBG_FORCE_FRAME,
818 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 3
819 };
820 
821 static struct block_defs block_ucm_defs = {
822 	"ucm",
823 	{true, true}, true, DBG_USTORM_ID,
824 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
825 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
826 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
827 	UCM_REG_DBG_FORCE_FRAME,
828 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 8
829 };
830 
831 static struct block_defs block_xcm_defs = {
832 	"xcm",
833 	{true, true}, true, DBG_XSTORM_ID,
834 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
835 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
836 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
837 	XCM_REG_DBG_FORCE_FRAME,
838 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 19
839 };
840 
841 static struct block_defs block_ycm_defs = {
842 	"ycm",
843 	{true, true}, true, DBG_YSTORM_ID,
844 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
845 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
846 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
847 	YCM_REG_DBG_FORCE_FRAME,
848 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 5
849 };
850 
851 static struct block_defs block_pcm_defs = {
852 	"pcm",
853 	{true, true}, true, DBG_PSTORM_ID,
854 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
855 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
856 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
857 	PCM_REG_DBG_FORCE_FRAME,
858 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 4
859 };
860 
861 static struct block_defs block_qm_defs = {
862 	"qm",
863 	{true, true}, false, 0,
864 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
865 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
866 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
867 	QM_REG_DBG_FORCE_FRAME,
868 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 16
869 };
870 
871 static struct block_defs block_tm_defs = {
872 	"tm",
873 	{true, true}, false, 0,
874 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
875 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
876 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
877 	TM_REG_DBG_FORCE_FRAME,
878 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 17
879 };
880 
881 static struct block_defs block_dorq_defs = {
882 	"dorq",
883 	{true, true}, false, 0,
884 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
885 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
886 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
887 	DORQ_REG_DBG_FORCE_FRAME,
888 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 18
889 };
890 
891 static struct block_defs block_brb_defs = {
892 	"brb",
893 	{true, true}, false, 0,
894 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
895 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
896 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
897 	BRB_REG_DBG_FORCE_FRAME,
898 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 0
899 };
900 
901 static struct block_defs block_src_defs = {
902 	"src",
903 	{true, true}, false, 0,
904 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
905 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
906 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
907 	SRC_REG_DBG_FORCE_FRAME,
908 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 2
909 };
910 
911 static struct block_defs block_prs_defs = {
912 	"prs",
913 	{true, true}, false, 0,
914 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
915 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
916 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
917 	PRS_REG_DBG_FORCE_FRAME,
918 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 1
919 };
920 
921 static struct block_defs block_tsdm_defs = {
922 	"tsdm",
923 	{true, true}, true, DBG_TSTORM_ID,
924 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
925 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
926 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
927 	TSDM_REG_DBG_FORCE_FRAME,
928 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 3
929 };
930 
931 static struct block_defs block_msdm_defs = {
932 	"msdm",
933 	{true, true}, true, DBG_MSTORM_ID,
934 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
935 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
936 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
937 	MSDM_REG_DBG_FORCE_FRAME,
938 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 6
939 };
940 
941 static struct block_defs block_usdm_defs = {
942 	"usdm",
943 	{true, true}, true, DBG_USTORM_ID,
944 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
945 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
946 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
947 	USDM_REG_DBG_FORCE_FRAME,
948 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 7
949 };
950 
951 static struct block_defs block_xsdm_defs = {
952 	"xsdm",
953 	{true, true}, true, DBG_XSTORM_ID,
954 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
955 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
956 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
957 	XSDM_REG_DBG_FORCE_FRAME,
958 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 20
959 };
960 
961 static struct block_defs block_ysdm_defs = {
962 	"ysdm",
963 	{true, true}, true, DBG_YSTORM_ID,
964 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
965 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
966 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
967 	YSDM_REG_DBG_FORCE_FRAME,
968 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 8
969 };
970 
971 static struct block_defs block_psdm_defs = {
972 	"psdm",
973 	{true, true}, true, DBG_PSTORM_ID,
974 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
975 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
976 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
977 	PSDM_REG_DBG_FORCE_FRAME,
978 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 7
979 };
980 
981 static struct block_defs block_tsem_defs = {
982 	"tsem",
983 	{true, true}, true, DBG_TSTORM_ID,
984 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
985 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
986 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
987 	TSEM_REG_DBG_FORCE_FRAME,
988 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 4
989 };
990 
991 static struct block_defs block_msem_defs = {
992 	"msem",
993 	{true, true}, true, DBG_MSTORM_ID,
994 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
995 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
996 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
997 	MSEM_REG_DBG_FORCE_FRAME,
998 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 9
999 };
1000 
1001 static struct block_defs block_usem_defs = {
1002 	"usem",
1003 	{true, true}, true, DBG_USTORM_ID,
1004 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1005 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
1006 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
1007 	USEM_REG_DBG_FORCE_FRAME,
1008 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 9
1009 };
1010 
1011 static struct block_defs block_xsem_defs = {
1012 	"xsem",
1013 	{true, true}, true, DBG_XSTORM_ID,
1014 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1015 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
1016 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
1017 	XSEM_REG_DBG_FORCE_FRAME,
1018 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 21
1019 };
1020 
1021 static struct block_defs block_ysem_defs = {
1022 	"ysem",
1023 	{true, true}, true, DBG_YSTORM_ID,
1024 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
1025 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
1026 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
1027 	YSEM_REG_DBG_FORCE_FRAME,
1028 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 11
1029 };
1030 
1031 static struct block_defs block_psem_defs = {
1032 	"psem",
1033 	{true, true}, true, DBG_PSTORM_ID,
1034 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1035 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
1036 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
1037 	PSEM_REG_DBG_FORCE_FRAME,
1038 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 10
1039 };
1040 
1041 static struct block_defs block_rss_defs = {
1042 	"rss",
1043 	{true, true}, false, 0,
1044 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
1045 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
1046 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
1047 	RSS_REG_DBG_FORCE_FRAME,
1048 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 18
1049 };
1050 
1051 static struct block_defs block_tmld_defs = {
1052 	"tmld",
1053 	{true, true}, false, 0,
1054 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1055 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
1056 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
1057 	TMLD_REG_DBG_FORCE_FRAME,
1058 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 13
1059 };
1060 
1061 static struct block_defs block_muld_defs = {
1062 	"muld",
1063 	{true, true}, false, 0,
1064 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1065 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
1066 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
1067 	MULD_REG_DBG_FORCE_FRAME,
1068 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 14
1069 };
1070 
1071 static struct block_defs block_yuld_defs = {
1072 	"yuld",
1073 	{true, true}, false, 0,
1074 	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
1075 	YULD_REG_DBG_SELECT_BB_K2, YULD_REG_DBG_DWORD_ENABLE_BB_K2,
1076 	YULD_REG_DBG_SHIFT_BB_K2, YULD_REG_DBG_FORCE_VALID_BB_K2,
1077 	YULD_REG_DBG_FORCE_FRAME_BB_K2,
1078 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
1079 	15
1080 };
1081 
1082 static struct block_defs block_xyld_defs = {
1083 	"xyld",
1084 	{true, true}, false, 0,
1085 	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
1086 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
1087 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
1088 	XYLD_REG_DBG_FORCE_FRAME,
1089 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
1090 };
1091 
1092 static struct block_defs block_prm_defs = {
1093 	"prm",
1094 	{true, true}, false, 0,
1095 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1096 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
1097 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
1098 	PRM_REG_DBG_FORCE_FRAME,
1099 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 21
1100 };
1101 
1102 static struct block_defs block_pbf_pb1_defs = {
1103 	"pbf_pb1",
1104 	{true, true}, false, 0,
1105 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1106 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
1107 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
1108 	PBF_PB1_REG_DBG_FORCE_FRAME,
1109 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1110 	11
1111 };
1112 
1113 static struct block_defs block_pbf_pb2_defs = {
1114 	"pbf_pb2",
1115 	{true, true}, false, 0,
1116 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1117 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
1118 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
1119 	PBF_PB2_REG_DBG_FORCE_FRAME,
1120 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
1121 	12
1122 };
1123 
1124 static struct block_defs block_rpb_defs = {
1125 	"rpb",
1126 	{true, true}, false, 0,
1127 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1128 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
1129 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
1130 	RPB_REG_DBG_FORCE_FRAME,
1131 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 13
1132 };
1133 
1134 static struct block_defs block_btb_defs = {
1135 	"btb",
1136 	{true, true}, false, 0,
1137 	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
1138 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
1139 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
1140 	BTB_REG_DBG_FORCE_FRAME,
1141 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 10
1142 };
1143 
1144 static struct block_defs block_pbf_defs = {
1145 	"pbf",
1146 	{true, true}, false, 0,
1147 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
1148 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
1149 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
1150 	PBF_REG_DBG_FORCE_FRAME,
1151 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 15
1152 };
1153 
1154 static struct block_defs block_rdif_defs = {
1155 	"rdif",
1156 	{true, true}, false, 0,
1157 	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
1158 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
1159 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
1160 	RDIF_REG_DBG_FORCE_FRAME,
1161 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 16
1162 };
1163 
1164 static struct block_defs block_tdif_defs = {
1165 	"tdif",
1166 	{true, true}, false, 0,
1167 	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
1168 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
1169 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
1170 	TDIF_REG_DBG_FORCE_FRAME,
1171 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 17
1172 };
1173 
1174 static struct block_defs block_cdu_defs = {
1175 	"cdu",
1176 	{true, true}, false, 0,
1177 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1178 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
1179 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
1180 	CDU_REG_DBG_FORCE_FRAME,
1181 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 23
1182 };
1183 
1184 static struct block_defs block_ccfc_defs = {
1185 	"ccfc",
1186 	{true, true}, false, 0,
1187 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1188 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
1189 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
1190 	CCFC_REG_DBG_FORCE_FRAME,
1191 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 24
1192 };
1193 
1194 static struct block_defs block_tcfc_defs = {
1195 	"tcfc",
1196 	{true, true}, false, 0,
1197 	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
1198 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
1199 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
1200 	TCFC_REG_DBG_FORCE_FRAME,
1201 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 25
1202 };
1203 
1204 static struct block_defs block_igu_defs = {
1205 	"igu",
1206 	{true, true}, false, 0,
1207 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1208 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
1209 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
1210 	IGU_REG_DBG_FORCE_FRAME,
1211 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, 27
1212 };
1213 
1214 static struct block_defs block_cau_defs = {
1215 	"cau",
1216 	{true, true}, false, 0,
1217 	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
1218 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
1219 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
1220 	CAU_REG_DBG_FORCE_FRAME,
1221 	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
1222 };
1223 
1224 static struct block_defs block_umac_defs = {
1225 	"umac",
1226 	{false, true}, false, 0,
1227 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1228 	UMAC_REG_DBG_SELECT_K2, UMAC_REG_DBG_DWORD_ENABLE_K2,
1229 	UMAC_REG_DBG_SHIFT_K2, UMAC_REG_DBG_FORCE_VALID_K2,
1230 	UMAC_REG_DBG_FORCE_FRAME_K2,
1231 	true, false, DBG_RESET_REG_MISCS_PL_HV, 6
1232 };
1233 
1234 static struct block_defs block_xmac_defs = {
1235 	"xmac", {false, false}, false, 0,
1236 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1237 	0, 0, 0, 0, 0,
1238 	false, false, MAX_DBG_RESET_REGS, 0
1239 };
1240 
1241 static struct block_defs block_dbg_defs = {
1242 	"dbg", {false, false}, false, 0,
1243 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1244 	0, 0, 0, 0, 0,
1245 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
1246 };
1247 
1248 static struct block_defs block_nig_defs = {
1249 	"nig",
1250 	{true, true}, false, 0,
1251 	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
1252 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
1253 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
1254 	NIG_REG_DBG_FORCE_FRAME,
1255 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 0
1256 };
1257 
1258 static struct block_defs block_wol_defs = {
1259 	"wol",
1260 	{false, true}, false, 0,
1261 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1262 	WOL_REG_DBG_SELECT_K2, WOL_REG_DBG_DWORD_ENABLE_K2,
1263 	WOL_REG_DBG_SHIFT_K2, WOL_REG_DBG_FORCE_VALID_K2,
1264 	WOL_REG_DBG_FORCE_FRAME_K2,
1265 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 7
1266 };
1267 
1268 static struct block_defs block_bmbn_defs = {
1269 	"bmbn",
1270 	{false, true}, false, 0,
1271 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
1272 	BMBN_REG_DBG_SELECT_K2, BMBN_REG_DBG_DWORD_ENABLE_K2,
1273 	BMBN_REG_DBG_SHIFT_K2, BMBN_REG_DBG_FORCE_VALID_K2,
1274 	BMBN_REG_DBG_FORCE_FRAME_K2,
1275 	false, false, MAX_DBG_RESET_REGS, 0
1276 };
1277 
1278 static struct block_defs block_ipc_defs = {
1279 	"ipc", {false, false}, false, 0,
1280 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1281 	0, 0, 0, 0, 0,
1282 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
1283 };
1284 
1285 static struct block_defs block_nwm_defs = {
1286 	"nwm",
1287 	{false, true}, false, 0,
1288 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1289 	NWM_REG_DBG_SELECT_K2, NWM_REG_DBG_DWORD_ENABLE_K2,
1290 	NWM_REG_DBG_SHIFT_K2, NWM_REG_DBG_FORCE_VALID_K2,
1291 	NWM_REG_DBG_FORCE_FRAME_K2,
1292 	true, false, DBG_RESET_REG_MISCS_PL_HV_2, 0
1293 };
1294 
1295 static struct block_defs block_nws_defs = {
1296 	"nws",
1297 	{false, true}, false, 0,
1298 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
1299 	NWS_REG_DBG_SELECT_K2, NWS_REG_DBG_DWORD_ENABLE_K2,
1300 	NWS_REG_DBG_SHIFT_K2, NWS_REG_DBG_FORCE_VALID_K2,
1301 	NWS_REG_DBG_FORCE_FRAME_K2,
1302 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
1303 };
1304 
1305 static struct block_defs block_ms_defs = {
1306 	"ms",
1307 	{false, true}, false, 0,
1308 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
1309 	MS_REG_DBG_SELECT_K2, MS_REG_DBG_DWORD_ENABLE_K2,
1310 	MS_REG_DBG_SHIFT_K2, MS_REG_DBG_FORCE_VALID_K2,
1311 	MS_REG_DBG_FORCE_FRAME_K2,
1312 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
1313 };
1314 
1315 static struct block_defs block_phy_pcie_defs = {
1316 	"phy_pcie",
1317 	{false, true}, false, 0,
1318 	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
1319 	PCIE_REG_DBG_COMMON_SELECT_K2,
1320 	PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2,
1321 	PCIE_REG_DBG_COMMON_SHIFT_K2,
1322 	PCIE_REG_DBG_COMMON_FORCE_VALID_K2,
1323 	PCIE_REG_DBG_COMMON_FORCE_FRAME_K2,
1324 	false, false, MAX_DBG_RESET_REGS, 0
1325 };
1326 
1327 static struct block_defs block_led_defs = {
1328 	"led", {false, false}, false, 0,
1329 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1330 	0, 0, 0, 0, 0,
1331 	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
1332 };
1333 
1334 static struct block_defs block_avs_wrap_defs = {
1335 	"avs_wrap", {false, false}, false, 0,
1336 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1337 	0, 0, 0, 0, 0,
1338 	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
1339 };
1340 
1341 static struct block_defs block_rgfs_defs = {
1342 	"rgfs", {false, false}, false, 0,
1343 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1344 	0, 0, 0, 0, 0,
1345 	false, false, MAX_DBG_RESET_REGS, 0
1346 };
1347 
1348 static struct block_defs block_rgsrc_defs = {
1349 	"rgsrc", {false, false}, false, 0,
1350 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1351 	0, 0, 0, 0, 0,
1352 	false, false, MAX_DBG_RESET_REGS, 0
1353 };
1354 
1355 static struct block_defs block_tgfs_defs = {
1356 	"tgfs", {false, false}, false, 0,
1357 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1358 	0, 0, 0, 0, 0,
1359 	false, false, MAX_DBG_RESET_REGS, 0
1360 };
1361 
1362 static struct block_defs block_tgsrc_defs = {
1363 	"tgsrc", {false, false}, false, 0,
1364 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1365 	0, 0, 0, 0, 0,
1366 	false, false, MAX_DBG_RESET_REGS, 0
1367 };
1368 
1369 static struct block_defs block_ptld_defs = {
1370 	"ptld", {false, false}, false, 0,
1371 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1372 	0, 0, 0, 0, 0,
1373 	false, false, MAX_DBG_RESET_REGS, 0
1374 };
1375 
1376 static struct block_defs block_ypld_defs = {
1377 	"ypld", {false, false}, false, 0,
1378 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1379 	0, 0, 0, 0, 0,
1380 	false, false, MAX_DBG_RESET_REGS, 0
1381 };
1382 
1383 static struct block_defs block_misc_aeu_defs = {
1384 	"misc_aeu", {false, false}, false, 0,
1385 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1386 	0, 0, 0, 0, 0,
1387 	false, false, MAX_DBG_RESET_REGS, 0
1388 };
1389 
1390 static struct block_defs block_bar0_map_defs = {
1391 	"bar0_map", {false, false}, false, 0,
1392 	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
1393 	0, 0, 0, 0, 0,
1394 	false, false, MAX_DBG_RESET_REGS, 0
1395 };
1396 
1397 static struct block_defs *s_block_defs[MAX_BLOCK_ID] = {
1398 	&block_grc_defs,
1399 	&block_miscs_defs,
1400 	&block_misc_defs,
1401 	&block_dbu_defs,
1402 	&block_pglue_b_defs,
1403 	&block_cnig_defs,
1404 	&block_cpmu_defs,
1405 	&block_ncsi_defs,
1406 	&block_opte_defs,
1407 	&block_bmb_defs,
1408 	&block_pcie_defs,
1409 	&block_mcp_defs,
1410 	&block_mcp2_defs,
1411 	&block_pswhst_defs,
1412 	&block_pswhst2_defs,
1413 	&block_pswrd_defs,
1414 	&block_pswrd2_defs,
1415 	&block_pswwr_defs,
1416 	&block_pswwr2_defs,
1417 	&block_pswrq_defs,
1418 	&block_pswrq2_defs,
1419 	&block_pglcs_defs,
1420 	&block_dmae_defs,
1421 	&block_ptu_defs,
1422 	&block_tcm_defs,
1423 	&block_mcm_defs,
1424 	&block_ucm_defs,
1425 	&block_xcm_defs,
1426 	&block_ycm_defs,
1427 	&block_pcm_defs,
1428 	&block_qm_defs,
1429 	&block_tm_defs,
1430 	&block_dorq_defs,
1431 	&block_brb_defs,
1432 	&block_src_defs,
1433 	&block_prs_defs,
1434 	&block_tsdm_defs,
1435 	&block_msdm_defs,
1436 	&block_usdm_defs,
1437 	&block_xsdm_defs,
1438 	&block_ysdm_defs,
1439 	&block_psdm_defs,
1440 	&block_tsem_defs,
1441 	&block_msem_defs,
1442 	&block_usem_defs,
1443 	&block_xsem_defs,
1444 	&block_ysem_defs,
1445 	&block_psem_defs,
1446 	&block_rss_defs,
1447 	&block_tmld_defs,
1448 	&block_muld_defs,
1449 	&block_yuld_defs,
1450 	&block_xyld_defs,
1451 	&block_ptld_defs,
1452 	&block_ypld_defs,
1453 	&block_prm_defs,
1454 	&block_pbf_pb1_defs,
1455 	&block_pbf_pb2_defs,
1456 	&block_rpb_defs,
1457 	&block_btb_defs,
1458 	&block_pbf_defs,
1459 	&block_rdif_defs,
1460 	&block_tdif_defs,
1461 	&block_cdu_defs,
1462 	&block_ccfc_defs,
1463 	&block_tcfc_defs,
1464 	&block_igu_defs,
1465 	&block_cau_defs,
1466 	&block_rgfs_defs,
1467 	&block_rgsrc_defs,
1468 	&block_tgfs_defs,
1469 	&block_tgsrc_defs,
1470 	&block_umac_defs,
1471 	&block_xmac_defs,
1472 	&block_dbg_defs,
1473 	&block_nig_defs,
1474 	&block_wol_defs,
1475 	&block_bmbn_defs,
1476 	&block_ipc_defs,
1477 	&block_nwm_defs,
1478 	&block_nws_defs,
1479 	&block_ms_defs,
1480 	&block_phy_pcie_defs,
1481 	&block_led_defs,
1482 	&block_avs_wrap_defs,
1483 	&block_misc_aeu_defs,
1484 	&block_bar0_map_defs,
1485 };
1486 
1487 static struct platform_defs s_platform_defs[] = {
1488 	{"asic", 1},
1489 	{"reserved", 0},
1490 	{"reserved2", 0},
1491 	{"reserved3", 0}
1492 };
1493 
1494 static struct grc_param_defs s_grc_param_defs[] = {
1495 	/* DBG_GRC_PARAM_DUMP_TSTORM */
1496 	{{1, 1}, 0, 1, false, 1, 1},
1497 
1498 	/* DBG_GRC_PARAM_DUMP_MSTORM */
1499 	{{1, 1}, 0, 1, false, 1, 1},
1500 
1501 	/* DBG_GRC_PARAM_DUMP_USTORM */
1502 	{{1, 1}, 0, 1, false, 1, 1},
1503 
1504 	/* DBG_GRC_PARAM_DUMP_XSTORM */
1505 	{{1, 1}, 0, 1, false, 1, 1},
1506 
1507 	/* DBG_GRC_PARAM_DUMP_YSTORM */
1508 	{{1, 1}, 0, 1, false, 1, 1},
1509 
1510 	/* DBG_GRC_PARAM_DUMP_PSTORM */
1511 	{{1, 1}, 0, 1, false, 1, 1},
1512 
1513 	/* DBG_GRC_PARAM_DUMP_REGS */
1514 	{{1, 1}, 0, 1, false, 0, 1},
1515 
1516 	/* DBG_GRC_PARAM_DUMP_RAM */
1517 	{{1, 1}, 0, 1, false, 0, 1},
1518 
1519 	/* DBG_GRC_PARAM_DUMP_PBUF */
1520 	{{1, 1}, 0, 1, false, 0, 1},
1521 
1522 	/* DBG_GRC_PARAM_DUMP_IOR */
1523 	{{0, 0}, 0, 1, false, 0, 1},
1524 
1525 	/* DBG_GRC_PARAM_DUMP_VFC */
1526 	{{0, 0}, 0, 1, false, 0, 1},
1527 
1528 	/* DBG_GRC_PARAM_DUMP_CM_CTX */
1529 	{{1, 1}, 0, 1, false, 0, 1},
1530 
1531 	/* DBG_GRC_PARAM_DUMP_ILT */
1532 	{{1, 1}, 0, 1, false, 0, 1},
1533 
1534 	/* DBG_GRC_PARAM_DUMP_RSS */
1535 	{{1, 1}, 0, 1, false, 0, 1},
1536 
1537 	/* DBG_GRC_PARAM_DUMP_CAU */
1538 	{{1, 1}, 0, 1, false, 0, 1},
1539 
1540 	/* DBG_GRC_PARAM_DUMP_QM */
1541 	{{1, 1}, 0, 1, false, 0, 1},
1542 
1543 	/* DBG_GRC_PARAM_DUMP_MCP */
1544 	{{1, 1}, 0, 1, false, 0, 1},
1545 
1546 	/* DBG_GRC_PARAM_RESERVED */
1547 	{{1, 1}, 0, 1, false, 0, 1},
1548 
1549 	/* DBG_GRC_PARAM_DUMP_CFC */
1550 	{{1, 1}, 0, 1, false, 0, 1},
1551 
1552 	/* DBG_GRC_PARAM_DUMP_IGU */
1553 	{{1, 1}, 0, 1, false, 0, 1},
1554 
1555 	/* DBG_GRC_PARAM_DUMP_BRB */
1556 	{{0, 0}, 0, 1, false, 0, 1},
1557 
1558 	/* DBG_GRC_PARAM_DUMP_BTB */
1559 	{{0, 0}, 0, 1, false, 0, 1},
1560 
1561 	/* DBG_GRC_PARAM_DUMP_BMB */
1562 	{{0, 0}, 0, 1, false, 0, 1},
1563 
1564 	/* DBG_GRC_PARAM_DUMP_NIG */
1565 	{{1, 1}, 0, 1, false, 0, 1},
1566 
1567 	/* DBG_GRC_PARAM_DUMP_MULD */
1568 	{{1, 1}, 0, 1, false, 0, 1},
1569 
1570 	/* DBG_GRC_PARAM_DUMP_PRS */
1571 	{{1, 1}, 0, 1, false, 0, 1},
1572 
1573 	/* DBG_GRC_PARAM_DUMP_DMAE */
1574 	{{1, 1}, 0, 1, false, 0, 1},
1575 
1576 	/* DBG_GRC_PARAM_DUMP_TM */
1577 	{{1, 1}, 0, 1, false, 0, 1},
1578 
1579 	/* DBG_GRC_PARAM_DUMP_SDM */
1580 	{{1, 1}, 0, 1, false, 0, 1},
1581 
1582 	/* DBG_GRC_PARAM_DUMP_DIF */
1583 	{{1, 1}, 0, 1, false, 0, 1},
1584 
1585 	/* DBG_GRC_PARAM_DUMP_STATIC */
1586 	{{1, 1}, 0, 1, false, 0, 1},
1587 
1588 	/* DBG_GRC_PARAM_UNSTALL */
1589 	{{0, 0}, 0, 1, false, 0, 0},
1590 
1591 	/* DBG_GRC_PARAM_NUM_LCIDS */
1592 	{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
1593 	 MAX_LCIDS},
1594 
1595 	/* DBG_GRC_PARAM_NUM_LTIDS */
1596 	{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
1597 	 MAX_LTIDS},
1598 
1599 	/* DBG_GRC_PARAM_EXCLUDE_ALL */
1600 	{{0, 0}, 0, 1, true, 0, 0},
1601 
1602 	/* DBG_GRC_PARAM_CRASH */
1603 	{{0, 0}, 0, 1, true, 0, 0},
1604 
1605 	/* DBG_GRC_PARAM_PARITY_SAFE */
1606 	{{0, 0}, 0, 1, false, 1, 0},
1607 
1608 	/* DBG_GRC_PARAM_DUMP_CM */
1609 	{{1, 1}, 0, 1, false, 0, 1},
1610 
1611 	/* DBG_GRC_PARAM_DUMP_PHY */
1612 	{{1, 1}, 0, 1, false, 0, 1},
1613 
1614 	/* DBG_GRC_PARAM_NO_MCP */
1615 	{{0, 0}, 0, 1, false, 0, 0},
1616 
1617 	/* DBG_GRC_PARAM_NO_FW_VER */
1618 	{{0, 0}, 0, 1, false, 0, 0}
1619 };
1620 
1621 static struct rss_mem_defs s_rss_mem_defs[] = {
1622 	{ "rss_mem_cid", "rss_cid", 0,
1623 	  {256, 320},
1624 	  {32, 32} },
1625 
1626 	{ "rss_mem_key_msb", "rss_key", 1024,
1627 	  {128, 208},
1628 	  {256, 256} },
1629 
1630 	{ "rss_mem_key_lsb", "rss_key", 2048,
1631 	  {128, 208},
1632 	  {64, 64} },
1633 
1634 	{ "rss_mem_info", "rss_info", 3072,
1635 	  {128, 208},
1636 	  {16, 16} },
1637 
1638 	{ "rss_mem_ind", "rss_ind", 4096,
1639 	  {16384, 26624},
1640 	  {16, 16} }
1641 };
1642 
1643 static struct vfc_ram_defs s_vfc_ram_defs[] = {
1644 	{"vfc_ram_tt1", "vfc_ram", 0, 512},
1645 	{"vfc_ram_mtt2", "vfc_ram", 512, 128},
1646 	{"vfc_ram_stt2", "vfc_ram", 640, 32},
1647 	{"vfc_ram_ro_vect", "vfc_ram", 672, 32}
1648 };
1649 
1650 static struct big_ram_defs s_big_ram_defs[] = {
1651 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
1652 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
1653 	  {4800, 5632} },
1654 
1655 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
1656 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
1657 	  {2880, 3680} },
1658 
1659 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
1660 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
1661 	  {1152, 1152} }
1662 };
1663 
1664 static struct reset_reg_defs s_reset_regs_defs[] = {
1665 	/* DBG_RESET_REG_MISCS_PL_UA */
1666 	{ MISCS_REG_RESET_PL_UA, 0x0,
1667 	  {true, true} },
1668 
1669 	/* DBG_RESET_REG_MISCS_PL_HV */
1670 	{ MISCS_REG_RESET_PL_HV, 0x0,
1671 	  {true, true} },
1672 
1673 	/* DBG_RESET_REG_MISCS_PL_HV_2 */
1674 	{ MISCS_REG_RESET_PL_HV_2_K2, 0x0,
1675 	  {false, true} },
1676 
1677 	/* DBG_RESET_REG_MISC_PL_UA */
1678 	{ MISC_REG_RESET_PL_UA, 0x0,
1679 	  {true, true} },
1680 
1681 	/* DBG_RESET_REG_MISC_PL_HV */
1682 	{ MISC_REG_RESET_PL_HV, 0x0,
1683 	  {true, true} },
1684 
1685 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
1686 	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
1687 	  {true, true} },
1688 
1689 	/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
1690 	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
1691 	  {true, true} },
1692 
1693 	/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
1694 	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
1695 	  {true, true} },
1696 };
1697 
1698 static struct phy_defs s_phy_defs[] = {
1699 	{"nw_phy", NWS_REG_NWS_CMU_K2,
1700 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
1701 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
1702 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
1703 	 PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
1704 	{"sgmii_phy", MS_REG_MS_CMU_K2,
1705 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
1706 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
1707 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
1708 	 PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
1709 	{"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
1710 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
1711 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
1712 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
1713 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
1714 	{"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
1715 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
1716 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
1717 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
1718 	 PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
1719 };
1720 
1721 /**************************** Private Functions ******************************/
1722 
1723 /* Reads and returns a single dword from the specified unaligned buffer */
1724 static u32 qed_read_unaligned_dword(u8 *buf)
1725 {
1726 	u32 dword;
1727 
1728 	memcpy((u8 *)&dword, buf, sizeof(dword));
1729 	return dword;
1730 }
1731 
1732 /* Returns the value of the specified GRC param */
1733 static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
1734 			     enum dbg_grc_params grc_param)
1735 {
1736 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1737 
1738 	return dev_data->grc.param_val[grc_param];
1739 }
1740 
1741 /* Initializes the GRC parameters */
1742 static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
1743 {
1744 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1745 
1746 	if (!dev_data->grc.params_initialized) {
1747 		qed_dbg_grc_set_params_default(p_hwfn);
1748 		dev_data->grc.params_initialized = 1;
1749 	}
1750 }
1751 
1752 /* Initializes debug data for the specified device */
1753 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
1754 					struct qed_ptt *p_ptt)
1755 {
1756 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1757 
1758 	if (dev_data->initialized)
1759 		return DBG_STATUS_OK;
1760 
1761 	if (QED_IS_K2(p_hwfn->cdev)) {
1762 		dev_data->chip_id = CHIP_K2;
1763 		dev_data->mode_enable[MODE_K2] = 1;
1764 	} else if (QED_IS_BB_B0(p_hwfn->cdev)) {
1765 		dev_data->chip_id = CHIP_BB;
1766 		dev_data->mode_enable[MODE_BB] = 1;
1767 	} else {
1768 		return DBG_STATUS_UNKNOWN_CHIP;
1769 	}
1770 
1771 	dev_data->platform_id = PLATFORM_ASIC;
1772 	dev_data->mode_enable[MODE_ASIC] = 1;
1773 
1774 	/* Initializes the GRC parameters */
1775 	qed_dbg_grc_init_params(p_hwfn);
1776 
1777 	dev_data->initialized = true;
1778 
1779 	return DBG_STATUS_OK;
1780 }
1781 
1782 static struct dbg_bus_block *get_dbg_bus_block_desc(struct qed_hwfn *p_hwfn,
1783 						    enum block_id block_id)
1784 {
1785 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1786 
1787 	return (struct dbg_bus_block *)&dbg_bus_blocks[block_id *
1788 						       MAX_CHIP_IDS +
1789 						       dev_data->chip_id];
1790 }
1791 
1792 /* Reads the FW info structure for the specified Storm from the chip,
1793  * and writes it to the specified fw_info pointer.
1794  */
1795 static void qed_read_fw_info(struct qed_hwfn *p_hwfn,
1796 			     struct qed_ptt *p_ptt,
1797 			     u8 storm_id, struct fw_info *fw_info)
1798 {
1799 	struct storm_defs *storm = &s_storm_defs[storm_id];
1800 	struct fw_info_location fw_info_location;
1801 	u32 addr, i, *dest;
1802 
1803 	memset(&fw_info_location, 0, sizeof(fw_info_location));
1804 	memset(fw_info, 0, sizeof(*fw_info));
1805 
1806 	/* Read first the address that points to fw_info location.
1807 	 * The address is located in the last line of the Storm RAM.
1808 	 */
1809 	addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
1810 	       DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
1811 	       sizeof(fw_info_location);
1812 	dest = (u32 *)&fw_info_location;
1813 
1814 	for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
1815 	     i++, addr += BYTES_IN_DWORD)
1816 		dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1817 
1818 	/* Read FW version info from Storm RAM */
1819 	if (fw_info_location.size > 0 && fw_info_location.size <=
1820 	    sizeof(*fw_info)) {
1821 		addr = fw_info_location.grc_addr;
1822 		dest = (u32 *)fw_info;
1823 		for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
1824 		     i++, addr += BYTES_IN_DWORD)
1825 			dest[i] = qed_rd(p_hwfn, p_ptt, addr);
1826 	}
1827 }
1828 
1829 /* Dumps the specified string to the specified buffer.
1830  * Returns the dumped size in bytes.
1831  */
1832 static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
1833 {
1834 	if (dump)
1835 		strcpy(dump_buf, str);
1836 
1837 	return (u32)strlen(str) + 1;
1838 }
1839 
1840 /* Dumps zeros to align the specified buffer to dwords.
1841  * Returns the dumped size in bytes.
1842  */
1843 static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
1844 {
1845 	u8 offset_in_dword, align_size;
1846 
1847 	offset_in_dword = (u8)(byte_offset & 0x3);
1848 	align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
1849 
1850 	if (dump && align_size)
1851 		memset(dump_buf, 0, align_size);
1852 
1853 	return align_size;
1854 }
1855 
1856 /* Writes the specified string param to the specified buffer.
1857  * Returns the dumped size in dwords.
1858  */
1859 static u32 qed_dump_str_param(u32 *dump_buf,
1860 			      bool dump,
1861 			      const char *param_name, const char *param_val)
1862 {
1863 	char *char_buf = (char *)dump_buf;
1864 	u32 offset = 0;
1865 
1866 	/* Dump param name */
1867 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1868 
1869 	/* Indicate a string param value */
1870 	if (dump)
1871 		*(char_buf + offset) = 1;
1872 	offset++;
1873 
1874 	/* Dump param value */
1875 	offset += qed_dump_str(char_buf + offset, dump, param_val);
1876 
1877 	/* Align buffer to next dword */
1878 	offset += qed_dump_align(char_buf + offset, dump, offset);
1879 
1880 	return BYTES_TO_DWORDS(offset);
1881 }
1882 
1883 /* Writes the specified numeric param to the specified buffer.
1884  * Returns the dumped size in dwords.
1885  */
1886 static u32 qed_dump_num_param(u32 *dump_buf,
1887 			      bool dump, const char *param_name, u32 param_val)
1888 {
1889 	char *char_buf = (char *)dump_buf;
1890 	u32 offset = 0;
1891 
1892 	/* Dump param name */
1893 	offset += qed_dump_str(char_buf + offset, dump, param_name);
1894 
1895 	/* Indicate a numeric param value */
1896 	if (dump)
1897 		*(char_buf + offset) = 0;
1898 	offset++;
1899 
1900 	/* Align buffer to next dword */
1901 	offset += qed_dump_align(char_buf + offset, dump, offset);
1902 
1903 	/* Dump param value (and change offset from bytes to dwords) */
1904 	offset = BYTES_TO_DWORDS(offset);
1905 	if (dump)
1906 		*(dump_buf + offset) = param_val;
1907 	offset++;
1908 
1909 	return offset;
1910 }
1911 
1912 /* Reads the FW version and writes it as a param to the specified buffer.
1913  * Returns the dumped size in dwords.
1914  */
1915 static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
1916 				 struct qed_ptt *p_ptt,
1917 				 u32 *dump_buf, bool dump)
1918 {
1919 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
1920 	char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
1921 	char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
1922 	struct fw_info fw_info = { {0}, {0} };
1923 	u32 offset = 0;
1924 
1925 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1926 		/* Read FW image/version from PRAM in a non-reset SEMI */
1927 		bool found = false;
1928 		u8 storm_id;
1929 
1930 		for (storm_id = 0; storm_id < MAX_DBG_STORMS && !found;
1931 		     storm_id++) {
1932 			struct storm_defs *storm = &s_storm_defs[storm_id];
1933 
1934 			/* Read FW version/image */
1935 			if (dev_data->block_in_reset[storm->block_id])
1936 				continue;
1937 
1938 			/* Read FW info for the current Storm */
1939 			qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
1940 
1941 			/* Create FW version/image strings */
1942 			if (snprintf(fw_ver_str, sizeof(fw_ver_str),
1943 				     "%d_%d_%d_%d", fw_info.ver.num.major,
1944 				     fw_info.ver.num.minor, fw_info.ver.num.rev,
1945 				     fw_info.ver.num.eng) < 0)
1946 				DP_NOTICE(p_hwfn,
1947 					  "Unexpected debug error: invalid FW version string\n");
1948 			switch (fw_info.ver.image_id) {
1949 			case FW_IMG_MAIN:
1950 				strcpy(fw_img_str, "main");
1951 				break;
1952 			default:
1953 				strcpy(fw_img_str, "unknown");
1954 				break;
1955 			}
1956 
1957 			found = true;
1958 		}
1959 	}
1960 
1961 	/* Dump FW version, image and timestamp */
1962 	offset += qed_dump_str_param(dump_buf + offset,
1963 				     dump, "fw-version", fw_ver_str);
1964 	offset += qed_dump_str_param(dump_buf + offset,
1965 				     dump, "fw-image", fw_img_str);
1966 	offset += qed_dump_num_param(dump_buf + offset,
1967 				     dump,
1968 				     "fw-timestamp", fw_info.ver.timestamp);
1969 
1970 	return offset;
1971 }
1972 
1973 /* Reads the MFW version and writes it as a param to the specified buffer.
1974  * Returns the dumped size in dwords.
1975  */
1976 static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
1977 				  struct qed_ptt *p_ptt,
1978 				  u32 *dump_buf, bool dump)
1979 {
1980 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
1981 
1982 	if (dump &&
1983 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
1984 		u32 global_section_offsize, global_section_addr, mfw_ver;
1985 		u32 public_data_addr, global_section_offsize_addr;
1986 
1987 		/* Find MCP public data GRC address. Needs to be ORed with
1988 		 * MCP_REG_SCRATCH due to a HW bug.
1989 		 */
1990 		public_data_addr = qed_rd(p_hwfn,
1991 					  p_ptt,
1992 					  MISC_REG_SHARED_MEM_ADDR) |
1993 				   MCP_REG_SCRATCH;
1994 
1995 		/* Find MCP public global section offset */
1996 		global_section_offsize_addr = public_data_addr +
1997 					      offsetof(struct mcp_public_data,
1998 						       sections) +
1999 					      sizeof(offsize_t) * PUBLIC_GLOBAL;
2000 		global_section_offsize = qed_rd(p_hwfn, p_ptt,
2001 						global_section_offsize_addr);
2002 		global_section_addr =
2003 			MCP_REG_SCRATCH +
2004 			(global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
2005 
2006 		/* Read MFW version from MCP public global section */
2007 		mfw_ver = qed_rd(p_hwfn, p_ptt,
2008 				 global_section_addr +
2009 				 offsetof(struct public_global, mfw_ver));
2010 
2011 		/* Dump MFW version param */
2012 		if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
2013 			     (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
2014 			     (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
2015 			DP_NOTICE(p_hwfn,
2016 				  "Unexpected debug error: invalid MFW version string\n");
2017 	}
2018 
2019 	return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
2020 }
2021 
2022 /* Writes a section header to the specified buffer.
2023  * Returns the dumped size in dwords.
2024  */
2025 static u32 qed_dump_section_hdr(u32 *dump_buf,
2026 				bool dump, const char *name, u32 num_params)
2027 {
2028 	return qed_dump_num_param(dump_buf, dump, name, num_params);
2029 }
2030 
2031 /* Writes the common global params to the specified buffer.
2032  * Returns the dumped size in dwords.
2033  */
2034 static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
2035 					 struct qed_ptt *p_ptt,
2036 					 u32 *dump_buf,
2037 					 bool dump,
2038 					 u8 num_specific_global_params)
2039 {
2040 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2041 	u32 offset = 0;
2042 	u8 num_params;
2043 
2044 	/* Dump global params section header */
2045 	num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
2046 	offset += qed_dump_section_hdr(dump_buf + offset,
2047 				       dump, "global_params", num_params);
2048 
2049 	/* Store params */
2050 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
2051 	offset += qed_dump_mfw_ver_param(p_hwfn,
2052 					 p_ptt, dump_buf + offset, dump);
2053 	offset += qed_dump_num_param(dump_buf + offset,
2054 				     dump, "tools-version", TOOLS_VERSION);
2055 	offset += qed_dump_str_param(dump_buf + offset,
2056 				     dump,
2057 				     "chip",
2058 				     s_chip_defs[dev_data->chip_id].name);
2059 	offset += qed_dump_str_param(dump_buf + offset,
2060 				     dump,
2061 				     "platform",
2062 				     s_platform_defs[dev_data->platform_id].
2063 				     name);
2064 	offset +=
2065 	    qed_dump_num_param(dump_buf + offset, dump, "pci-func",
2066 			       p_hwfn->abs_pf_id);
2067 
2068 	return offset;
2069 }
2070 
2071 /* Writes the "last" section (including CRC) to the specified buffer at the
2072  * given offset. Returns the dumped size in dwords.
2073  */
2074 static u32 qed_dump_last_section(struct qed_hwfn *p_hwfn,
2075 				 u32 *dump_buf, u32 offset, bool dump)
2076 {
2077 	u32 start_offset = offset;
2078 
2079 	/* Dump CRC section header */
2080 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
2081 
2082 	/* Calculate CRC32 and add it to the dword after the "last" section */
2083 	if (dump)
2084 		*(dump_buf + offset) = ~crc32(0xffffffff,
2085 					      (u8 *)dump_buf,
2086 					      DWORDS_TO_BYTES(offset));
2087 
2088 	offset++;
2089 
2090 	return offset - start_offset;
2091 }
2092 
2093 /* Update blocks reset state  */
2094 static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
2095 					  struct qed_ptt *p_ptt)
2096 {
2097 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2098 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2099 	u32 i;
2100 
2101 	/* Read reset registers */
2102 	for (i = 0; i < MAX_DBG_RESET_REGS; i++)
2103 		if (s_reset_regs_defs[i].exists[dev_data->chip_id])
2104 			reg_val[i] = qed_rd(p_hwfn,
2105 					    p_ptt, s_reset_regs_defs[i].addr);
2106 
2107 	/* Check if blocks are in reset */
2108 	for (i = 0; i < MAX_BLOCK_ID; i++) {
2109 		struct block_defs *block = s_block_defs[i];
2110 
2111 		dev_data->block_in_reset[i] = block->has_reset_bit &&
2112 		    !(reg_val[block->reset_reg] & BIT(block->reset_bit_offset));
2113 	}
2114 }
2115 
2116 /* Enable / disable the Debug block */
2117 static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
2118 				     struct qed_ptt *p_ptt, bool enable)
2119 {
2120 	qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
2121 }
2122 
2123 /* Resets the Debug block */
2124 static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
2125 				    struct qed_ptt *p_ptt)
2126 {
2127 	u32 dbg_reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
2128 	struct block_defs *dbg_block = s_block_defs[BLOCK_DBG];
2129 
2130 	dbg_reset_reg_addr = s_reset_regs_defs[dbg_block->reset_reg].addr;
2131 	old_reset_reg_val = qed_rd(p_hwfn, p_ptt, dbg_reset_reg_addr);
2132 	new_reset_reg_val =
2133 	    old_reset_reg_val & ~BIT(dbg_block->reset_bit_offset);
2134 
2135 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, new_reset_reg_val);
2136 	qed_wr(p_hwfn, p_ptt, dbg_reset_reg_addr, old_reset_reg_val);
2137 }
2138 
2139 static void qed_bus_set_framing_mode(struct qed_hwfn *p_hwfn,
2140 				     struct qed_ptt *p_ptt,
2141 				     enum dbg_bus_frame_modes mode)
2142 {
2143 	qed_wr(p_hwfn, p_ptt, DBG_REG_FRAMING_MODE, (u8)mode);
2144 }
2145 
2146 /* Enable / disable Debug Bus clients according to the specified mask
2147  * (1 = enable, 0 = disable).
2148  */
2149 static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
2150 				   struct qed_ptt *p_ptt, u32 client_mask)
2151 {
2152 	qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
2153 }
2154 
2155 static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
2156 {
2157 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2158 	bool arg1, arg2;
2159 	const u32 *ptr;
2160 	u8 tree_val;
2161 
2162 	/* Get next element from modes tree buffer */
2163 	ptr = s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
2164 	tree_val = ((u8 *)ptr)[(*modes_buf_offset)++];
2165 
2166 	switch (tree_val) {
2167 	case INIT_MODE_OP_NOT:
2168 		return !qed_is_mode_match(p_hwfn, modes_buf_offset);
2169 	case INIT_MODE_OP_OR:
2170 	case INIT_MODE_OP_AND:
2171 		arg1 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2172 		arg2 = qed_is_mode_match(p_hwfn, modes_buf_offset);
2173 		return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
2174 							arg2) : (arg1 && arg2);
2175 	default:
2176 		return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
2177 	}
2178 }
2179 
2180 /* Returns true if the specified entity (indicated by GRC param) should be
2181  * included in the dump, false otherwise.
2182  */
2183 static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
2184 				enum dbg_grc_params grc_param)
2185 {
2186 	return qed_grc_get_param(p_hwfn, grc_param) > 0;
2187 }
2188 
2189 /* Returns true of the specified Storm should be included in the dump, false
2190  * otherwise.
2191  */
2192 static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
2193 				      enum dbg_storms storm)
2194 {
2195 	return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
2196 }
2197 
2198 /* Returns true if the specified memory should be included in the dump, false
2199  * otherwise.
2200  */
2201 static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
2202 				    enum block_id block_id, u8 mem_group_id)
2203 {
2204 	struct block_defs *block = s_block_defs[block_id];
2205 	u8 i;
2206 
2207 	/* Check Storm match */
2208 	if (block->associated_to_storm &&
2209 	    !qed_grc_is_storm_included(p_hwfn,
2210 				       (enum dbg_storms)block->storm_id))
2211 		return false;
2212 
2213 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
2214 		struct big_ram_defs *big_ram = &s_big_ram_defs[i];
2215 
2216 		if (mem_group_id == big_ram->mem_group_id ||
2217 		    mem_group_id == big_ram->ram_mem_group_id)
2218 			return qed_grc_is_included(p_hwfn, big_ram->grc_param);
2219 	}
2220 
2221 	switch (mem_group_id) {
2222 	case MEM_GROUP_PXP_ILT:
2223 	case MEM_GROUP_PXP_MEM:
2224 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
2225 	case MEM_GROUP_RAM:
2226 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
2227 	case MEM_GROUP_PBUF:
2228 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
2229 	case MEM_GROUP_CAU_MEM:
2230 	case MEM_GROUP_CAU_SB:
2231 	case MEM_GROUP_CAU_PI:
2232 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
2233 	case MEM_GROUP_QM_MEM:
2234 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
2235 	case MEM_GROUP_CFC_MEM:
2236 	case MEM_GROUP_CONN_CFC_MEM:
2237 	case MEM_GROUP_TASK_CFC_MEM:
2238 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC);
2239 	case MEM_GROUP_IGU_MEM:
2240 	case MEM_GROUP_IGU_MSIX:
2241 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
2242 	case MEM_GROUP_MULD_MEM:
2243 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
2244 	case MEM_GROUP_PRS_MEM:
2245 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
2246 	case MEM_GROUP_DMAE_MEM:
2247 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
2248 	case MEM_GROUP_TM_MEM:
2249 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
2250 	case MEM_GROUP_SDM_MEM:
2251 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
2252 	case MEM_GROUP_TDIF_CTX:
2253 	case MEM_GROUP_RDIF_CTX:
2254 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
2255 	case MEM_GROUP_CM_MEM:
2256 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
2257 	case MEM_GROUP_IOR:
2258 		return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
2259 	default:
2260 		return true;
2261 	}
2262 }
2263 
2264 /* Stalls all Storms */
2265 static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
2266 				 struct qed_ptt *p_ptt, bool stall)
2267 {
2268 	u32 reg_addr;
2269 	u8 storm_id;
2270 
2271 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2272 		if (!qed_grc_is_storm_included(p_hwfn,
2273 					       (enum dbg_storms)storm_id))
2274 			continue;
2275 
2276 		reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
2277 		    SEM_FAST_REG_STALL_0_BB_K2;
2278 		qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
2279 	}
2280 
2281 	msleep(STALL_DELAY_MS);
2282 }
2283 
2284 /* Takes all blocks out of reset */
2285 static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
2286 				   struct qed_ptt *p_ptt)
2287 {
2288 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2289 	u32 reg_val[MAX_DBG_RESET_REGS] = { 0 };
2290 	u32 block_id, i;
2291 
2292 	/* Fill reset regs values */
2293 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2294 		struct block_defs *block = s_block_defs[block_id];
2295 
2296 		if (block->has_reset_bit && block->unreset)
2297 			reg_val[block->reset_reg] |=
2298 			    BIT(block->reset_bit_offset);
2299 	}
2300 
2301 	/* Write reset registers */
2302 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2303 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2304 			continue;
2305 
2306 		reg_val[i] |= s_reset_regs_defs[i].unreset_val;
2307 
2308 		if (reg_val[i])
2309 			qed_wr(p_hwfn,
2310 			       p_ptt,
2311 			       s_reset_regs_defs[i].addr +
2312 			       RESET_REG_UNRESET_OFFSET, reg_val[i]);
2313 	}
2314 }
2315 
2316 /* Returns the attention block data of the specified block */
2317 static const struct dbg_attn_block_type_data *
2318 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
2319 {
2320 	const struct dbg_attn_block *base_attn_block_arr =
2321 		(const struct dbg_attn_block *)
2322 		s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
2323 
2324 	return &base_attn_block_arr[block_id].per_type_data[attn_type];
2325 }
2326 
2327 /* Returns the attention registers of the specified block */
2328 static const struct dbg_attn_reg *
2329 qed_get_block_attn_regs(enum block_id block_id, enum dbg_attn_type attn_type,
2330 			u8 *num_attn_regs)
2331 {
2332 	const struct dbg_attn_block_type_data *block_type_data =
2333 		qed_get_block_attn_data(block_id, attn_type);
2334 
2335 	*num_attn_regs = block_type_data->num_regs;
2336 
2337 	return &((const struct dbg_attn_reg *)
2338 		 s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)[block_type_data->
2339 							  regs_offset];
2340 }
2341 
2342 /* For each block, clear the status of all parities */
2343 static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
2344 				   struct qed_ptt *p_ptt)
2345 {
2346 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2347 	const struct dbg_attn_reg *attn_reg_arr;
2348 	u8 reg_idx, num_attn_regs;
2349 	u32 block_id;
2350 
2351 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2352 		if (dev_data->block_in_reset[block_id])
2353 			continue;
2354 
2355 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2356 						       ATTN_TYPE_PARITY,
2357 						       &num_attn_regs);
2358 
2359 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2360 			const struct dbg_attn_reg *reg_data =
2361 				&attn_reg_arr[reg_idx];
2362 			u16 modes_buf_offset;
2363 			bool eval_mode;
2364 
2365 			/* Check mode */
2366 			eval_mode = GET_FIELD(reg_data->mode.data,
2367 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2368 			modes_buf_offset =
2369 				GET_FIELD(reg_data->mode.data,
2370 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2371 
2372 			/* If Mode match: clear parity status */
2373 			if (!eval_mode ||
2374 			    qed_is_mode_match(p_hwfn, &modes_buf_offset))
2375 				qed_rd(p_hwfn, p_ptt,
2376 				       DWORDS_TO_BYTES(reg_data->
2377 						       sts_clr_address));
2378 		}
2379 	}
2380 }
2381 
2382 /* Dumps GRC registers section header. Returns the dumped size in dwords.
2383  * The following parameters are dumped:
2384  * - count:	 no. of dumped entries
2385  * - split:	 split type
2386  * - id:	 split ID (dumped only if split_id >= 0)
2387  * - param_name: user parameter value (dumped only if param_name != NULL
2388  *		 and param_val != NULL).
2389  */
2390 static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
2391 				 bool dump,
2392 				 u32 num_reg_entries,
2393 				 const char *split_type,
2394 				 int split_id,
2395 				 const char *param_name, const char *param_val)
2396 {
2397 	u8 num_params = 2 + (split_id >= 0 ? 1 : 0) + (param_name ? 1 : 0);
2398 	u32 offset = 0;
2399 
2400 	offset += qed_dump_section_hdr(dump_buf + offset,
2401 				       dump, "grc_regs", num_params);
2402 	offset += qed_dump_num_param(dump_buf + offset,
2403 				     dump, "count", num_reg_entries);
2404 	offset += qed_dump_str_param(dump_buf + offset,
2405 				     dump, "split", split_type);
2406 	if (split_id >= 0)
2407 		offset += qed_dump_num_param(dump_buf + offset,
2408 					     dump, "id", split_id);
2409 	if (param_name && param_val)
2410 		offset += qed_dump_str_param(dump_buf + offset,
2411 					     dump, param_name, param_val);
2412 
2413 	return offset;
2414 }
2415 
2416 /* Dumps the GRC registers in the specified address range.
2417  * Returns the dumped size in dwords.
2418  * The addr and len arguments are specified in dwords.
2419  */
2420 static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
2421 				   struct qed_ptt *p_ptt,
2422 				   u32 *dump_buf,
2423 				   bool dump, u32 addr, u32 len, bool wide_bus)
2424 {
2425 	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
2426 
2427 	if (!dump)
2428 		return len;
2429 
2430 	for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
2431 		*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
2432 
2433 	return offset;
2434 }
2435 
2436 /* Dumps GRC registers sequence header. Returns the dumped size in dwords.
2437  * The addr and len arguments are specified in dwords.
2438  */
2439 static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
2440 				      bool dump, u32 addr, u32 len)
2441 {
2442 	if (dump)
2443 		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
2444 
2445 	return 1;
2446 }
2447 
2448 /* Dumps GRC registers sequence. Returns the dumped size in dwords.
2449  * The addr and len arguments are specified in dwords.
2450  */
2451 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
2452 				  struct qed_ptt *p_ptt,
2453 				  u32 *dump_buf,
2454 				  bool dump, u32 addr, u32 len, bool wide_bus)
2455 {
2456 	u32 offset = 0;
2457 
2458 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
2459 	offset += qed_grc_dump_addr_range(p_hwfn,
2460 					  p_ptt,
2461 					  dump_buf + offset,
2462 					  dump, addr, len, wide_bus);
2463 
2464 	return offset;
2465 }
2466 
2467 /* Dumps GRC registers sequence with skip cycle.
2468  * Returns the dumped size in dwords.
2469  * - addr:	start GRC address in dwords
2470  * - total_len:	total no. of dwords to dump
2471  * - read_len:	no. consecutive dwords to read
2472  * - skip_len:	no. of dwords to skip (and fill with zeros)
2473  */
2474 static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
2475 				       struct qed_ptt *p_ptt,
2476 				       u32 *dump_buf,
2477 				       bool dump,
2478 				       u32 addr,
2479 				       u32 total_len,
2480 				       u32 read_len, u32 skip_len)
2481 {
2482 	u32 offset = 0, reg_offset = 0;
2483 
2484 	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
2485 
2486 	if (!dump)
2487 		return offset + total_len;
2488 
2489 	while (reg_offset < total_len) {
2490 		u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
2491 
2492 		offset += qed_grc_dump_addr_range(p_hwfn,
2493 						  p_ptt,
2494 						  dump_buf + offset,
2495 						  dump, addr, curr_len, false);
2496 		reg_offset += curr_len;
2497 		addr += curr_len;
2498 
2499 		if (reg_offset < total_len) {
2500 			curr_len = min_t(u32, skip_len, total_len - skip_len);
2501 			memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
2502 			offset += curr_len;
2503 			reg_offset += curr_len;
2504 			addr += curr_len;
2505 		}
2506 	}
2507 
2508 	return offset;
2509 }
2510 
2511 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2512 static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
2513 				     struct qed_ptt *p_ptt,
2514 				     struct dbg_array input_regs_arr,
2515 				     u32 *dump_buf,
2516 				     bool dump,
2517 				     bool block_enable[MAX_BLOCK_ID],
2518 				     u32 *num_dumped_reg_entries)
2519 {
2520 	u32 i, offset = 0, input_offset = 0;
2521 	bool mode_match = true;
2522 
2523 	*num_dumped_reg_entries = 0;
2524 
2525 	while (input_offset < input_regs_arr.size_in_dwords) {
2526 		const struct dbg_dump_cond_hdr *cond_hdr =
2527 		    (const struct dbg_dump_cond_hdr *)
2528 		    &input_regs_arr.ptr[input_offset++];
2529 		u16 modes_buf_offset;
2530 		bool eval_mode;
2531 
2532 		/* Check mode/block */
2533 		eval_mode = GET_FIELD(cond_hdr->mode.data,
2534 				      DBG_MODE_HDR_EVAL_MODE) > 0;
2535 		if (eval_mode) {
2536 			modes_buf_offset =
2537 				GET_FIELD(cond_hdr->mode.data,
2538 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2539 			mode_match = qed_is_mode_match(p_hwfn,
2540 						       &modes_buf_offset);
2541 		}
2542 
2543 		if (!mode_match || !block_enable[cond_hdr->block_id]) {
2544 			input_offset += cond_hdr->data_size;
2545 			continue;
2546 		}
2547 
2548 		for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
2549 			const struct dbg_dump_reg *reg =
2550 			    (const struct dbg_dump_reg *)
2551 			    &input_regs_arr.ptr[input_offset];
2552 			u32 addr, len;
2553 			bool wide_bus;
2554 
2555 			addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
2556 			len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
2557 			wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
2558 			offset += qed_grc_dump_reg_entry(p_hwfn,
2559 							 p_ptt,
2560 							 dump_buf + offset,
2561 							 dump,
2562 							 addr,
2563 							 len,
2564 							 wide_bus);
2565 			(*num_dumped_reg_entries)++;
2566 		}
2567 	}
2568 
2569 	return offset;
2570 }
2571 
2572 /* Dumps GRC registers entries. Returns the dumped size in dwords. */
2573 static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
2574 				   struct qed_ptt *p_ptt,
2575 				   struct dbg_array input_regs_arr,
2576 				   u32 *dump_buf,
2577 				   bool dump,
2578 				   bool block_enable[MAX_BLOCK_ID],
2579 				   const char *split_type_name,
2580 				   u32 split_id,
2581 				   const char *param_name,
2582 				   const char *param_val)
2583 {
2584 	u32 num_dumped_reg_entries, offset;
2585 
2586 	/* Calculate register dump header size (and skip it for now) */
2587 	offset = qed_grc_dump_regs_hdr(dump_buf,
2588 				       false,
2589 				       0,
2590 				       split_type_name,
2591 				       split_id, param_name, param_val);
2592 
2593 	/* Dump registers */
2594 	offset += qed_grc_dump_regs_entries(p_hwfn,
2595 					    p_ptt,
2596 					    input_regs_arr,
2597 					    dump_buf + offset,
2598 					    dump,
2599 					    block_enable,
2600 					    &num_dumped_reg_entries);
2601 
2602 	/* Write register dump header */
2603 	if (dump && num_dumped_reg_entries > 0)
2604 		qed_grc_dump_regs_hdr(dump_buf,
2605 				      dump,
2606 				      num_dumped_reg_entries,
2607 				      split_type_name,
2608 				      split_id, param_name, param_val);
2609 
2610 	return num_dumped_reg_entries > 0 ? offset : 0;
2611 }
2612 
2613 /* Dumps registers according to the input registers array. Returns the dumped
2614  * size in dwords.
2615  */
2616 static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
2617 				  struct qed_ptt *p_ptt,
2618 				  u32 *dump_buf,
2619 				  bool dump,
2620 				  bool block_enable[MAX_BLOCK_ID],
2621 				  const char *param_name, const char *param_val)
2622 {
2623 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2624 	struct chip_platform_defs *chip_platform;
2625 	u32 offset = 0, input_offset = 0;
2626 	struct chip_defs *chip;
2627 	u8 port_id, pf_id, vf_id;
2628 	u16 fid;
2629 
2630 	chip = &s_chip_defs[dev_data->chip_id];
2631 	chip_platform = &chip->per_platform[dev_data->platform_id];
2632 
2633 	if (dump)
2634 		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
2635 
2636 	while (input_offset <
2637 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].size_in_dwords) {
2638 		const struct dbg_dump_split_hdr *split_hdr;
2639 		struct dbg_array curr_input_regs_arr;
2640 		u32 split_data_size;
2641 		u8 split_type_id;
2642 
2643 		split_hdr =
2644 			(const struct dbg_dump_split_hdr *)
2645 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset++];
2646 		split_type_id =
2647 			GET_FIELD(split_hdr->hdr,
2648 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
2649 		split_data_size =
2650 			GET_FIELD(split_hdr->hdr,
2651 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
2652 		curr_input_regs_arr.ptr =
2653 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr[input_offset];
2654 		curr_input_regs_arr.size_in_dwords = split_data_size;
2655 
2656 		switch (split_type_id) {
2657 		case SPLIT_TYPE_NONE:
2658 			offset += qed_grc_dump_split_data(p_hwfn,
2659 							  p_ptt,
2660 							  curr_input_regs_arr,
2661 							  dump_buf + offset,
2662 							  dump,
2663 							  block_enable,
2664 							  "eng",
2665 							  (u32)(-1),
2666 							  param_name,
2667 							  param_val);
2668 			break;
2669 
2670 		case SPLIT_TYPE_PORT:
2671 			for (port_id = 0; port_id < chip_platform->num_ports;
2672 			     port_id++) {
2673 				if (dump)
2674 					qed_port_pretend(p_hwfn, p_ptt,
2675 							 port_id);
2676 				offset +=
2677 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2678 							    curr_input_regs_arr,
2679 							    dump_buf + offset,
2680 							    dump, block_enable,
2681 							    "port", port_id,
2682 							    param_name,
2683 							    param_val);
2684 			}
2685 			break;
2686 
2687 		case SPLIT_TYPE_PF:
2688 		case SPLIT_TYPE_PORT_PF:
2689 			for (pf_id = 0; pf_id < chip_platform->num_pfs;
2690 			     pf_id++) {
2691 				u8 pfid_shift =
2692 					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2693 
2694 				if (dump) {
2695 					fid = pf_id << pfid_shift;
2696 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2697 				}
2698 
2699 				offset +=
2700 				    qed_grc_dump_split_data(p_hwfn,
2701 							    p_ptt,
2702 							    curr_input_regs_arr,
2703 							    dump_buf + offset,
2704 							    dump,
2705 							    block_enable,
2706 							    "pf",
2707 							    pf_id,
2708 							    param_name,
2709 							    param_val);
2710 			}
2711 			break;
2712 
2713 		case SPLIT_TYPE_VF:
2714 			for (vf_id = 0; vf_id < chip_platform->num_vfs;
2715 			     vf_id++) {
2716 				u8 vfvalid_shift =
2717 					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
2718 				u8 vfid_shift =
2719 					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
2720 
2721 				if (dump) {
2722 					fid = BIT(vfvalid_shift) |
2723 					      (vf_id << vfid_shift);
2724 					qed_fid_pretend(p_hwfn, p_ptt, fid);
2725 				}
2726 
2727 				offset +=
2728 				    qed_grc_dump_split_data(p_hwfn, p_ptt,
2729 							    curr_input_regs_arr,
2730 							    dump_buf + offset,
2731 							    dump, block_enable,
2732 							    "vf", vf_id,
2733 							    param_name,
2734 							    param_val);
2735 			}
2736 			break;
2737 
2738 		default:
2739 			break;
2740 		}
2741 
2742 		input_offset += split_data_size;
2743 	}
2744 
2745 	/* Pretend to original PF */
2746 	if (dump) {
2747 		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
2748 		qed_fid_pretend(p_hwfn, p_ptt, fid);
2749 	}
2750 
2751 	return offset;
2752 }
2753 
2754 /* Dump reset registers. Returns the dumped size in dwords. */
2755 static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
2756 				   struct qed_ptt *p_ptt,
2757 				   u32 *dump_buf, bool dump)
2758 {
2759 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2760 	u32 i, offset = 0, num_regs = 0;
2761 
2762 	/* Calculate header size */
2763 	offset += qed_grc_dump_regs_hdr(dump_buf,
2764 					false, 0, "eng", -1, NULL, NULL);
2765 
2766 	/* Write reset registers */
2767 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
2768 		if (!s_reset_regs_defs[i].exists[dev_data->chip_id])
2769 			continue;
2770 
2771 		offset += qed_grc_dump_reg_entry(p_hwfn,
2772 						 p_ptt,
2773 						 dump_buf + offset,
2774 						 dump,
2775 						 BYTES_TO_DWORDS
2776 						 (s_reset_regs_defs[i].addr), 1,
2777 						 false);
2778 		num_regs++;
2779 	}
2780 
2781 	/* Write header */
2782 	if (dump)
2783 		qed_grc_dump_regs_hdr(dump_buf,
2784 				      true, num_regs, "eng", -1, NULL, NULL);
2785 
2786 	return offset;
2787 }
2788 
2789 /* Dump registers that are modified during GRC Dump and therefore must be
2790  * dumped first. Returns the dumped size in dwords.
2791  */
2792 static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
2793 				      struct qed_ptt *p_ptt,
2794 				      u32 *dump_buf, bool dump)
2795 {
2796 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
2797 	u32 block_id, offset = 0, num_reg_entries = 0;
2798 	const struct dbg_attn_reg *attn_reg_arr;
2799 	u8 storm_id, reg_idx, num_attn_regs;
2800 
2801 	/* Calculate header size */
2802 	offset += qed_grc_dump_regs_hdr(dump_buf,
2803 					false, 0, "eng", -1, NULL, NULL);
2804 
2805 	/* Write parity registers */
2806 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
2807 		if (dev_data->block_in_reset[block_id] && dump)
2808 			continue;
2809 
2810 		attn_reg_arr = qed_get_block_attn_regs((enum block_id)block_id,
2811 						       ATTN_TYPE_PARITY,
2812 						       &num_attn_regs);
2813 
2814 		for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
2815 			const struct dbg_attn_reg *reg_data =
2816 				&attn_reg_arr[reg_idx];
2817 			u16 modes_buf_offset;
2818 			bool eval_mode;
2819 			u32 addr;
2820 
2821 			/* Check mode */
2822 			eval_mode = GET_FIELD(reg_data->mode.data,
2823 					      DBG_MODE_HDR_EVAL_MODE) > 0;
2824 			modes_buf_offset =
2825 				GET_FIELD(reg_data->mode.data,
2826 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
2827 			if (eval_mode &&
2828 			    !qed_is_mode_match(p_hwfn, &modes_buf_offset))
2829 				continue;
2830 
2831 			/* Mode match: read & dump registers */
2832 			addr = reg_data->mask_address;
2833 			offset += qed_grc_dump_reg_entry(p_hwfn,
2834 							 p_ptt,
2835 							 dump_buf + offset,
2836 							 dump,
2837 							 addr,
2838 							 1, false);
2839 			addr = GET_FIELD(reg_data->data,
2840 					 DBG_ATTN_REG_STS_ADDRESS);
2841 			offset += qed_grc_dump_reg_entry(p_hwfn,
2842 							 p_ptt,
2843 							 dump_buf + offset,
2844 							 dump,
2845 							 addr,
2846 							 1, false);
2847 			num_reg_entries += 2;
2848 		}
2849 	}
2850 
2851 	/* Write Storm stall status registers */
2852 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
2853 		struct storm_defs *storm = &s_storm_defs[storm_id];
2854 		u32 addr;
2855 
2856 		if (dev_data->block_in_reset[storm->block_id] && dump)
2857 			continue;
2858 
2859 		addr =
2860 		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
2861 				    SEM_FAST_REG_STALLED);
2862 		offset += qed_grc_dump_reg_entry(p_hwfn,
2863 						 p_ptt,
2864 						 dump_buf + offset,
2865 						 dump,
2866 						 addr,
2867 						 1,
2868 						 false);
2869 		num_reg_entries++;
2870 	}
2871 
2872 	/* Write header */
2873 	if (dump)
2874 		qed_grc_dump_regs_hdr(dump_buf,
2875 				      true,
2876 				      num_reg_entries, "eng", -1, NULL, NULL);
2877 
2878 	return offset;
2879 }
2880 
2881 /* Dumps registers that can't be represented in the debug arrays */
2882 static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
2883 				     struct qed_ptt *p_ptt,
2884 				     u32 *dump_buf, bool dump)
2885 {
2886 	u32 offset = 0, addr;
2887 
2888 	offset += qed_grc_dump_regs_hdr(dump_buf,
2889 					dump, 2, "eng", -1, NULL, NULL);
2890 
2891 	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be
2892 	 * skipped).
2893 	 */
2894 	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
2895 	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
2896 					      p_ptt,
2897 					      dump_buf + offset,
2898 					      dump,
2899 					      addr,
2900 					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
2901 					      7,
2902 					      1);
2903 	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
2904 	offset +=
2905 	    qed_grc_dump_reg_entry_skip(p_hwfn,
2906 					p_ptt,
2907 					dump_buf + offset,
2908 					dump,
2909 					addr,
2910 					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
2911 					7,
2912 					1);
2913 
2914 	return offset;
2915 }
2916 
2917 /* Dumps a GRC memory header (section and params). Returns the dumped size in
2918  * dwords. The following parameters are dumped:
2919  * - name:	   dumped only if it's not NULL.
2920  * - addr:	   in dwords, dumped only if name is NULL.
2921  * - len:	   in dwords, always dumped.
2922  * - width:	   dumped if it's not zero.
2923  * - packed:	   dumped only if it's not false.
2924  * - mem_group:	   always dumped.
2925  * - is_storm:	   true only if the memory is related to a Storm.
2926  * - storm_letter: valid only if is_storm is true.
2927  *
2928  */
2929 static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
2930 				u32 *dump_buf,
2931 				bool dump,
2932 				const char *name,
2933 				u32 addr,
2934 				u32 len,
2935 				u32 bit_width,
2936 				bool packed,
2937 				const char *mem_group,
2938 				bool is_storm, char storm_letter)
2939 {
2940 	u8 num_params = 3;
2941 	u32 offset = 0;
2942 	char buf[64];
2943 
2944 	if (!len)
2945 		DP_NOTICE(p_hwfn,
2946 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
2947 
2948 	if (bit_width)
2949 		num_params++;
2950 	if (packed)
2951 		num_params++;
2952 
2953 	/* Dump section header */
2954 	offset += qed_dump_section_hdr(dump_buf + offset,
2955 				       dump, "grc_mem", num_params);
2956 
2957 	if (name) {
2958 		/* Dump name */
2959 		if (is_storm) {
2960 			strcpy(buf, "?STORM_");
2961 			buf[0] = storm_letter;
2962 			strcpy(buf + strlen(buf), name);
2963 		} else {
2964 			strcpy(buf, name);
2965 		}
2966 
2967 		offset += qed_dump_str_param(dump_buf + offset,
2968 					     dump, "name", buf);
2969 		if (dump)
2970 			DP_VERBOSE(p_hwfn,
2971 				   QED_MSG_DEBUG,
2972 				   "Dumping %d registers from %s...\n",
2973 				   len, buf);
2974 	} else {
2975 		/* Dump address */
2976 		u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
2977 
2978 		offset += qed_dump_num_param(dump_buf + offset,
2979 					     dump, "addr", addr_in_bytes);
2980 		if (dump && len > 64)
2981 			DP_VERBOSE(p_hwfn,
2982 				   QED_MSG_DEBUG,
2983 				   "Dumping %d registers from address 0x%x...\n",
2984 				   len, addr_in_bytes);
2985 	}
2986 
2987 	/* Dump len */
2988 	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
2989 
2990 	/* Dump bit width */
2991 	if (bit_width)
2992 		offset += qed_dump_num_param(dump_buf + offset,
2993 					     dump, "width", bit_width);
2994 
2995 	/* Dump packed */
2996 	if (packed)
2997 		offset += qed_dump_num_param(dump_buf + offset,
2998 					     dump, "packed", 1);
2999 
3000 	/* Dump reg type */
3001 	if (is_storm) {
3002 		strcpy(buf, "?STORM_");
3003 		buf[0] = storm_letter;
3004 		strcpy(buf + strlen(buf), mem_group);
3005 	} else {
3006 		strcpy(buf, mem_group);
3007 	}
3008 
3009 	offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
3010 
3011 	return offset;
3012 }
3013 
3014 /* Dumps a single GRC memory. If name is NULL, the memory is stored by address.
3015  * Returns the dumped size in dwords.
3016  * The addr and len arguments are specified in dwords.
3017  */
3018 static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
3019 			    struct qed_ptt *p_ptt,
3020 			    u32 *dump_buf,
3021 			    bool dump,
3022 			    const char *name,
3023 			    u32 addr,
3024 			    u32 len,
3025 			    bool wide_bus,
3026 			    u32 bit_width,
3027 			    bool packed,
3028 			    const char *mem_group,
3029 			    bool is_storm, char storm_letter)
3030 {
3031 	u32 offset = 0;
3032 
3033 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3034 				       dump_buf + offset,
3035 				       dump,
3036 				       name,
3037 				       addr,
3038 				       len,
3039 				       bit_width,
3040 				       packed,
3041 				       mem_group, is_storm, storm_letter);
3042 	offset += qed_grc_dump_addr_range(p_hwfn,
3043 					  p_ptt,
3044 					  dump_buf + offset,
3045 					  dump, addr, len, wide_bus);
3046 
3047 	return offset;
3048 }
3049 
3050 /* Dumps GRC memories entries. Returns the dumped size in dwords. */
3051 static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
3052 				    struct qed_ptt *p_ptt,
3053 				    struct dbg_array input_mems_arr,
3054 				    u32 *dump_buf, bool dump)
3055 {
3056 	u32 i, offset = 0, input_offset = 0;
3057 	bool mode_match = true;
3058 
3059 	while (input_offset < input_mems_arr.size_in_dwords) {
3060 		const struct dbg_dump_cond_hdr *cond_hdr;
3061 		u16 modes_buf_offset;
3062 		u32 num_entries;
3063 		bool eval_mode;
3064 
3065 		cond_hdr = (const struct dbg_dump_cond_hdr *)
3066 			   &input_mems_arr.ptr[input_offset++];
3067 		num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
3068 
3069 		/* Check required mode */
3070 		eval_mode = GET_FIELD(cond_hdr->mode.data,
3071 				      DBG_MODE_HDR_EVAL_MODE) > 0;
3072 		if (eval_mode) {
3073 			modes_buf_offset =
3074 				GET_FIELD(cond_hdr->mode.data,
3075 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
3076 			mode_match = qed_is_mode_match(p_hwfn,
3077 						       &modes_buf_offset);
3078 		}
3079 
3080 		if (!mode_match) {
3081 			input_offset += cond_hdr->data_size;
3082 			continue;
3083 		}
3084 
3085 		for (i = 0; i < num_entries;
3086 		     i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
3087 			const struct dbg_dump_mem *mem =
3088 				(const struct dbg_dump_mem *)
3089 				&input_mems_arr.ptr[input_offset];
3090 			u8 mem_group_id = GET_FIELD(mem->dword0,
3091 						    DBG_DUMP_MEM_MEM_GROUP_ID);
3092 			bool is_storm = false, mem_wide_bus;
3093 			enum dbg_grc_params grc_param;
3094 			char storm_letter = 'a';
3095 			enum block_id block_id;
3096 			u32 mem_addr, mem_len;
3097 
3098 			if (mem_group_id >= MEM_GROUPS_NUM) {
3099 				DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
3100 				return 0;
3101 			}
3102 
3103 			block_id = (enum block_id)cond_hdr->block_id;
3104 			if (!qed_grc_is_mem_included(p_hwfn,
3105 						     block_id,
3106 						     mem_group_id))
3107 				continue;
3108 
3109 			mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
3110 			mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
3111 			mem_wide_bus = GET_FIELD(mem->dword1,
3112 						 DBG_DUMP_MEM_WIDE_BUS);
3113 
3114 			/* Update memory length for CCFC/TCFC memories
3115 			 * according to number of LCIDs/LTIDs.
3116 			 */
3117 			if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
3118 				if (mem_len % MAX_LCIDS) {
3119 					DP_NOTICE(p_hwfn,
3120 						  "Invalid CCFC connection memory size\n");
3121 					return 0;
3122 				}
3123 
3124 				grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3125 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3126 					  (mem_len / MAX_LCIDS);
3127 			} else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM) {
3128 				if (mem_len % MAX_LTIDS) {
3129 					DP_NOTICE(p_hwfn,
3130 						  "Invalid TCFC task memory size\n");
3131 					return 0;
3132 				}
3133 
3134 				grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3135 				mem_len = qed_grc_get_param(p_hwfn, grc_param) *
3136 					  (mem_len / MAX_LTIDS);
3137 			}
3138 
3139 			/* If memory is associated with Storm, update Storm
3140 			 * details.
3141 			 */
3142 			if (s_block_defs
3143 			    [cond_hdr->block_id]->associated_to_storm) {
3144 				is_storm = true;
3145 				storm_letter =
3146 				    s_storm_defs[s_block_defs
3147 						 [cond_hdr->block_id]->
3148 						 storm_id].letter;
3149 			}
3150 
3151 			/* Dump memory */
3152 			offset += qed_grc_dump_mem(p_hwfn,
3153 						p_ptt,
3154 						dump_buf + offset,
3155 						dump,
3156 						NULL,
3157 						mem_addr,
3158 						mem_len,
3159 						mem_wide_bus,
3160 						0,
3161 						false,
3162 						s_mem_group_names[mem_group_id],
3163 						is_storm,
3164 						storm_letter);
3165 		}
3166 	}
3167 
3168 	return offset;
3169 }
3170 
3171 /* Dumps GRC memories according to the input array dump_mem.
3172  * Returns the dumped size in dwords.
3173  */
3174 static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
3175 				 struct qed_ptt *p_ptt,
3176 				 u32 *dump_buf, bool dump)
3177 {
3178 	u32 offset = 0, input_offset = 0;
3179 
3180 	while (input_offset <
3181 	       s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].size_in_dwords) {
3182 		const struct dbg_dump_split_hdr *split_hdr;
3183 		struct dbg_array curr_input_mems_arr;
3184 		u32 split_data_size;
3185 		u8 split_type_id;
3186 
3187 		split_hdr = (const struct dbg_dump_split_hdr *)
3188 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset++];
3189 		split_type_id =
3190 			GET_FIELD(split_hdr->hdr,
3191 				  DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
3192 		split_data_size =
3193 			GET_FIELD(split_hdr->hdr,
3194 				  DBG_DUMP_SPLIT_HDR_DATA_SIZE);
3195 		curr_input_mems_arr.ptr =
3196 			&s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr[input_offset];
3197 		curr_input_mems_arr.size_in_dwords = split_data_size;
3198 
3199 		switch (split_type_id) {
3200 		case SPLIT_TYPE_NONE:
3201 			offset += qed_grc_dump_mem_entries(p_hwfn,
3202 							   p_ptt,
3203 							   curr_input_mems_arr,
3204 							   dump_buf + offset,
3205 							   dump);
3206 			break;
3207 
3208 		default:
3209 			DP_NOTICE(p_hwfn,
3210 				  "Dumping split memories is currently not supported\n");
3211 			break;
3212 		}
3213 
3214 		input_offset += split_data_size;
3215 	}
3216 
3217 	return offset;
3218 }
3219 
3220 /* Dumps GRC context data for the specified Storm.
3221  * Returns the dumped size in dwords.
3222  * The lid_size argument is specified in quad-regs.
3223  */
3224 static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
3225 				 struct qed_ptt *p_ptt,
3226 				 u32 *dump_buf,
3227 				 bool dump,
3228 				 const char *name,
3229 				 u32 num_lids,
3230 				 u32 lid_size,
3231 				 u32 rd_reg_addr,
3232 				 u8 storm_id)
3233 {
3234 	struct storm_defs *storm = &s_storm_defs[storm_id];
3235 	u32 i, lid, total_size, offset = 0;
3236 
3237 	if (!lid_size)
3238 		return 0;
3239 
3240 	lid_size *= BYTES_IN_DWORD;
3241 	total_size = num_lids * lid_size;
3242 
3243 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3244 				       dump_buf + offset,
3245 				       dump,
3246 				       name,
3247 				       0,
3248 				       total_size,
3249 				       lid_size * 32,
3250 				       false, name, true, storm->letter);
3251 
3252 	if (!dump)
3253 		return offset + total_size;
3254 
3255 	/* Dump context data */
3256 	for (lid = 0; lid < num_lids; lid++) {
3257 		for (i = 0; i < lid_size; i++, offset++) {
3258 			qed_wr(p_hwfn,
3259 			       p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
3260 			*(dump_buf + offset) = qed_rd(p_hwfn,
3261 						      p_ptt, rd_reg_addr);
3262 		}
3263 	}
3264 
3265 	return offset;
3266 }
3267 
3268 /* Dumps GRC contexts. Returns the dumped size in dwords. */
3269 static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
3270 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3271 {
3272 	enum dbg_grc_params grc_param;
3273 	u32 offset = 0;
3274 	u8 storm_id;
3275 
3276 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3277 		struct storm_defs *storm = &s_storm_defs[storm_id];
3278 
3279 		if (!qed_grc_is_storm_included(p_hwfn,
3280 					       (enum dbg_storms)storm_id))
3281 			continue;
3282 
3283 		/* Dump Conn AG context size */
3284 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3285 		offset +=
3286 			qed_grc_dump_ctx_data(p_hwfn,
3287 					      p_ptt,
3288 					      dump_buf + offset,
3289 					      dump,
3290 					      "CONN_AG_CTX",
3291 					      qed_grc_get_param(p_hwfn,
3292 								grc_param),
3293 					      storm->cm_conn_ag_ctx_lid_size,
3294 					      storm->cm_conn_ag_ctx_rd_addr,
3295 					      storm_id);
3296 
3297 		/* Dump Conn ST context size */
3298 		grc_param = DBG_GRC_PARAM_NUM_LCIDS;
3299 		offset +=
3300 			qed_grc_dump_ctx_data(p_hwfn,
3301 					      p_ptt,
3302 					      dump_buf + offset,
3303 					      dump,
3304 					      "CONN_ST_CTX",
3305 					      qed_grc_get_param(p_hwfn,
3306 								grc_param),
3307 					      storm->cm_conn_st_ctx_lid_size,
3308 					      storm->cm_conn_st_ctx_rd_addr,
3309 					      storm_id);
3310 
3311 		/* Dump Task AG context size */
3312 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3313 		offset +=
3314 			qed_grc_dump_ctx_data(p_hwfn,
3315 					      p_ptt,
3316 					      dump_buf + offset,
3317 					      dump,
3318 					      "TASK_AG_CTX",
3319 					      qed_grc_get_param(p_hwfn,
3320 								grc_param),
3321 					      storm->cm_task_ag_ctx_lid_size,
3322 					      storm->cm_task_ag_ctx_rd_addr,
3323 					      storm_id);
3324 
3325 		/* Dump Task ST context size */
3326 		grc_param = DBG_GRC_PARAM_NUM_LTIDS;
3327 		offset +=
3328 			qed_grc_dump_ctx_data(p_hwfn,
3329 					      p_ptt,
3330 					      dump_buf + offset,
3331 					      dump,
3332 					      "TASK_ST_CTX",
3333 					      qed_grc_get_param(p_hwfn,
3334 								grc_param),
3335 					      storm->cm_task_st_ctx_lid_size,
3336 					      storm->cm_task_st_ctx_rd_addr,
3337 					      storm_id);
3338 	}
3339 
3340 	return offset;
3341 }
3342 
3343 /* Dumps GRC IORs data. Returns the dumped size in dwords. */
3344 static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
3345 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3346 {
3347 	char buf[10] = "IOR_SET_?";
3348 	u32 addr, offset = 0;
3349 	u8 storm_id, set_id;
3350 
3351 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3352 		struct storm_defs *storm = &s_storm_defs[storm_id];
3353 
3354 		if (!qed_grc_is_storm_included(p_hwfn,
3355 					       (enum dbg_storms)storm_id))
3356 			continue;
3357 
3358 		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
3359 			addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
3360 					       SEM_FAST_REG_STORM_REG_FILE) +
3361 			       IOR_SET_OFFSET(set_id);
3362 			buf[strlen(buf) - 1] = '0' + set_id;
3363 			offset += qed_grc_dump_mem(p_hwfn,
3364 						   p_ptt,
3365 						   dump_buf + offset,
3366 						   dump,
3367 						   buf,
3368 						   addr,
3369 						   IORS_PER_SET,
3370 						   false,
3371 						   32,
3372 						   false,
3373 						   "ior",
3374 						   true,
3375 						   storm->letter);
3376 		}
3377 	}
3378 
3379 	return offset;
3380 }
3381 
3382 /* Dump VFC CAM. Returns the dumped size in dwords. */
3383 static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
3384 				struct qed_ptt *p_ptt,
3385 				u32 *dump_buf, bool dump, u8 storm_id)
3386 {
3387 	u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
3388 	struct storm_defs *storm = &s_storm_defs[storm_id];
3389 	u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
3390 	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
3391 	u32 row, i, offset = 0;
3392 
3393 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3394 				       dump_buf + offset,
3395 				       dump,
3396 				       "vfc_cam",
3397 				       0,
3398 				       total_size,
3399 				       256,
3400 				       false, "vfc_cam", true, storm->letter);
3401 
3402 	if (!dump)
3403 		return offset + total_size;
3404 
3405 	/* Prepare CAM address */
3406 	SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
3407 
3408 	for (row = 0; row < VFC_CAM_NUM_ROWS;
3409 	     row++, offset += VFC_CAM_RESP_DWORDS) {
3410 		/* Write VFC CAM command */
3411 		SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
3412 		ARR_REG_WR(p_hwfn,
3413 			   p_ptt,
3414 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3415 			   cam_cmd, VFC_CAM_CMD_DWORDS);
3416 
3417 		/* Write VFC CAM address */
3418 		ARR_REG_WR(p_hwfn,
3419 			   p_ptt,
3420 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3421 			   cam_addr, VFC_CAM_ADDR_DWORDS);
3422 
3423 		/* Read VFC CAM read response */
3424 		ARR_REG_RD(p_hwfn,
3425 			   p_ptt,
3426 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3427 			   dump_buf + offset, VFC_CAM_RESP_DWORDS);
3428 	}
3429 
3430 	return offset;
3431 }
3432 
3433 /* Dump VFC RAM. Returns the dumped size in dwords. */
3434 static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
3435 				struct qed_ptt *p_ptt,
3436 				u32 *dump_buf,
3437 				bool dump,
3438 				u8 storm_id, struct vfc_ram_defs *ram_defs)
3439 {
3440 	u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
3441 	struct storm_defs *storm = &s_storm_defs[storm_id];
3442 	u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
3443 	u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
3444 	u32 row, i, offset = 0;
3445 
3446 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3447 				       dump_buf + offset,
3448 				       dump,
3449 				       ram_defs->mem_name,
3450 				       0,
3451 				       total_size,
3452 				       256,
3453 				       false,
3454 				       ram_defs->type_name,
3455 				       true, storm->letter);
3456 
3457 	/* Prepare RAM address */
3458 	SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
3459 
3460 	if (!dump)
3461 		return offset + total_size;
3462 
3463 	for (row = ram_defs->base_row;
3464 	     row < ram_defs->base_row + ram_defs->num_rows;
3465 	     row++, offset += VFC_RAM_RESP_DWORDS) {
3466 		/* Write VFC RAM command */
3467 		ARR_REG_WR(p_hwfn,
3468 			   p_ptt,
3469 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_WR,
3470 			   ram_cmd, VFC_RAM_CMD_DWORDS);
3471 
3472 		/* Write VFC RAM address */
3473 		SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
3474 		ARR_REG_WR(p_hwfn,
3475 			   p_ptt,
3476 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_ADDR,
3477 			   ram_addr, VFC_RAM_ADDR_DWORDS);
3478 
3479 		/* Read VFC RAM read response */
3480 		ARR_REG_RD(p_hwfn,
3481 			   p_ptt,
3482 			   storm->sem_fast_mem_addr + SEM_FAST_REG_VFC_DATA_RD,
3483 			   dump_buf + offset, VFC_RAM_RESP_DWORDS);
3484 	}
3485 
3486 	return offset;
3487 }
3488 
3489 /* Dumps GRC VFC data. Returns the dumped size in dwords. */
3490 static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
3491 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3492 {
3493 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3494 	u8 storm_id, i;
3495 	u32 offset = 0;
3496 
3497 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
3498 		if (!qed_grc_is_storm_included(p_hwfn,
3499 					       (enum dbg_storms)storm_id) ||
3500 		    !s_storm_defs[storm_id].has_vfc ||
3501 		    (storm_id == DBG_PSTORM_ID && dev_data->platform_id !=
3502 		     PLATFORM_ASIC))
3503 			continue;
3504 
3505 		/* Read CAM */
3506 		offset += qed_grc_dump_vfc_cam(p_hwfn,
3507 					       p_ptt,
3508 					       dump_buf + offset,
3509 					       dump, storm_id);
3510 
3511 		/* Read RAM */
3512 		for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
3513 			offset += qed_grc_dump_vfc_ram(p_hwfn,
3514 						       p_ptt,
3515 						       dump_buf + offset,
3516 						       dump,
3517 						       storm_id,
3518 						       &s_vfc_ram_defs[i]);
3519 	}
3520 
3521 	return offset;
3522 }
3523 
3524 /* Dumps GRC RSS data. Returns the dumped size in dwords. */
3525 static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
3526 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3527 {
3528 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3529 	u32 offset = 0;
3530 	u8 rss_mem_id;
3531 
3532 	for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
3533 		u32 rss_addr, num_entries, entry_width, total_dwords, i;
3534 		struct rss_mem_defs *rss_defs;
3535 		u32 addr, size;
3536 		bool packed;
3537 
3538 		rss_defs = &s_rss_mem_defs[rss_mem_id];
3539 		rss_addr = rss_defs->addr;
3540 		num_entries = rss_defs->num_entries[dev_data->chip_id];
3541 		entry_width = rss_defs->entry_width[dev_data->chip_id];
3542 		total_dwords = (num_entries * entry_width) / 32;
3543 		packed = (entry_width == 16);
3544 
3545 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3546 					       dump_buf + offset,
3547 					       dump,
3548 					       rss_defs->mem_name,
3549 					       0,
3550 					       total_dwords,
3551 					       entry_width,
3552 					       packed,
3553 					       rss_defs->type_name, false, 0);
3554 
3555 		/* Dump RSS data */
3556 		if (!dump) {
3557 			offset += total_dwords;
3558 			continue;
3559 		}
3560 
3561 		addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
3562 		size = RSS_REG_RSS_RAM_DATA_SIZE;
3563 		for (i = 0; i < total_dwords; i += size, rss_addr++) {
3564 			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
3565 			offset += qed_grc_dump_addr_range(p_hwfn,
3566 							  p_ptt,
3567 							  dump_buf + offset,
3568 							  dump,
3569 							  addr,
3570 							  size,
3571 							  false);
3572 		}
3573 	}
3574 
3575 	return offset;
3576 }
3577 
3578 /* Dumps GRC Big RAM. Returns the dumped size in dwords. */
3579 static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
3580 				struct qed_ptt *p_ptt,
3581 				u32 *dump_buf, bool dump, u8 big_ram_id)
3582 {
3583 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3584 	u32 total_blocks, ram_size, offset = 0, i;
3585 	char mem_name[12] = "???_BIG_RAM";
3586 	char type_name[8] = "???_RAM";
3587 	struct big_ram_defs *big_ram;
3588 
3589 	big_ram = &s_big_ram_defs[big_ram_id];
3590 	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
3591 	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
3592 
3593 	strncpy(type_name, big_ram->instance_name,
3594 		strlen(big_ram->instance_name));
3595 	strncpy(mem_name, big_ram->instance_name,
3596 		strlen(big_ram->instance_name));
3597 
3598 	/* Dump memory header */
3599 	offset += qed_grc_dump_mem_hdr(p_hwfn,
3600 				       dump_buf + offset,
3601 				       dump,
3602 				       mem_name,
3603 				       0,
3604 				       ram_size,
3605 				       BIG_RAM_BLOCK_SIZE_BYTES * 8,
3606 				       false, type_name, false, 0);
3607 
3608 	/* Read and dump Big RAM data */
3609 	if (!dump)
3610 		return offset + ram_size;
3611 
3612 	/* Dump Big RAM */
3613 	for (i = 0; i < total_blocks / 2; i++) {
3614 		u32 addr, len;
3615 
3616 		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
3617 		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
3618 		len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
3619 		offset += qed_grc_dump_addr_range(p_hwfn,
3620 						  p_ptt,
3621 						  dump_buf + offset,
3622 						  dump,
3623 						  addr,
3624 						  len,
3625 						  false);
3626 	}
3627 
3628 	return offset;
3629 }
3630 
3631 static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
3632 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3633 {
3634 	bool block_enable[MAX_BLOCK_ID] = { 0 };
3635 	u32 offset = 0, addr;
3636 	bool halted = false;
3637 
3638 	/* Halt MCP */
3639 	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3640 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
3641 		if (!halted)
3642 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
3643 	}
3644 
3645 	/* Dump MCP scratchpad */
3646 	offset += qed_grc_dump_mem(p_hwfn,
3647 				   p_ptt,
3648 				   dump_buf + offset,
3649 				   dump,
3650 				   NULL,
3651 				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
3652 				   MCP_REG_SCRATCH_SIZE,
3653 				   false, 0, false, "MCP", false, 0);
3654 
3655 	/* Dump MCP cpu_reg_file */
3656 	offset += qed_grc_dump_mem(p_hwfn,
3657 				   p_ptt,
3658 				   dump_buf + offset,
3659 				   dump,
3660 				   NULL,
3661 				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
3662 				   MCP_REG_CPU_REG_FILE_SIZE,
3663 				   false, 0, false, "MCP", false, 0);
3664 
3665 	/* Dump MCP registers */
3666 	block_enable[BLOCK_MCP] = true;
3667 	offset += qed_grc_dump_registers(p_hwfn,
3668 					 p_ptt,
3669 					 dump_buf + offset,
3670 					 dump, block_enable, "block", "MCP");
3671 
3672 	/* Dump required non-MCP registers */
3673 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
3674 					dump, 1, "eng", -1, "block", "MCP");
3675 	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
3676 	offset += qed_grc_dump_reg_entry(p_hwfn,
3677 					 p_ptt,
3678 					 dump_buf + offset,
3679 					 dump,
3680 					 addr,
3681 					 1,
3682 					 false);
3683 
3684 	/* Release MCP */
3685 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
3686 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
3687 
3688 	return offset;
3689 }
3690 
3691 /* Dumps the tbus indirect memory for all PHYs. */
3692 static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
3693 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
3694 {
3695 	u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
3696 	char mem_name[32];
3697 	u8 phy_id;
3698 
3699 	for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
3700 		u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
3701 		struct phy_defs *phy_defs;
3702 		u8 *bytes_buf;
3703 
3704 		phy_defs = &s_phy_defs[phy_id];
3705 		addr_lo_addr = phy_defs->base_addr +
3706 			       phy_defs->tbus_addr_lo_addr;
3707 		addr_hi_addr = phy_defs->base_addr +
3708 			       phy_defs->tbus_addr_hi_addr;
3709 		data_lo_addr = phy_defs->base_addr +
3710 			       phy_defs->tbus_data_lo_addr;
3711 		data_hi_addr = phy_defs->base_addr +
3712 			       phy_defs->tbus_data_hi_addr;
3713 		bytes_buf = (u8 *)(dump_buf + offset);
3714 
3715 		if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
3716 			     phy_defs->phy_name) < 0)
3717 			DP_NOTICE(p_hwfn,
3718 				  "Unexpected debug error: invalid PHY memory name\n");
3719 
3720 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3721 					       dump_buf + offset,
3722 					       dump,
3723 					       mem_name,
3724 					       0,
3725 					       PHY_DUMP_SIZE_DWORDS,
3726 					       16, true, mem_name, false, 0);
3727 
3728 		if (!dump) {
3729 			offset += PHY_DUMP_SIZE_DWORDS;
3730 			continue;
3731 		}
3732 
3733 		for (tbus_hi_offset = 0;
3734 		     tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
3735 		     tbus_hi_offset++) {
3736 			qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
3737 			for (tbus_lo_offset = 0; tbus_lo_offset < 256;
3738 			     tbus_lo_offset++) {
3739 				qed_wr(p_hwfn,
3740 				       p_ptt, addr_lo_addr, tbus_lo_offset);
3741 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3742 							    p_ptt,
3743 							    data_lo_addr);
3744 				*(bytes_buf++) = (u8)qed_rd(p_hwfn,
3745 							    p_ptt,
3746 							    data_hi_addr);
3747 			}
3748 		}
3749 
3750 		offset += PHY_DUMP_SIZE_DWORDS;
3751 	}
3752 
3753 	return offset;
3754 }
3755 
3756 static void qed_config_dbg_line(struct qed_hwfn *p_hwfn,
3757 				struct qed_ptt *p_ptt,
3758 				enum block_id block_id,
3759 				u8 line_id,
3760 				u8 enable_mask,
3761 				u8 right_shift,
3762 				u8 force_valid_mask, u8 force_frame_mask)
3763 {
3764 	struct block_defs *block = s_block_defs[block_id];
3765 
3766 	qed_wr(p_hwfn, p_ptt, block->dbg_select_addr, line_id);
3767 	qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, enable_mask);
3768 	qed_wr(p_hwfn, p_ptt, block->dbg_shift_addr, right_shift);
3769 	qed_wr(p_hwfn, p_ptt, block->dbg_force_valid_addr, force_valid_mask);
3770 	qed_wr(p_hwfn, p_ptt, block->dbg_force_frame_addr, force_frame_mask);
3771 }
3772 
3773 /* Dumps Static Debug data. Returns the dumped size in dwords. */
3774 static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
3775 				     struct qed_ptt *p_ptt,
3776 				     u32 *dump_buf, bool dump)
3777 {
3778 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3779 	u32 block_id, line_id, offset = 0;
3780 
3781 	/* Skip static debug if a debug bus recording is in progress */
3782 	if (qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
3783 		return 0;
3784 
3785 	if (dump) {
3786 		DP_VERBOSE(p_hwfn,
3787 			   QED_MSG_DEBUG, "Dumping static debug data...\n");
3788 
3789 		/* Disable all blocks debug output */
3790 		for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3791 			struct block_defs *block = s_block_defs[block_id];
3792 
3793 			if (block->has_dbg_bus[dev_data->chip_id])
3794 				qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr,
3795 				       0);
3796 		}
3797 
3798 		qed_bus_reset_dbg_block(p_hwfn, p_ptt);
3799 		qed_bus_set_framing_mode(p_hwfn,
3800 					 p_ptt, DBG_BUS_FRAME_MODE_8HW_0ST);
3801 		qed_wr(p_hwfn,
3802 		       p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
3803 		qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
3804 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
3805 	}
3806 
3807 	/* Dump all static debug lines for each relevant block */
3808 	for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
3809 		struct block_defs *block = s_block_defs[block_id];
3810 		struct dbg_bus_block *block_desc;
3811 		u32 block_dwords, addr, len;
3812 		u8 dbg_client_id;
3813 
3814 		if (!block->has_dbg_bus[dev_data->chip_id])
3815 			continue;
3816 
3817 		block_desc =
3818 			get_dbg_bus_block_desc(p_hwfn,
3819 					       (enum block_id)block_id);
3820 		block_dwords = NUM_DBG_LINES(block_desc) *
3821 			       STATIC_DEBUG_LINE_DWORDS;
3822 
3823 		/* Dump static section params */
3824 		offset += qed_grc_dump_mem_hdr(p_hwfn,
3825 					       dump_buf + offset,
3826 					       dump,
3827 					       block->name,
3828 					       0,
3829 					       block_dwords,
3830 					       32, false, "STATIC", false, 0);
3831 
3832 		if (!dump) {
3833 			offset += block_dwords;
3834 			continue;
3835 		}
3836 
3837 		/* If all lines are invalid - dump zeros */
3838 		if (dev_data->block_in_reset[block_id]) {
3839 			memset(dump_buf + offset, 0,
3840 			       DWORDS_TO_BYTES(block_dwords));
3841 			offset += block_dwords;
3842 			continue;
3843 		}
3844 
3845 		/* Enable block's client */
3846 		dbg_client_id = block->dbg_client_id[dev_data->chip_id];
3847 		qed_bus_enable_clients(p_hwfn,
3848 				       p_ptt,
3849 				       BIT(dbg_client_id));
3850 
3851 		addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
3852 		len = STATIC_DEBUG_LINE_DWORDS;
3853 		for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_desc);
3854 		     line_id++) {
3855 			/* Configure debug line ID */
3856 			qed_config_dbg_line(p_hwfn,
3857 					    p_ptt,
3858 					    (enum block_id)block_id,
3859 					    (u8)line_id, 0xf, 0, 0, 0);
3860 
3861 			/* Read debug line info */
3862 			offset += qed_grc_dump_addr_range(p_hwfn,
3863 							  p_ptt,
3864 							  dump_buf + offset,
3865 							  dump,
3866 							  addr,
3867 							  len,
3868 							  true);
3869 		}
3870 
3871 		/* Disable block's client and debug output */
3872 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3873 		qed_wr(p_hwfn, p_ptt, block->dbg_enable_addr, 0);
3874 	}
3875 
3876 	if (dump) {
3877 		qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
3878 		qed_bus_enable_clients(p_hwfn, p_ptt, 0);
3879 	}
3880 
3881 	return offset;
3882 }
3883 
3884 /* Performs GRC Dump to the specified buffer.
3885  * Returns the dumped size in dwords.
3886  */
3887 static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
3888 				    struct qed_ptt *p_ptt,
3889 				    u32 *dump_buf,
3890 				    bool dump, u32 *num_dumped_dwords)
3891 {
3892 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
3893 	bool parities_masked = false;
3894 	u8 i, port_mode = 0;
3895 	u32 offset = 0;
3896 
3897 	*num_dumped_dwords = 0;
3898 
3899 	if (dump) {
3900 		/* Find port mode */
3901 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
3902 		case 0:
3903 			port_mode = 1;
3904 			break;
3905 		case 1:
3906 			port_mode = 2;
3907 			break;
3908 		case 2:
3909 			port_mode = 4;
3910 			break;
3911 		}
3912 
3913 		/* Update reset state */
3914 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3915 	}
3916 
3917 	/* Dump global params */
3918 	offset += qed_dump_common_global_params(p_hwfn,
3919 						p_ptt,
3920 						dump_buf + offset, dump, 4);
3921 	offset += qed_dump_str_param(dump_buf + offset,
3922 				     dump, "dump-type", "grc-dump");
3923 	offset += qed_dump_num_param(dump_buf + offset,
3924 				     dump,
3925 				     "num-lcids",
3926 				     qed_grc_get_param(p_hwfn,
3927 						DBG_GRC_PARAM_NUM_LCIDS));
3928 	offset += qed_dump_num_param(dump_buf + offset,
3929 				     dump,
3930 				     "num-ltids",
3931 				     qed_grc_get_param(p_hwfn,
3932 						DBG_GRC_PARAM_NUM_LTIDS));
3933 	offset += qed_dump_num_param(dump_buf + offset,
3934 				     dump, "num-ports", port_mode);
3935 
3936 	/* Dump reset registers (dumped before taking blocks out of reset ) */
3937 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3938 		offset += qed_grc_dump_reset_regs(p_hwfn,
3939 						  p_ptt,
3940 						  dump_buf + offset, dump);
3941 
3942 	/* Take all blocks out of reset (using reset registers) */
3943 	if (dump) {
3944 		qed_grc_unreset_blocks(p_hwfn, p_ptt);
3945 		qed_update_blocks_reset_state(p_hwfn, p_ptt);
3946 	}
3947 
3948 	/* Disable all parities using MFW command */
3949 	if (dump &&
3950 	    !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
3951 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
3952 		if (!parities_masked) {
3953 			DP_NOTICE(p_hwfn,
3954 				  "Failed to mask parities using MFW\n");
3955 			if (qed_grc_get_param
3956 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
3957 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
3958 		}
3959 	}
3960 
3961 	/* Dump modified registers (dumped before modifying them) */
3962 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
3963 		offset += qed_grc_dump_modified_regs(p_hwfn,
3964 						     p_ptt,
3965 						     dump_buf + offset, dump);
3966 
3967 	/* Stall storms */
3968 	if (dump &&
3969 	    (qed_grc_is_included(p_hwfn,
3970 				 DBG_GRC_PARAM_DUMP_IOR) ||
3971 	     qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
3972 		qed_grc_stall_storms(p_hwfn, p_ptt, true);
3973 
3974 	/* Dump all regs  */
3975 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
3976 		bool block_enable[MAX_BLOCK_ID];
3977 
3978 		/* Dump all blocks except MCP */
3979 		for (i = 0; i < MAX_BLOCK_ID; i++)
3980 			block_enable[i] = true;
3981 		block_enable[BLOCK_MCP] = false;
3982 		offset += qed_grc_dump_registers(p_hwfn,
3983 						 p_ptt,
3984 						 dump_buf +
3985 						 offset,
3986 						 dump,
3987 						 block_enable, NULL, NULL);
3988 
3989 		/* Dump special registers */
3990 		offset += qed_grc_dump_special_regs(p_hwfn,
3991 						    p_ptt,
3992 						    dump_buf + offset, dump);
3993 	}
3994 
3995 	/* Dump memories */
3996 	offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
3997 
3998 	/* Dump MCP */
3999 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
4000 		offset += qed_grc_dump_mcp(p_hwfn,
4001 					   p_ptt, dump_buf + offset, dump);
4002 
4003 	/* Dump context */
4004 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
4005 		offset += qed_grc_dump_ctx(p_hwfn,
4006 					   p_ptt, dump_buf + offset, dump);
4007 
4008 	/* Dump RSS memories */
4009 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
4010 		offset += qed_grc_dump_rss(p_hwfn,
4011 					   p_ptt, dump_buf + offset, dump);
4012 
4013 	/* Dump Big RAM */
4014 	for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
4015 		if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
4016 			offset += qed_grc_dump_big_ram(p_hwfn,
4017 						       p_ptt,
4018 						       dump_buf + offset,
4019 						       dump, i);
4020 
4021 	/* Dump IORs */
4022 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR))
4023 		offset += qed_grc_dump_iors(p_hwfn,
4024 					    p_ptt, dump_buf + offset, dump);
4025 
4026 	/* Dump VFC */
4027 	if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))
4028 		offset += qed_grc_dump_vfc(p_hwfn,
4029 					   p_ptt, dump_buf + offset, dump);
4030 
4031 	/* Dump PHY tbus */
4032 	if (qed_grc_is_included(p_hwfn,
4033 				DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
4034 	    CHIP_K2 && dev_data->platform_id == PLATFORM_ASIC)
4035 		offset += qed_grc_dump_phy(p_hwfn,
4036 					   p_ptt, dump_buf + offset, dump);
4037 
4038 	/* Dump static debug data  */
4039 	if (qed_grc_is_included(p_hwfn,
4040 				DBG_GRC_PARAM_DUMP_STATIC) &&
4041 	    dev_data->bus.state == DBG_BUS_STATE_IDLE)
4042 		offset += qed_grc_dump_static_debug(p_hwfn,
4043 						    p_ptt,
4044 						    dump_buf + offset, dump);
4045 
4046 	/* Dump last section */
4047 	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4048 
4049 	if (dump) {
4050 		/* Unstall storms */
4051 		if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
4052 			qed_grc_stall_storms(p_hwfn, p_ptt, false);
4053 
4054 		/* Clear parity status */
4055 		qed_grc_clear_all_prty(p_hwfn, p_ptt);
4056 
4057 		/* Enable all parities using MFW command */
4058 		if (parities_masked)
4059 			qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
4060 	}
4061 
4062 	*num_dumped_dwords = offset;
4063 
4064 	return DBG_STATUS_OK;
4065 }
4066 
4067 /* Writes the specified failing Idle Check rule to the specified buffer.
4068  * Returns the dumped size in dwords.
4069  */
4070 static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
4071 				     struct qed_ptt *p_ptt,
4072 				     u32 *
4073 				     dump_buf,
4074 				     bool dump,
4075 				     u16 rule_id,
4076 				     const struct dbg_idle_chk_rule *rule,
4077 				     u16 fail_entry_id, u32 *cond_reg_values)
4078 {
4079 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4080 	const struct dbg_idle_chk_cond_reg *cond_regs;
4081 	const struct dbg_idle_chk_info_reg *info_regs;
4082 	u32 i, next_reg_offset = 0, offset = 0;
4083 	struct dbg_idle_chk_result_hdr *hdr;
4084 	const union dbg_idle_chk_reg *regs;
4085 	u8 reg_id;
4086 
4087 	hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
4088 	regs = &((const union dbg_idle_chk_reg *)
4089 		 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)[rule->reg_offset];
4090 	cond_regs = &regs[0].cond_reg;
4091 	info_regs = &regs[rule->num_cond_regs].info_reg;
4092 
4093 	/* Dump rule data */
4094 	if (dump) {
4095 		memset(hdr, 0, sizeof(*hdr));
4096 		hdr->rule_id = rule_id;
4097 		hdr->mem_entry_id = fail_entry_id;
4098 		hdr->severity = rule->severity;
4099 		hdr->num_dumped_cond_regs = rule->num_cond_regs;
4100 	}
4101 
4102 	offset += IDLE_CHK_RESULT_HDR_DWORDS;
4103 
4104 	/* Dump condition register values */
4105 	for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
4106 		const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
4107 		struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4108 
4109 		reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4110 			  (dump_buf + offset);
4111 
4112 		/* Write register header */
4113 		if (!dump) {
4114 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
4115 			    reg->entry_size;
4116 			continue;
4117 		}
4118 
4119 		offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4120 		memset(reg_hdr, 0, sizeof(*reg_hdr));
4121 		reg_hdr->start_entry = reg->start_entry;
4122 		reg_hdr->size = reg->entry_size;
4123 		SET_FIELD(reg_hdr->data,
4124 			  DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
4125 			  reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
4126 		SET_FIELD(reg_hdr->data,
4127 			  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
4128 
4129 		/* Write register values */
4130 		for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
4131 			dump_buf[offset] = cond_reg_values[next_reg_offset];
4132 	}
4133 
4134 	/* Dump info register values */
4135 	for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
4136 		const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
4137 		u32 block_id;
4138 
4139 		/* Check if register's block is in reset */
4140 		if (!dump) {
4141 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
4142 			continue;
4143 		}
4144 
4145 		block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
4146 		if (block_id >= MAX_BLOCK_ID) {
4147 			DP_NOTICE(p_hwfn, "Invalid block_id\n");
4148 			return 0;
4149 		}
4150 
4151 		if (!dev_data->block_in_reset[block_id]) {
4152 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
4153 			bool wide_bus, eval_mode, mode_match = true;
4154 			u16 modes_buf_offset;
4155 			u32 addr;
4156 
4157 			reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
4158 				  (dump_buf + offset);
4159 
4160 			/* Check mode */
4161 			eval_mode = GET_FIELD(reg->mode.data,
4162 					      DBG_MODE_HDR_EVAL_MODE) > 0;
4163 			if (eval_mode) {
4164 				modes_buf_offset =
4165 				    GET_FIELD(reg->mode.data,
4166 					      DBG_MODE_HDR_MODES_BUF_OFFSET);
4167 				mode_match =
4168 					qed_is_mode_match(p_hwfn,
4169 							  &modes_buf_offset);
4170 			}
4171 
4172 			if (!mode_match)
4173 				continue;
4174 
4175 			addr = GET_FIELD(reg->data,
4176 					 DBG_IDLE_CHK_INFO_REG_ADDRESS);
4177 			wide_bus = GET_FIELD(reg->data,
4178 					     DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
4179 
4180 			/* Write register header */
4181 			offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
4182 			hdr->num_dumped_info_regs++;
4183 			memset(reg_hdr, 0, sizeof(*reg_hdr));
4184 			reg_hdr->size = reg->size;
4185 			SET_FIELD(reg_hdr->data,
4186 				  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
4187 				  rule->num_cond_regs + reg_id);
4188 
4189 			/* Write register values */
4190 			offset += qed_grc_dump_addr_range(p_hwfn,
4191 							  p_ptt,
4192 							  dump_buf + offset,
4193 							  dump,
4194 							  addr,
4195 							  reg->size, wide_bus);
4196 		}
4197 	}
4198 
4199 	return offset;
4200 }
4201 
4202 /* Dumps idle check rule entries. Returns the dumped size in dwords. */
4203 static u32
4204 qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4205 			       u32 *dump_buf, bool dump,
4206 			       const struct dbg_idle_chk_rule *input_rules,
4207 			       u32 num_input_rules, u32 *num_failing_rules)
4208 {
4209 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4210 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
4211 	u32 i, offset = 0;
4212 	u16 entry_id;
4213 	u8 reg_id;
4214 
4215 	*num_failing_rules = 0;
4216 
4217 	for (i = 0; i < num_input_rules; i++) {
4218 		const struct dbg_idle_chk_cond_reg *cond_regs;
4219 		const struct dbg_idle_chk_rule *rule;
4220 		const union dbg_idle_chk_reg *regs;
4221 		u16 num_reg_entries = 1;
4222 		bool check_rule = true;
4223 		const u32 *imm_values;
4224 
4225 		rule = &input_rules[i];
4226 		regs = &((const union dbg_idle_chk_reg *)
4227 			 s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr)
4228 			[rule->reg_offset];
4229 		cond_regs = &regs[0].cond_reg;
4230 		imm_values = &s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr
4231 			     [rule->imm_offset];
4232 
4233 		/* Check if all condition register blocks are out of reset, and
4234 		 * find maximal number of entries (all condition registers that
4235 		 * are memories must have the same size, which is > 1).
4236 		 */
4237 		for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
4238 		     reg_id++) {
4239 			u32 block_id =
4240 				GET_FIELD(cond_regs[reg_id].data,
4241 					  DBG_IDLE_CHK_COND_REG_BLOCK_ID);
4242 
4243 			if (block_id >= MAX_BLOCK_ID) {
4244 				DP_NOTICE(p_hwfn, "Invalid block_id\n");
4245 				return 0;
4246 			}
4247 
4248 			check_rule = !dev_data->block_in_reset[block_id];
4249 			if (cond_regs[reg_id].num_entries > num_reg_entries)
4250 				num_reg_entries = cond_regs[reg_id].num_entries;
4251 		}
4252 
4253 		if (!check_rule && dump)
4254 			continue;
4255 
4256 		/* Go over all register entries (number of entries is the same
4257 		 * for all condition registers).
4258 		 */
4259 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
4260 			u32 next_reg_offset = 0;
4261 
4262 			if (!dump) {
4263 				offset += qed_idle_chk_dump_failure(p_hwfn,
4264 							p_ptt,
4265 							dump_buf + offset,
4266 							false,
4267 							rule->rule_id,
4268 							rule,
4269 							entry_id,
4270 							NULL);
4271 				(*num_failing_rules)++;
4272 				break;
4273 			}
4274 
4275 			/* Read current entry of all condition registers */
4276 			for (reg_id = 0; reg_id < rule->num_cond_regs;
4277 			     reg_id++) {
4278 				const struct dbg_idle_chk_cond_reg *reg =
4279 				    &cond_regs[reg_id];
4280 				u32 padded_entry_size, addr;
4281 				bool wide_bus;
4282 
4283 				/* Find GRC address (if it's a memory, the
4284 				 * address of the specific entry is calculated).
4285 				 */
4286 				addr = GET_FIELD(reg->data,
4287 						 DBG_IDLE_CHK_COND_REG_ADDRESS);
4288 				wide_bus =
4289 				    GET_FIELD(reg->data,
4290 					      DBG_IDLE_CHK_COND_REG_WIDE_BUS);
4291 				if (reg->num_entries > 1 ||
4292 				    reg->start_entry > 0) {
4293 					padded_entry_size =
4294 					    reg->entry_size > 1 ?
4295 					    roundup_pow_of_two(reg->entry_size)
4296 					    : 1;
4297 					addr += (reg->start_entry + entry_id) *
4298 						padded_entry_size;
4299 				}
4300 
4301 				/* Read registers */
4302 				if (next_reg_offset + reg->entry_size >=
4303 				    IDLE_CHK_MAX_ENTRIES_SIZE) {
4304 					DP_NOTICE(p_hwfn,
4305 						  "idle check registers entry is too large\n");
4306 					return 0;
4307 				}
4308 
4309 				next_reg_offset +=
4310 				    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4311 							    cond_reg_values +
4312 							    next_reg_offset,
4313 							    dump, addr,
4314 							    reg->entry_size,
4315 							    wide_bus);
4316 			}
4317 
4318 			/* Call rule condition function.
4319 			 * If returns true, it's a failure.
4320 			 */
4321 			if ((*cond_arr[rule->cond_id]) (cond_reg_values,
4322 							imm_values)) {
4323 				offset += qed_idle_chk_dump_failure(p_hwfn,
4324 							p_ptt,
4325 							dump_buf + offset,
4326 							dump,
4327 							rule->rule_id,
4328 							rule,
4329 							entry_id,
4330 							cond_reg_values);
4331 				(*num_failing_rules)++;
4332 				break;
4333 			}
4334 		}
4335 	}
4336 
4337 	return offset;
4338 }
4339 
4340 /* Performs Idle Check Dump to the specified buffer.
4341  * Returns the dumped size in dwords.
4342  */
4343 static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
4344 			     struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4345 {
4346 	u32 num_failing_rules_offset, offset = 0, input_offset = 0;
4347 	u32 num_failing_rules = 0;
4348 
4349 	/* Dump global params */
4350 	offset += qed_dump_common_global_params(p_hwfn,
4351 						p_ptt,
4352 						dump_buf + offset, dump, 1);
4353 	offset += qed_dump_str_param(dump_buf + offset,
4354 				     dump, "dump-type", "idle-chk");
4355 
4356 	/* Dump idle check section header with a single parameter */
4357 	offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
4358 	num_failing_rules_offset = offset;
4359 	offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
4360 
4361 	while (input_offset <
4362 	       s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].size_in_dwords) {
4363 		const struct dbg_idle_chk_cond_hdr *cond_hdr =
4364 			(const struct dbg_idle_chk_cond_hdr *)
4365 			&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr
4366 			[input_offset++];
4367 		bool eval_mode, mode_match = true;
4368 		u32 curr_failing_rules;
4369 		u16 modes_buf_offset;
4370 
4371 		/* Check mode */
4372 		eval_mode = GET_FIELD(cond_hdr->mode.data,
4373 				      DBG_MODE_HDR_EVAL_MODE) > 0;
4374 		if (eval_mode) {
4375 			modes_buf_offset =
4376 				GET_FIELD(cond_hdr->mode.data,
4377 					  DBG_MODE_HDR_MODES_BUF_OFFSET);
4378 			mode_match = qed_is_mode_match(p_hwfn,
4379 						       &modes_buf_offset);
4380 		}
4381 
4382 		if (mode_match) {
4383 			offset +=
4384 			    qed_idle_chk_dump_rule_entries(p_hwfn,
4385 				p_ptt,
4386 				dump_buf + offset,
4387 				dump,
4388 				(const struct dbg_idle_chk_rule *)
4389 				&s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].
4390 				ptr[input_offset],
4391 				cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS,
4392 				&curr_failing_rules);
4393 			num_failing_rules += curr_failing_rules;
4394 		}
4395 
4396 		input_offset += cond_hdr->data_size;
4397 	}
4398 
4399 	/* Overwrite num_rules parameter */
4400 	if (dump)
4401 		qed_dump_num_param(dump_buf + num_failing_rules_offset,
4402 				   dump, "num_rules", num_failing_rules);
4403 
4404 	/* Dump last section */
4405 	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4406 
4407 	return offset;
4408 }
4409 
4410 /* Finds the meta data image in NVRAM */
4411 static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
4412 					    struct qed_ptt *p_ptt,
4413 					    u32 image_type,
4414 					    u32 *nvram_offset_bytes,
4415 					    u32 *nvram_size_bytes)
4416 {
4417 	u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
4418 	struct mcp_file_att file_att;
4419 	int nvm_result;
4420 
4421 	/* Call NVRAM get file command */
4422 	nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
4423 					p_ptt,
4424 					DRV_MSG_CODE_NVM_GET_FILE_ATT,
4425 					image_type,
4426 					&ret_mcp_resp,
4427 					&ret_mcp_param,
4428 					&ret_txn_size, (u32 *)&file_att);
4429 
4430 	/* Check response */
4431 	if (nvm_result ||
4432 	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4433 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4434 
4435 	/* Update return values */
4436 	*nvram_offset_bytes = file_att.nvm_start_addr;
4437 	*nvram_size_bytes = file_att.len;
4438 
4439 	DP_VERBOSE(p_hwfn,
4440 		   QED_MSG_DEBUG,
4441 		   "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
4442 		   image_type, *nvram_offset_bytes, *nvram_size_bytes);
4443 
4444 	/* Check alignment */
4445 	if (*nvram_size_bytes & 0x3)
4446 		return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
4447 
4448 	return DBG_STATUS_OK;
4449 }
4450 
4451 /* Reads data from NVRAM */
4452 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
4453 				      struct qed_ptt *p_ptt,
4454 				      u32 nvram_offset_bytes,
4455 				      u32 nvram_size_bytes, u32 *ret_buf)
4456 {
4457 	u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
4458 	s32 bytes_left = nvram_size_bytes;
4459 	u32 read_offset = 0;
4460 
4461 	DP_VERBOSE(p_hwfn,
4462 		   QED_MSG_DEBUG,
4463 		   "nvram_read: reading image of size %d bytes from NVRAM\n",
4464 		   nvram_size_bytes);
4465 
4466 	do {
4467 		bytes_to_copy =
4468 		    (bytes_left >
4469 		     MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
4470 
4471 		/* Call NVRAM read command */
4472 		if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4473 				       DRV_MSG_CODE_NVM_READ_NVRAM,
4474 				       (nvram_offset_bytes +
4475 					read_offset) |
4476 				       (bytes_to_copy <<
4477 					DRV_MB_PARAM_NVM_LEN_SHIFT),
4478 				       &ret_mcp_resp, &ret_mcp_param,
4479 				       &ret_read_size,
4480 				       (u32 *)((u8 *)ret_buf + read_offset)))
4481 			return DBG_STATUS_NVRAM_READ_FAILED;
4482 
4483 		/* Check response */
4484 		if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
4485 			return DBG_STATUS_NVRAM_READ_FAILED;
4486 
4487 		/* Update read offset */
4488 		read_offset += ret_read_size;
4489 		bytes_left -= ret_read_size;
4490 	} while (bytes_left > 0);
4491 
4492 	return DBG_STATUS_OK;
4493 }
4494 
4495 /* Get info on the MCP Trace data in the scratchpad:
4496  * - trace_data_grc_addr (OUT): trace data GRC address in bytes
4497  * - trace_data_size (OUT): trace data size in bytes (without the header)
4498  */
4499 static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
4500 						   struct qed_ptt *p_ptt,
4501 						   u32 *trace_data_grc_addr,
4502 						   u32 *trace_data_size)
4503 {
4504 	u32 spad_trace_offsize, signature;
4505 
4506 	/* Read trace section offsize structure from MCP scratchpad */
4507 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4508 
4509 	/* Extract trace section address from offsize (in scratchpad) */
4510 	*trace_data_grc_addr =
4511 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
4512 
4513 	/* Read signature from MCP trace section */
4514 	signature = qed_rd(p_hwfn, p_ptt,
4515 			   *trace_data_grc_addr +
4516 			   offsetof(struct mcp_trace, signature));
4517 
4518 	if (signature != MFW_TRACE_SIGNATURE)
4519 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4520 
4521 	/* Read trace size from MCP trace section */
4522 	*trace_data_size = qed_rd(p_hwfn,
4523 				  p_ptt,
4524 				  *trace_data_grc_addr +
4525 				  offsetof(struct mcp_trace, size));
4526 
4527 	return DBG_STATUS_OK;
4528 }
4529 
4530 /* Reads MCP trace meta data image from NVRAM
4531  * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
4532  * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
4533  *			      loaded from file).
4534  * - trace_meta_size (OUT):   size in bytes of the trace meta data.
4535  */
4536 static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
4537 						   struct qed_ptt *p_ptt,
4538 						   u32 trace_data_size_bytes,
4539 						   u32 *running_bundle_id,
4540 						   u32 *trace_meta_offset,
4541 						   u32 *trace_meta_size)
4542 {
4543 	u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
4544 
4545 	/* Read MCP trace section offsize structure from MCP scratchpad */
4546 	spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
4547 
4548 	/* Find running bundle ID */
4549 	running_mfw_addr =
4550 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
4551 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
4552 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
4553 	if (*running_bundle_id > 1)
4554 		return DBG_STATUS_INVALID_NVRAM_BUNDLE;
4555 
4556 	/* Find image in NVRAM */
4557 	nvram_image_type =
4558 	    (*running_bundle_id ==
4559 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
4560 	return qed_find_nvram_image(p_hwfn,
4561 				    p_ptt,
4562 				    nvram_image_type,
4563 				    trace_meta_offset, trace_meta_size);
4564 }
4565 
4566 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
4567 static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
4568 					       struct qed_ptt *p_ptt,
4569 					       u32 nvram_offset_in_bytes,
4570 					       u32 size_in_bytes, u32 *buf)
4571 {
4572 	u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
4573 	enum dbg_status status;
4574 	u32 signature;
4575 
4576 	/* Read meta data from NVRAM */
4577 	status = qed_nvram_read(p_hwfn,
4578 				p_ptt,
4579 				nvram_offset_in_bytes, size_in_bytes, buf);
4580 	if (status != DBG_STATUS_OK)
4581 		return status;
4582 
4583 	/* Extract and check first signature */
4584 	signature = qed_read_unaligned_dword(byte_buf);
4585 	byte_buf += sizeof(signature);
4586 	if (signature != NVM_MAGIC_VALUE)
4587 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4588 
4589 	/* Extract number of modules */
4590 	modules_num = *(byte_buf++);
4591 
4592 	/* Skip all modules */
4593 	for (i = 0; i < modules_num; i++) {
4594 		module_len = *(byte_buf++);
4595 		byte_buf += module_len;
4596 	}
4597 
4598 	/* Extract and check second signature */
4599 	signature = qed_read_unaligned_dword(byte_buf);
4600 	byte_buf += sizeof(signature);
4601 	if (signature != NVM_MAGIC_VALUE)
4602 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
4603 
4604 	return DBG_STATUS_OK;
4605 }
4606 
4607 /* Dump MCP Trace */
4608 static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
4609 					  struct qed_ptt *p_ptt,
4610 					  u32 *dump_buf,
4611 					  bool dump, u32 *num_dumped_dwords)
4612 {
4613 	u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
4614 	u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
4615 	u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
4616 	enum dbg_status status;
4617 	bool mcp_access;
4618 	int halted = 0;
4619 
4620 	*num_dumped_dwords = 0;
4621 
4622 	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
4623 
4624 	/* Get trace data info */
4625 	status = qed_mcp_trace_get_data_info(p_hwfn,
4626 					     p_ptt,
4627 					     &trace_data_grc_addr,
4628 					     &trace_data_size_bytes);
4629 	if (status != DBG_STATUS_OK)
4630 		return status;
4631 
4632 	/* Dump global params */
4633 	offset += qed_dump_common_global_params(p_hwfn,
4634 						p_ptt,
4635 						dump_buf + offset, dump, 1);
4636 	offset += qed_dump_str_param(dump_buf + offset,
4637 				     dump, "dump-type", "mcp-trace");
4638 
4639 	/* Halt MCP while reading from scratchpad so the read data will be
4640 	 * consistent. if halt fails, MCP trace is taken anyway, with a small
4641 	 * risk that it may be corrupt.
4642 	 */
4643 	if (dump && mcp_access) {
4644 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
4645 		if (!halted)
4646 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
4647 	}
4648 
4649 	/* Find trace data size */
4650 	trace_data_size_dwords =
4651 	    DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
4652 			 BYTES_IN_DWORD);
4653 
4654 	/* Dump trace data section header and param */
4655 	offset += qed_dump_section_hdr(dump_buf + offset,
4656 				       dump, "mcp_trace_data", 1);
4657 	offset += qed_dump_num_param(dump_buf + offset,
4658 				     dump, "size", trace_data_size_dwords);
4659 
4660 	/* Read trace data from scratchpad into dump buffer */
4661 	offset += qed_grc_dump_addr_range(p_hwfn,
4662 					  p_ptt,
4663 					  dump_buf + offset,
4664 					  dump,
4665 					  BYTES_TO_DWORDS(trace_data_grc_addr),
4666 					  trace_data_size_dwords, false);
4667 
4668 	/* Resume MCP (only if halt succeeded) */
4669 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
4670 		DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
4671 
4672 	/* Dump trace meta section header */
4673 	offset += qed_dump_section_hdr(dump_buf + offset,
4674 				       dump, "mcp_trace_meta", 1);
4675 
4676 	/* Read trace meta info (trace_meta_size_bytes is dword-aligned) */
4677 	if (mcp_access) {
4678 		status = qed_mcp_trace_get_meta_info(p_hwfn,
4679 						     p_ptt,
4680 						     trace_data_size_bytes,
4681 						     &running_bundle_id,
4682 						     &trace_meta_offset_bytes,
4683 						     &trace_meta_size_bytes);
4684 		if (status == DBG_STATUS_OK)
4685 			trace_meta_size_dwords =
4686 				BYTES_TO_DWORDS(trace_meta_size_bytes);
4687 	}
4688 
4689 	/* Dump trace meta size param */
4690 	offset += qed_dump_num_param(dump_buf + offset,
4691 				     dump, "size", trace_meta_size_dwords);
4692 
4693 	/* Read trace meta image into dump buffer */
4694 	if (dump && trace_meta_size_dwords)
4695 		status = qed_mcp_trace_read_meta(p_hwfn,
4696 						 p_ptt,
4697 						 trace_meta_offset_bytes,
4698 						 trace_meta_size_bytes,
4699 						 dump_buf + offset);
4700 	if (status == DBG_STATUS_OK)
4701 		offset += trace_meta_size_dwords;
4702 
4703 	/* Dump last section */
4704 	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4705 
4706 	*num_dumped_dwords = offset;
4707 
4708 	/* If no mcp access, indicate that the dump doesn't contain the meta
4709 	 * data from NVRAM.
4710 	 */
4711 	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
4712 }
4713 
4714 /* Dump GRC FIFO */
4715 static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
4716 					 struct qed_ptt *p_ptt,
4717 					 u32 *dump_buf,
4718 					 bool dump, u32 *num_dumped_dwords)
4719 {
4720 	u32 dwords_read, size_param_offset, offset = 0;
4721 	bool fifo_has_data;
4722 
4723 	*num_dumped_dwords = 0;
4724 
4725 	/* Dump global params */
4726 	offset += qed_dump_common_global_params(p_hwfn,
4727 						p_ptt,
4728 						dump_buf + offset, dump, 1);
4729 	offset += qed_dump_str_param(dump_buf + offset,
4730 				     dump, "dump-type", "reg-fifo");
4731 
4732 	/* Dump fifo data section header and param. The size param is 0 for
4733 	 * now, and is overwritten after reading the FIFO.
4734 	 */
4735 	offset += qed_dump_section_hdr(dump_buf + offset,
4736 				       dump, "reg_fifo_data", 1);
4737 	size_param_offset = offset;
4738 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4739 
4740 	if (!dump) {
4741 		/* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
4742 		 * test how much data is available, except for reading it.
4743 		 */
4744 		offset += REG_FIFO_DEPTH_DWORDS;
4745 		goto out;
4746 	}
4747 
4748 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4749 			       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4750 
4751 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4752 	 * and must be accessed atomically. Test for dwords_read not passing
4753 	 * buffer size since more entries could be added to the buffer as we are
4754 	 * emptying it.
4755 	 */
4756 	for (dwords_read = 0;
4757 	     fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
4758 	     dwords_read += REG_FIFO_ELEMENT_DWORDS, offset +=
4759 	     REG_FIFO_ELEMENT_DWORDS) {
4760 		if (qed_dmae_grc2host(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO,
4761 				      (u64)(uintptr_t)(&dump_buf[offset]),
4762 				      REG_FIFO_ELEMENT_DWORDS, 0))
4763 			return DBG_STATUS_DMAE_FAILED;
4764 		fifo_has_data = qed_rd(p_hwfn, p_ptt,
4765 				       GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
4766 	}
4767 
4768 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4769 			   dwords_read);
4770 out:
4771 	/* Dump last section */
4772 	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4773 
4774 	*num_dumped_dwords = offset;
4775 
4776 	return DBG_STATUS_OK;
4777 }
4778 
4779 /* Dump IGU FIFO */
4780 static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
4781 					 struct qed_ptt *p_ptt,
4782 					 u32 *dump_buf,
4783 					 bool dump, u32 *num_dumped_dwords)
4784 {
4785 	u32 dwords_read, size_param_offset, offset = 0;
4786 	bool fifo_has_data;
4787 
4788 	*num_dumped_dwords = 0;
4789 
4790 	/* Dump global params */
4791 	offset += qed_dump_common_global_params(p_hwfn,
4792 						p_ptt,
4793 						dump_buf + offset, dump, 1);
4794 	offset += qed_dump_str_param(dump_buf + offset,
4795 				     dump, "dump-type", "igu-fifo");
4796 
4797 	/* Dump fifo data section header and param. The size param is 0 for
4798 	 * now, and is overwritten after reading the FIFO.
4799 	 */
4800 	offset += qed_dump_section_hdr(dump_buf + offset,
4801 				       dump, "igu_fifo_data", 1);
4802 	size_param_offset = offset;
4803 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4804 
4805 	if (!dump) {
4806 		/* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
4807 		 * test how much data is available, except for reading it.
4808 		 */
4809 		offset += IGU_FIFO_DEPTH_DWORDS;
4810 		goto out;
4811 	}
4812 
4813 	fifo_has_data = qed_rd(p_hwfn, p_ptt,
4814 			       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4815 
4816 	/* Pull available data from fifo. Use DMAE since this is widebus memory
4817 	 * and must be accessed atomically. Test for dwords_read not passing
4818 	 * buffer size since more entries could be added to the buffer as we are
4819 	 * emptying it.
4820 	 */
4821 	for (dwords_read = 0;
4822 	     fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
4823 	     dwords_read += IGU_FIFO_ELEMENT_DWORDS, offset +=
4824 	     IGU_FIFO_ELEMENT_DWORDS) {
4825 		if (qed_dmae_grc2host(p_hwfn, p_ptt,
4826 				      IGU_REG_ERROR_HANDLING_MEMORY,
4827 				      (u64)(uintptr_t)(&dump_buf[offset]),
4828 				      IGU_FIFO_ELEMENT_DWORDS, 0))
4829 			return DBG_STATUS_DMAE_FAILED;
4830 		fifo_has_data =	qed_rd(p_hwfn, p_ptt,
4831 				       IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
4832 	}
4833 
4834 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4835 			   dwords_read);
4836 out:
4837 	/* Dump last section */
4838 	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4839 
4840 	*num_dumped_dwords = offset;
4841 
4842 	return DBG_STATUS_OK;
4843 }
4844 
4845 /* Protection Override dump */
4846 static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
4847 						    struct qed_ptt *p_ptt,
4848 						    u32 *dump_buf,
4849 						    bool dump,
4850 						    u32 *num_dumped_dwords)
4851 {
4852 	u32 size_param_offset, override_window_dwords, offset = 0;
4853 
4854 	*num_dumped_dwords = 0;
4855 
4856 	/* Dump global params */
4857 	offset += qed_dump_common_global_params(p_hwfn,
4858 						p_ptt,
4859 						dump_buf + offset, dump, 1);
4860 	offset += qed_dump_str_param(dump_buf + offset,
4861 				     dump, "dump-type", "protection-override");
4862 
4863 	/* Dump data section header and param. The size param is 0 for now,
4864 	 * and is overwritten after reading the data.
4865 	 */
4866 	offset += qed_dump_section_hdr(dump_buf + offset,
4867 				       dump, "protection_override_data", 1);
4868 	size_param_offset = offset;
4869 	offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
4870 
4871 	if (!dump) {
4872 		offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
4873 		goto out;
4874 	}
4875 
4876 	/* Add override window info to buffer */
4877 	override_window_dwords =
4878 		qed_rd(p_hwfn, p_ptt,
4879 		       GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
4880 		       PROTECTION_OVERRIDE_ELEMENT_DWORDS;
4881 	if (qed_dmae_grc2host(p_hwfn, p_ptt,
4882 			      GRC_REG_PROTECTION_OVERRIDE_WINDOW,
4883 			      (u64)(uintptr_t)(dump_buf + offset),
4884 			      override_window_dwords, 0))
4885 		return DBG_STATUS_DMAE_FAILED;
4886 	offset += override_window_dwords;
4887 	qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
4888 			   override_window_dwords);
4889 out:
4890 	/* Dump last section */
4891 	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4892 
4893 	*num_dumped_dwords = offset;
4894 
4895 	return DBG_STATUS_OK;
4896 }
4897 
4898 /* Performs FW Asserts Dump to the specified buffer.
4899  * Returns the dumped size in dwords.
4900  */
4901 static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
4902 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
4903 {
4904 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4905 	struct fw_asserts_ram_section *asserts;
4906 	char storm_letter_str[2] = "?";
4907 	struct fw_info fw_info;
4908 	u32 offset = 0;
4909 	u8 storm_id;
4910 
4911 	/* Dump global params */
4912 	offset += qed_dump_common_global_params(p_hwfn,
4913 						p_ptt,
4914 						dump_buf + offset, dump, 1);
4915 	offset += qed_dump_str_param(dump_buf + offset,
4916 				     dump, "dump-type", "fw-asserts");
4917 
4918 	/* Find Storm dump size */
4919 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
4920 		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
4921 		struct storm_defs *storm = &s_storm_defs[storm_id];
4922 		u32 last_list_idx, addr;
4923 
4924 		if (dev_data->block_in_reset[storm->block_id])
4925 			continue;
4926 
4927 		/* Read FW info for the current Storm */
4928 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
4929 
4930 		asserts = &fw_info.fw_asserts_section;
4931 
4932 		/* Dump FW Asserts section header and params */
4933 		storm_letter_str[0] = storm->letter;
4934 		offset += qed_dump_section_hdr(dump_buf + offset,
4935 					       dump, "fw_asserts", 2);
4936 		offset += qed_dump_str_param(dump_buf + offset,
4937 					     dump, "storm", storm_letter_str);
4938 		offset += qed_dump_num_param(dump_buf + offset,
4939 					     dump,
4940 					     "size",
4941 					     asserts->list_element_dword_size);
4942 
4943 		/* Read and dump FW Asserts data */
4944 		if (!dump) {
4945 			offset += asserts->list_element_dword_size;
4946 			continue;
4947 		}
4948 
4949 		fw_asserts_section_addr = storm->sem_fast_mem_addr +
4950 			SEM_FAST_REG_INT_RAM +
4951 			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
4952 		next_list_idx_addr = fw_asserts_section_addr +
4953 			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
4954 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
4955 		last_list_idx = (next_list_idx > 0
4956 				 ? next_list_idx
4957 				 : asserts->list_num_elements) - 1;
4958 		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
4959 		       asserts->list_dword_offset +
4960 		       last_list_idx * asserts->list_element_dword_size;
4961 		offset +=
4962 		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
4963 					    dump_buf + offset,
4964 					    dump, addr,
4965 					    asserts->list_element_dword_size,
4966 					    false);
4967 	}
4968 
4969 	/* Dump last section */
4970 	offset += qed_dump_last_section(p_hwfn, dump_buf, offset, dump);
4971 
4972 	return offset;
4973 }
4974 
4975 /***************************** Public Functions *******************************/
4976 
4977 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
4978 {
4979 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
4980 	u8 buf_id;
4981 
4982 	/* convert binary data to debug arrays */
4983 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
4984 		s_dbg_arrays[buf_id].ptr =
4985 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
4986 		s_dbg_arrays[buf_id].size_in_dwords =
4987 		    BYTES_TO_DWORDS(buf_array[buf_id].length);
4988 	}
4989 
4990 	return DBG_STATUS_OK;
4991 }
4992 
4993 /* Assign default GRC param values */
4994 void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
4995 {
4996 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
4997 	u32 i;
4998 
4999 	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
5000 		dev_data->grc.param_val[i] =
5001 		    s_grc_param_defs[i].default_val[dev_data->chip_id];
5002 }
5003 
5004 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5005 					      struct qed_ptt *p_ptt,
5006 					      u32 *buf_size)
5007 {
5008 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5009 
5010 	*buf_size = 0;
5011 
5012 	if (status != DBG_STATUS_OK)
5013 		return status;
5014 
5015 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5016 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
5017 	    !s_dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
5018 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5019 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5020 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5021 
5022 	return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5023 }
5024 
5025 enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
5026 				 struct qed_ptt *p_ptt,
5027 				 u32 *dump_buf,
5028 				 u32 buf_size_in_dwords,
5029 				 u32 *num_dumped_dwords)
5030 {
5031 	u32 needed_buf_size_in_dwords;
5032 	enum dbg_status status;
5033 
5034 	*num_dumped_dwords = 0;
5035 
5036 	status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
5037 					       p_ptt,
5038 					       &needed_buf_size_in_dwords);
5039 	if (status != DBG_STATUS_OK)
5040 		return status;
5041 
5042 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5043 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5044 
5045 	/* GRC Dump */
5046 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
5047 
5048 	/* Revert GRC params to their default */
5049 	qed_dbg_grc_set_params_default(p_hwfn);
5050 
5051 	return status;
5052 }
5053 
5054 enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5055 						   struct qed_ptt *p_ptt,
5056 						   u32 *buf_size)
5057 {
5058 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
5059 	struct idle_chk_data *idle_chk;
5060 	enum dbg_status status;
5061 
5062 	idle_chk = &dev_data->idle_chk;
5063 	*buf_size = 0;
5064 
5065 	status = qed_dbg_dev_init(p_hwfn, p_ptt);
5066 	if (status != DBG_STATUS_OK)
5067 		return status;
5068 
5069 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5070 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
5071 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
5072 	    !s_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
5073 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5074 
5075 	if (!idle_chk->buf_size_set) {
5076 		idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
5077 						       p_ptt, NULL, false);
5078 		idle_chk->buf_size_set = true;
5079 	}
5080 
5081 	*buf_size = idle_chk->buf_size;
5082 
5083 	return DBG_STATUS_OK;
5084 }
5085 
5086 enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
5087 				      struct qed_ptt *p_ptt,
5088 				      u32 *dump_buf,
5089 				      u32 buf_size_in_dwords,
5090 				      u32 *num_dumped_dwords)
5091 {
5092 	u32 needed_buf_size_in_dwords;
5093 	enum dbg_status status;
5094 
5095 	*num_dumped_dwords = 0;
5096 
5097 	status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
5098 						    p_ptt,
5099 						    &needed_buf_size_in_dwords);
5100 	if (status != DBG_STATUS_OK)
5101 		return status;
5102 
5103 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5104 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5105 
5106 	/* Update reset state */
5107 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5108 
5109 	/* Idle Check Dump */
5110 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
5111 
5112 	/* Revert GRC params to their default */
5113 	qed_dbg_grc_set_params_default(p_hwfn);
5114 
5115 	return DBG_STATUS_OK;
5116 }
5117 
5118 enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5119 						    struct qed_ptt *p_ptt,
5120 						    u32 *buf_size)
5121 {
5122 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5123 
5124 	*buf_size = 0;
5125 
5126 	if (status != DBG_STATUS_OK)
5127 		return status;
5128 
5129 	return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5130 }
5131 
5132 enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
5133 				       struct qed_ptt *p_ptt,
5134 				       u32 *dump_buf,
5135 				       u32 buf_size_in_dwords,
5136 				       u32 *num_dumped_dwords)
5137 {
5138 	u32 needed_buf_size_in_dwords;
5139 	enum dbg_status status;
5140 
5141 	status =
5142 		qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
5143 						    p_ptt,
5144 						    &needed_buf_size_in_dwords);
5145 	if (status != DBG_STATUS_OK && status !=
5146 	    DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
5147 		return status;
5148 
5149 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5150 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5151 
5152 	/* Update reset state */
5153 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5154 
5155 	/* Perform dump */
5156 	status = qed_mcp_trace_dump(p_hwfn,
5157 				    p_ptt, dump_buf, true, num_dumped_dwords);
5158 
5159 	/* Revert GRC params to their default */
5160 	qed_dbg_grc_set_params_default(p_hwfn);
5161 
5162 	return status;
5163 }
5164 
5165 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5166 						   struct qed_ptt *p_ptt,
5167 						   u32 *buf_size)
5168 {
5169 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5170 
5171 	*buf_size = 0;
5172 
5173 	if (status != DBG_STATUS_OK)
5174 		return status;
5175 
5176 	return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5177 }
5178 
5179 enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
5180 				      struct qed_ptt *p_ptt,
5181 				      u32 *dump_buf,
5182 				      u32 buf_size_in_dwords,
5183 				      u32 *num_dumped_dwords)
5184 {
5185 	u32 needed_buf_size_in_dwords;
5186 	enum dbg_status status;
5187 
5188 	*num_dumped_dwords = 0;
5189 
5190 	status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
5191 						    p_ptt,
5192 						    &needed_buf_size_in_dwords);
5193 	if (status != DBG_STATUS_OK)
5194 		return status;
5195 
5196 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5197 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5198 
5199 	/* Update reset state */
5200 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5201 
5202 	status = qed_reg_fifo_dump(p_hwfn,
5203 				   p_ptt, dump_buf, true, num_dumped_dwords);
5204 
5205 	/* Revert GRC params to their default */
5206 	qed_dbg_grc_set_params_default(p_hwfn);
5207 
5208 	return status;
5209 }
5210 
5211 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5212 						   struct qed_ptt *p_ptt,
5213 						   u32 *buf_size)
5214 {
5215 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5216 
5217 	*buf_size = 0;
5218 
5219 	if (status != DBG_STATUS_OK)
5220 		return status;
5221 
5222 	return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
5223 }
5224 
5225 enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
5226 				      struct qed_ptt *p_ptt,
5227 				      u32 *dump_buf,
5228 				      u32 buf_size_in_dwords,
5229 				      u32 *num_dumped_dwords)
5230 {
5231 	u32 needed_buf_size_in_dwords;
5232 	enum dbg_status status;
5233 
5234 	*num_dumped_dwords = 0;
5235 
5236 	status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
5237 						    p_ptt,
5238 						    &needed_buf_size_in_dwords);
5239 	if (status != DBG_STATUS_OK)
5240 		return status;
5241 
5242 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5243 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5244 
5245 	/* Update reset state */
5246 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5247 
5248 	status = qed_igu_fifo_dump(p_hwfn,
5249 				   p_ptt, dump_buf, true, num_dumped_dwords);
5250 	/* Revert GRC params to their default */
5251 	qed_dbg_grc_set_params_default(p_hwfn);
5252 
5253 	return status;
5254 }
5255 
5256 enum dbg_status
5257 qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5258 					      struct qed_ptt *p_ptt,
5259 					      u32 *buf_size)
5260 {
5261 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5262 
5263 	*buf_size = 0;
5264 
5265 	if (status != DBG_STATUS_OK)
5266 		return status;
5267 
5268 	return qed_protection_override_dump(p_hwfn,
5269 					    p_ptt, NULL, false, buf_size);
5270 }
5271 
5272 enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
5273 						 struct qed_ptt *p_ptt,
5274 						 u32 *dump_buf,
5275 						 u32 buf_size_in_dwords,
5276 						 u32 *num_dumped_dwords)
5277 {
5278 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5279 	enum dbg_status status;
5280 
5281 	*num_dumped_dwords = 0;
5282 
5283 	status =
5284 		qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
5285 							      p_ptt,
5286 							      p_size);
5287 	if (status != DBG_STATUS_OK)
5288 		return status;
5289 
5290 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5291 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5292 
5293 	/* Update reset state */
5294 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5295 
5296 	status = qed_protection_override_dump(p_hwfn,
5297 					      p_ptt,
5298 					      dump_buf,
5299 					      true, num_dumped_dwords);
5300 
5301 	/* Revert GRC params to their default */
5302 	qed_dbg_grc_set_params_default(p_hwfn);
5303 
5304 	return status;
5305 }
5306 
5307 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
5308 						     struct qed_ptt *p_ptt,
5309 						     u32 *buf_size)
5310 {
5311 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5312 
5313 	*buf_size = 0;
5314 
5315 	if (status != DBG_STATUS_OK)
5316 		return status;
5317 
5318 	/* Update reset state */
5319 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
5320 
5321 	*buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
5322 
5323 	return DBG_STATUS_OK;
5324 }
5325 
5326 enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
5327 					struct qed_ptt *p_ptt,
5328 					u32 *dump_buf,
5329 					u32 buf_size_in_dwords,
5330 					u32 *num_dumped_dwords)
5331 {
5332 	u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
5333 	enum dbg_status status;
5334 
5335 	*num_dumped_dwords = 0;
5336 
5337 	status =
5338 		qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
5339 						     p_ptt,
5340 						     p_size);
5341 	if (status != DBG_STATUS_OK)
5342 		return status;
5343 
5344 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
5345 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
5346 
5347 	*num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
5348 
5349 	/* Revert GRC params to their default */
5350 	qed_dbg_grc_set_params_default(p_hwfn);
5351 
5352 	return DBG_STATUS_OK;
5353 }
5354 
5355 enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
5356 				  struct qed_ptt *p_ptt,
5357 				  enum block_id block_id,
5358 				  enum dbg_attn_type attn_type,
5359 				  bool clear_status,
5360 				  struct dbg_attn_block_result *results)
5361 {
5362 	enum dbg_status status = qed_dbg_dev_init(p_hwfn, p_ptt);
5363 	u8 reg_idx, num_attn_regs, num_result_regs = 0;
5364 	const struct dbg_attn_reg *attn_reg_arr;
5365 
5366 	if (status != DBG_STATUS_OK)
5367 		return status;
5368 
5369 	if (!s_dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
5370 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
5371 	    !s_dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
5372 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
5373 
5374 	attn_reg_arr = qed_get_block_attn_regs(block_id,
5375 					       attn_type, &num_attn_regs);
5376 
5377 	for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
5378 		const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
5379 		struct dbg_attn_reg_result *reg_result;
5380 		u32 sts_addr, sts_val;
5381 		u16 modes_buf_offset;
5382 		bool eval_mode;
5383 
5384 		/* Check mode */
5385 		eval_mode = GET_FIELD(reg_data->mode.data,
5386 				      DBG_MODE_HDR_EVAL_MODE) > 0;
5387 		modes_buf_offset = GET_FIELD(reg_data->mode.data,
5388 					     DBG_MODE_HDR_MODES_BUF_OFFSET);
5389 		if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
5390 			continue;
5391 
5392 		/* Mode match - read attention status register */
5393 		sts_addr = DWORDS_TO_BYTES(clear_status ?
5394 					   reg_data->sts_clr_address :
5395 					   GET_FIELD(reg_data->data,
5396 						     DBG_ATTN_REG_STS_ADDRESS));
5397 		sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
5398 		if (!sts_val)
5399 			continue;
5400 
5401 		/* Non-zero attention status - add to results */
5402 		reg_result = &results->reg_results[num_result_regs];
5403 		SET_FIELD(reg_result->data,
5404 			  DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
5405 		SET_FIELD(reg_result->data,
5406 			  DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
5407 			  GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
5408 		reg_result->block_attn_offset = reg_data->block_attn_offset;
5409 		reg_result->sts_val = sts_val;
5410 		reg_result->mask_val = qed_rd(p_hwfn,
5411 					      p_ptt,
5412 					      DWORDS_TO_BYTES
5413 					      (reg_data->mask_address));
5414 		num_result_regs++;
5415 	}
5416 
5417 	results->block_id = (u8)block_id;
5418 	results->names_offset =
5419 	    qed_get_block_attn_data(block_id, attn_type)->names_offset;
5420 	SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
5421 	SET_FIELD(results->data,
5422 		  DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
5423 
5424 	return DBG_STATUS_OK;
5425 }
5426 
5427 /******************************* Data Types **********************************/
5428 
5429 struct block_info {
5430 	const char *name;
5431 	enum block_id id;
5432 };
5433 
5434 struct mcp_trace_format {
5435 	u32 data;
5436 #define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
5437 #define MCP_TRACE_FORMAT_MODULE_SHIFT	0
5438 #define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
5439 #define MCP_TRACE_FORMAT_LEVEL_SHIFT	16
5440 #define MCP_TRACE_FORMAT_P1_SIZE_MASK	0x000c0000
5441 #define MCP_TRACE_FORMAT_P1_SIZE_SHIFT	18
5442 #define MCP_TRACE_FORMAT_P2_SIZE_MASK	0x00300000
5443 #define MCP_TRACE_FORMAT_P2_SIZE_SHIFT	20
5444 #define MCP_TRACE_FORMAT_P3_SIZE_MASK	0x00c00000
5445 #define MCP_TRACE_FORMAT_P3_SIZE_SHIFT	22
5446 #define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
5447 #define MCP_TRACE_FORMAT_LEN_SHIFT	24
5448 
5449 	char *format_str;
5450 };
5451 
5452 /* Meta data structure, generated by a perl script during MFW build. therefore,
5453  * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl
5454  * script.
5455  */
5456 struct mcp_trace_meta {
5457 	u32 modules_num;
5458 	char **modules;
5459 	u32 formats_num;
5460 	struct mcp_trace_format *formats;
5461 };
5462 
5463 /* REG fifo element */
5464 struct reg_fifo_element {
5465 	u64 data;
5466 #define REG_FIFO_ELEMENT_ADDRESS_SHIFT		0
5467 #define REG_FIFO_ELEMENT_ADDRESS_MASK		0x7fffff
5468 #define REG_FIFO_ELEMENT_ACCESS_SHIFT		23
5469 #define REG_FIFO_ELEMENT_ACCESS_MASK		0x1
5470 #define REG_FIFO_ELEMENT_PF_SHIFT		24
5471 #define REG_FIFO_ELEMENT_PF_MASK		0xf
5472 #define REG_FIFO_ELEMENT_VF_SHIFT		28
5473 #define REG_FIFO_ELEMENT_VF_MASK		0xff
5474 #define REG_FIFO_ELEMENT_PORT_SHIFT		36
5475 #define REG_FIFO_ELEMENT_PORT_MASK		0x3
5476 #define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT	38
5477 #define REG_FIFO_ELEMENT_PRIVILEGE_MASK		0x3
5478 #define REG_FIFO_ELEMENT_PROTECTION_SHIFT	40
5479 #define REG_FIFO_ELEMENT_PROTECTION_MASK	0x7
5480 #define REG_FIFO_ELEMENT_MASTER_SHIFT		43
5481 #define REG_FIFO_ELEMENT_MASTER_MASK		0xf
5482 #define REG_FIFO_ELEMENT_ERROR_SHIFT		47
5483 #define REG_FIFO_ELEMENT_ERROR_MASK		0x1f
5484 };
5485 
5486 /* IGU fifo element */
5487 struct igu_fifo_element {
5488 	u32 dword0;
5489 #define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT		0
5490 #define IGU_FIFO_ELEMENT_DWORD0_FID_MASK		0xff
5491 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT		8
5492 #define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK		0x1
5493 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT		9
5494 #define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK		0xf
5495 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT		13
5496 #define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK		0xf
5497 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT		17
5498 #define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK		0x7fff
5499 	u32 dword1;
5500 	u32 dword2;
5501 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT	0
5502 #define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK		0x1
5503 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT		1
5504 #define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK		0xffffffff
5505 	u32 reserved;
5506 };
5507 
5508 struct igu_fifo_wr_data {
5509 	u32 data;
5510 #define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT		0
5511 #define IGU_FIFO_WR_DATA_PROD_CONS_MASK			0xffffff
5512 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT		24
5513 #define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK		0x1
5514 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT	25
5515 #define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK		0x3
5516 #define IGU_FIFO_WR_DATA_SEGMENT_SHIFT			27
5517 #define IGU_FIFO_WR_DATA_SEGMENT_MASK			0x1
5518 #define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT		28
5519 #define IGU_FIFO_WR_DATA_TIMER_MASK_MASK		0x1
5520 #define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT			31
5521 #define IGU_FIFO_WR_DATA_CMD_TYPE_MASK			0x1
5522 };
5523 
5524 struct igu_fifo_cleanup_wr_data {
5525 	u32 data;
5526 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT		0
5527 #define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK		0x7ffffff
5528 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT	27
5529 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK	0x1
5530 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT	28
5531 #define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK	0x7
5532 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT		31
5533 #define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK		0x1
5534 };
5535 
5536 /* Protection override element */
5537 struct protection_override_element {
5538 	u64 data;
5539 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT		0
5540 #define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK		0x7fffff
5541 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT		23
5542 #define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK		0xffffff
5543 #define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT			47
5544 #define PROTECTION_OVERRIDE_ELEMENT_READ_MASK			0x1
5545 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT			48
5546 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK			0x1
5547 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT	49
5548 #define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK	0x7
5549 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT	52
5550 #define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK	0x7
5551 };
5552 
5553 enum igu_fifo_sources {
5554 	IGU_SRC_PXP0,
5555 	IGU_SRC_PXP1,
5556 	IGU_SRC_PXP2,
5557 	IGU_SRC_PXP3,
5558 	IGU_SRC_PXP4,
5559 	IGU_SRC_PXP5,
5560 	IGU_SRC_PXP6,
5561 	IGU_SRC_PXP7,
5562 	IGU_SRC_CAU,
5563 	IGU_SRC_ATTN,
5564 	IGU_SRC_GRC
5565 };
5566 
5567 enum igu_fifo_addr_types {
5568 	IGU_ADDR_TYPE_MSIX_MEM,
5569 	IGU_ADDR_TYPE_WRITE_PBA,
5570 	IGU_ADDR_TYPE_WRITE_INT_ACK,
5571 	IGU_ADDR_TYPE_WRITE_ATTN_BITS,
5572 	IGU_ADDR_TYPE_READ_INT,
5573 	IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
5574 	IGU_ADDR_TYPE_RESERVED
5575 };
5576 
5577 struct igu_fifo_addr_data {
5578 	u16 start_addr;
5579 	u16 end_addr;
5580 	char *desc;
5581 	char *vf_desc;
5582 	enum igu_fifo_addr_types type;
5583 };
5584 
5585 /******************************** Constants **********************************/
5586 
5587 #define MAX_MSG_LEN				1024
5588 
5589 #define MCP_TRACE_MAX_MODULE_LEN		8
5590 #define MCP_TRACE_FORMAT_MAX_PARAMS		3
5591 #define MCP_TRACE_FORMAT_PARAM_WIDTH \
5592 	(MCP_TRACE_FORMAT_P2_SIZE_SHIFT - MCP_TRACE_FORMAT_P1_SIZE_SHIFT)
5593 
5594 #define REG_FIFO_ELEMENT_ADDR_FACTOR		4
5595 #define REG_FIFO_ELEMENT_IS_PF_VF_VAL		127
5596 
5597 #define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR	4
5598 
5599 /********************************* Macros ************************************/
5600 
5601 #define BYTES_TO_DWORDS(bytes)			((bytes) / BYTES_IN_DWORD)
5602 
5603 /***************************** Constant Arrays *******************************/
5604 
5605 struct user_dbg_array {
5606 	const u32 *ptr;
5607 	u32 size_in_dwords;
5608 };
5609 
5610 /* Debug arrays */
5611 static struct user_dbg_array
5612 s_user_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
5613 
5614 /* Block names array */
5615 static struct block_info s_block_info_arr[] = {
5616 	{"grc", BLOCK_GRC},
5617 	{"miscs", BLOCK_MISCS},
5618 	{"misc", BLOCK_MISC},
5619 	{"dbu", BLOCK_DBU},
5620 	{"pglue_b", BLOCK_PGLUE_B},
5621 	{"cnig", BLOCK_CNIG},
5622 	{"cpmu", BLOCK_CPMU},
5623 	{"ncsi", BLOCK_NCSI},
5624 	{"opte", BLOCK_OPTE},
5625 	{"bmb", BLOCK_BMB},
5626 	{"pcie", BLOCK_PCIE},
5627 	{"mcp", BLOCK_MCP},
5628 	{"mcp2", BLOCK_MCP2},
5629 	{"pswhst", BLOCK_PSWHST},
5630 	{"pswhst2", BLOCK_PSWHST2},
5631 	{"pswrd", BLOCK_PSWRD},
5632 	{"pswrd2", BLOCK_PSWRD2},
5633 	{"pswwr", BLOCK_PSWWR},
5634 	{"pswwr2", BLOCK_PSWWR2},
5635 	{"pswrq", BLOCK_PSWRQ},
5636 	{"pswrq2", BLOCK_PSWRQ2},
5637 	{"pglcs", BLOCK_PGLCS},
5638 	{"ptu", BLOCK_PTU},
5639 	{"dmae", BLOCK_DMAE},
5640 	{"tcm", BLOCK_TCM},
5641 	{"mcm", BLOCK_MCM},
5642 	{"ucm", BLOCK_UCM},
5643 	{"xcm", BLOCK_XCM},
5644 	{"ycm", BLOCK_YCM},
5645 	{"pcm", BLOCK_PCM},
5646 	{"qm", BLOCK_QM},
5647 	{"tm", BLOCK_TM},
5648 	{"dorq", BLOCK_DORQ},
5649 	{"brb", BLOCK_BRB},
5650 	{"src", BLOCK_SRC},
5651 	{"prs", BLOCK_PRS},
5652 	{"tsdm", BLOCK_TSDM},
5653 	{"msdm", BLOCK_MSDM},
5654 	{"usdm", BLOCK_USDM},
5655 	{"xsdm", BLOCK_XSDM},
5656 	{"ysdm", BLOCK_YSDM},
5657 	{"psdm", BLOCK_PSDM},
5658 	{"tsem", BLOCK_TSEM},
5659 	{"msem", BLOCK_MSEM},
5660 	{"usem", BLOCK_USEM},
5661 	{"xsem", BLOCK_XSEM},
5662 	{"ysem", BLOCK_YSEM},
5663 	{"psem", BLOCK_PSEM},
5664 	{"rss", BLOCK_RSS},
5665 	{"tmld", BLOCK_TMLD},
5666 	{"muld", BLOCK_MULD},
5667 	{"yuld", BLOCK_YULD},
5668 	{"xyld", BLOCK_XYLD},
5669 	{"ptld", BLOCK_PTLD},
5670 	{"ypld", BLOCK_YPLD},
5671 	{"prm", BLOCK_PRM},
5672 	{"pbf_pb1", BLOCK_PBF_PB1},
5673 	{"pbf_pb2", BLOCK_PBF_PB2},
5674 	{"rpb", BLOCK_RPB},
5675 	{"btb", BLOCK_BTB},
5676 	{"pbf", BLOCK_PBF},
5677 	{"rdif", BLOCK_RDIF},
5678 	{"tdif", BLOCK_TDIF},
5679 	{"cdu", BLOCK_CDU},
5680 	{"ccfc", BLOCK_CCFC},
5681 	{"tcfc", BLOCK_TCFC},
5682 	{"igu", BLOCK_IGU},
5683 	{"cau", BLOCK_CAU},
5684 	{"rgfs", BLOCK_RGFS},
5685 	{"rgsrc", BLOCK_RGSRC},
5686 	{"tgfs", BLOCK_TGFS},
5687 	{"tgsrc", BLOCK_TGSRC},
5688 	{"umac", BLOCK_UMAC},
5689 	{"xmac", BLOCK_XMAC},
5690 	{"dbg", BLOCK_DBG},
5691 	{"nig", BLOCK_NIG},
5692 	{"wol", BLOCK_WOL},
5693 	{"bmbn", BLOCK_BMBN},
5694 	{"ipc", BLOCK_IPC},
5695 	{"nwm", BLOCK_NWM},
5696 	{"nws", BLOCK_NWS},
5697 	{"ms", BLOCK_MS},
5698 	{"phy_pcie", BLOCK_PHY_PCIE},
5699 	{"led", BLOCK_LED},
5700 	{"avs_wrap", BLOCK_AVS_WRAP},
5701 	{"misc_aeu", BLOCK_MISC_AEU},
5702 	{"bar0_map", BLOCK_BAR0_MAP}
5703 };
5704 
5705 /* Status string array */
5706 static const char * const s_status_str[] = {
5707 	/* DBG_STATUS_OK */
5708 	"Operation completed successfully",
5709 
5710 	/* DBG_STATUS_APP_VERSION_NOT_SET */
5711 	"Debug application version wasn't set",
5712 
5713 	/* DBG_STATUS_UNSUPPORTED_APP_VERSION */
5714 	"Unsupported debug application version",
5715 
5716 	/* DBG_STATUS_DBG_BLOCK_NOT_RESET */
5717 	"The debug block wasn't reset since the last recording",
5718 
5719 	/* DBG_STATUS_INVALID_ARGS */
5720 	"Invalid arguments",
5721 
5722 	/* DBG_STATUS_OUTPUT_ALREADY_SET */
5723 	"The debug output was already set",
5724 
5725 	/* DBG_STATUS_INVALID_PCI_BUF_SIZE */
5726 	"Invalid PCI buffer size",
5727 
5728 	/* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
5729 	"PCI buffer allocation failed",
5730 
5731 	/* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
5732 	"A PCI buffer wasn't allocated",
5733 
5734 	/* DBG_STATUS_TOO_MANY_INPUTS */
5735 	"Too many inputs were enabled. Enabled less inputs, or set 'unifyInputs' to true",
5736 
5737 	/* DBG_STATUS_INPUT_OVERLAP */
5738 	"Overlapping debug bus inputs",
5739 
5740 	/* DBG_STATUS_HW_ONLY_RECORDING */
5741 	"Cannot record Storm data since the entire recording cycle is used by HW",
5742 
5743 	/* DBG_STATUS_STORM_ALREADY_ENABLED */
5744 	"The Storm was already enabled",
5745 
5746 	/* DBG_STATUS_STORM_NOT_ENABLED */
5747 	"The specified Storm wasn't enabled",
5748 
5749 	/* DBG_STATUS_BLOCK_ALREADY_ENABLED */
5750 	"The block was already enabled",
5751 
5752 	/* DBG_STATUS_BLOCK_NOT_ENABLED */
5753 	"The specified block wasn't enabled",
5754 
5755 	/* DBG_STATUS_NO_INPUT_ENABLED */
5756 	"No input was enabled for recording",
5757 
5758 	/* DBG_STATUS_NO_FILTER_TRIGGER_64B */
5759 	"Filters and triggers are not allowed when recording in 64b units",
5760 
5761 	/* DBG_STATUS_FILTER_ALREADY_ENABLED */
5762 	"The filter was already enabled",
5763 
5764 	/* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
5765 	"The trigger was already enabled",
5766 
5767 	/* DBG_STATUS_TRIGGER_NOT_ENABLED */
5768 	"The trigger wasn't enabled",
5769 
5770 	/* DBG_STATUS_CANT_ADD_CONSTRAINT */
5771 	"A constraint can be added only after a filter was enabled or a trigger state was added",
5772 
5773 	/* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
5774 	"Cannot add more than 3 trigger states",
5775 
5776 	/* DBG_STATUS_TOO_MANY_CONSTRAINTS */
5777 	"Cannot add more than 4 constraints per filter or trigger state",
5778 
5779 	/* DBG_STATUS_RECORDING_NOT_STARTED */
5780 	"The recording wasn't started",
5781 
5782 	/* DBG_STATUS_DATA_DIDNT_TRIGGER */
5783 	"A trigger was configured, but it didn't trigger",
5784 
5785 	/* DBG_STATUS_NO_DATA_RECORDED */
5786 	"No data was recorded",
5787 
5788 	/* DBG_STATUS_DUMP_BUF_TOO_SMALL */
5789 	"Dump buffer is too small",
5790 
5791 	/* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
5792 	"Dumped data is not aligned to chunks",
5793 
5794 	/* DBG_STATUS_UNKNOWN_CHIP */
5795 	"Unknown chip",
5796 
5797 	/* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
5798 	"Failed allocating virtual memory",
5799 
5800 	/* DBG_STATUS_BLOCK_IN_RESET */
5801 	"The input block is in reset",
5802 
5803 	/* DBG_STATUS_INVALID_TRACE_SIGNATURE */
5804 	"Invalid MCP trace signature found in NVRAM",
5805 
5806 	/* DBG_STATUS_INVALID_NVRAM_BUNDLE */
5807 	"Invalid bundle ID found in NVRAM",
5808 
5809 	/* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
5810 	"Failed getting NVRAM image",
5811 
5812 	/* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
5813 	"NVRAM image is not dword-aligned",
5814 
5815 	/* DBG_STATUS_NVRAM_READ_FAILED */
5816 	"Failed reading from NVRAM",
5817 
5818 	/* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
5819 	"Idle check parsing failed",
5820 
5821 	/* DBG_STATUS_MCP_TRACE_BAD_DATA */
5822 	"MCP Trace data is corrupt",
5823 
5824 	/* DBG_STATUS_MCP_TRACE_NO_META */
5825 	"Dump doesn't contain meta data - it must be provided in image file",
5826 
5827 	/* DBG_STATUS_MCP_COULD_NOT_HALT */
5828 	"Failed to halt MCP",
5829 
5830 	/* DBG_STATUS_MCP_COULD_NOT_RESUME */
5831 	"Failed to resume MCP after halt",
5832 
5833 	/* DBG_STATUS_DMAE_FAILED */
5834 	"DMAE transaction failed",
5835 
5836 	/* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
5837 	"Failed to empty SEMI sync FIFO",
5838 
5839 	/* DBG_STATUS_IGU_FIFO_BAD_DATA */
5840 	"IGU FIFO data is corrupt",
5841 
5842 	/* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
5843 	"MCP failed to mask parities",
5844 
5845 	/* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
5846 	"FW Asserts parsing failed",
5847 
5848 	/* DBG_STATUS_REG_FIFO_BAD_DATA */
5849 	"GRC FIFO data is corrupt",
5850 
5851 	/* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
5852 	"Protection Override data is corrupt",
5853 
5854 	/* DBG_STATUS_DBG_ARRAY_NOT_SET */
5855 	"Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
5856 
5857 	/* DBG_STATUS_FILTER_BUG */
5858 	"Debug Bus filtering requires the -unifyInputs option (due to a HW bug)",
5859 
5860 	/* DBG_STATUS_NON_MATCHING_LINES */
5861 	"Non-matching debug lines - all lines must be of the same type (either 128b or 256b)",
5862 
5863 	/* DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET */
5864 	"The selected trigger dword offset wasn't enabled in the recorded HW block",
5865 
5866 	/* DBG_STATUS_DBG_BUS_IN_USE */
5867 	"The debug bus is in use"
5868 };
5869 
5870 /* Idle check severity names array */
5871 static const char * const s_idle_chk_severity_str[] = {
5872 	"Error",
5873 	"Error if no traffic",
5874 	"Warning"
5875 };
5876 
5877 /* MCP Trace level names array */
5878 static const char * const s_mcp_trace_level_str[] = {
5879 	"ERROR",
5880 	"TRACE",
5881 	"DEBUG"
5882 };
5883 
5884 /* Access type names array */
5885 static const char * const s_access_strs[] = {
5886 	"read",
5887 	"write"
5888 };
5889 
5890 /* Privilege type names array */
5891 static const char * const s_privilege_strs[] = {
5892 	"VF",
5893 	"PDA",
5894 	"HV",
5895 	"UA"
5896 };
5897 
5898 /* Protection type names array */
5899 static const char * const s_protection_strs[] = {
5900 	"(default)",
5901 	"(default)",
5902 	"(default)",
5903 	"(default)",
5904 	"override VF",
5905 	"override PDA",
5906 	"override HV",
5907 	"override UA"
5908 };
5909 
5910 /* Master type names array */
5911 static const char * const s_master_strs[] = {
5912 	"???",
5913 	"pxp",
5914 	"mcp",
5915 	"msdm",
5916 	"psdm",
5917 	"ysdm",
5918 	"usdm",
5919 	"tsdm",
5920 	"xsdm",
5921 	"dbu",
5922 	"dmae",
5923 	"???",
5924 	"???",
5925 	"???",
5926 	"???",
5927 	"???"
5928 };
5929 
5930 /* REG FIFO error messages array */
5931 static const char * const s_reg_fifo_error_strs[] = {
5932 	"grc timeout",
5933 	"address doesn't belong to any block",
5934 	"reserved address in block or write to read-only address",
5935 	"privilege/protection mismatch",
5936 	"path isolation error"
5937 };
5938 
5939 /* IGU FIFO sources array */
5940 static const char * const s_igu_fifo_source_strs[] = {
5941 	"TSTORM",
5942 	"MSTORM",
5943 	"USTORM",
5944 	"XSTORM",
5945 	"YSTORM",
5946 	"PSTORM",
5947 	"PCIE",
5948 	"NIG_QM_PBF",
5949 	"CAU",
5950 	"ATTN",
5951 	"GRC",
5952 };
5953 
5954 /* IGU FIFO error messages */
5955 static const char * const s_igu_fifo_error_strs[] = {
5956 	"no error",
5957 	"length error",
5958 	"function disabled",
5959 	"VF sent command to attnetion address",
5960 	"host sent prod update command",
5961 	"read of during interrupt register while in MIMD mode",
5962 	"access to PXP BAR reserved address",
5963 	"producer update command to attention index",
5964 	"unknown error",
5965 	"SB index not valid",
5966 	"SB relative index and FID not found",
5967 	"FID not match",
5968 	"command with error flag asserted (PCI error or CAU discard)",
5969 	"VF sent cleanup and RF cleanup is disabled",
5970 	"cleanup command on type bigger than 4"
5971 };
5972 
5973 /* IGU FIFO address data */
5974 static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
5975 	{0x0, 0x101, "MSI-X Memory", NULL,
5976 	 IGU_ADDR_TYPE_MSIX_MEM},
5977 	{0x102, 0x1ff, "reserved", NULL,
5978 	 IGU_ADDR_TYPE_RESERVED},
5979 	{0x200, 0x200, "Write PBA[0:63]", NULL,
5980 	 IGU_ADDR_TYPE_WRITE_PBA},
5981 	{0x201, 0x201, "Write PBA[64:127]", "reserved",
5982 	 IGU_ADDR_TYPE_WRITE_PBA},
5983 	{0x202, 0x202, "Write PBA[128]", "reserved",
5984 	 IGU_ADDR_TYPE_WRITE_PBA},
5985 	{0x203, 0x3ff, "reserved", NULL,
5986 	 IGU_ADDR_TYPE_RESERVED},
5987 	{0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
5988 	 IGU_ADDR_TYPE_WRITE_INT_ACK},
5989 	{0x5f0, 0x5f0, "Attention bits update", NULL,
5990 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5991 	{0x5f1, 0x5f1, "Attention bits set", NULL,
5992 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5993 	{0x5f2, 0x5f2, "Attention bits clear", NULL,
5994 	 IGU_ADDR_TYPE_WRITE_ATTN_BITS},
5995 	{0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
5996 	 IGU_ADDR_TYPE_READ_INT},
5997 	{0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
5998 	 IGU_ADDR_TYPE_READ_INT},
5999 	{0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
6000 	 IGU_ADDR_TYPE_READ_INT},
6001 	{0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
6002 	 IGU_ADDR_TYPE_READ_INT},
6003 	{0x5f7, 0x5ff, "reserved", NULL,
6004 	 IGU_ADDR_TYPE_RESERVED},
6005 	{0x600, 0x7ff, "Producer update", NULL,
6006 	 IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
6007 };
6008 
6009 /******************************** Variables **********************************/
6010 
6011 /* MCP Trace meta data - used in case the dump doesn't contain the meta data
6012  * (e.g. due to no NVRAM access).
6013  */
6014 static struct user_dbg_array s_mcp_trace_meta = { NULL, 0 };
6015 
6016 /* Temporary buffer, used for print size calculations */
6017 static char s_temp_buf[MAX_MSG_LEN];
6018 
6019 /**************************** Private Functions ******************************/
6020 
6021 static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
6022 {
6023 	return (a + b) % size;
6024 }
6025 
6026 static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
6027 {
6028 	return (size + a - b) % size;
6029 }
6030 
6031 /* Reads the specified number of bytes from the specified cyclic buffer (up to 4
6032  * bytes) and returns them as a dword value. the specified buffer offset is
6033  * updated.
6034  */
6035 static u32 qed_read_from_cyclic_buf(void *buf,
6036 				    u32 *offset,
6037 				    u32 buf_size, u8 num_bytes_to_read)
6038 {
6039 	u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
6040 	u32 val = 0;
6041 
6042 	val_ptr = (u8 *)&val;
6043 
6044 	for (i = 0; i < num_bytes_to_read; i++) {
6045 		val_ptr[i] = bytes_buf[*offset];
6046 		*offset = qed_cyclic_add(*offset, 1, buf_size);
6047 	}
6048 
6049 	return val;
6050 }
6051 
6052 /* Reads and returns the next byte from the specified buffer.
6053  * The specified buffer offset is updated.
6054  */
6055 static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
6056 {
6057 	return ((u8 *)buf)[(*offset)++];
6058 }
6059 
6060 /* Reads and returns the next dword from the specified buffer.
6061  * The specified buffer offset is updated.
6062  */
6063 static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
6064 {
6065 	u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
6066 
6067 	*offset += 4;
6068 
6069 	return dword_val;
6070 }
6071 
6072 /* Reads the next string from the specified buffer, and copies it to the
6073  * specified pointer. The specified buffer offset is updated.
6074  */
6075 static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
6076 {
6077 	const char *source_str = &((const char *)buf)[*offset];
6078 
6079 	strncpy(dest, source_str, size);
6080 	dest[size - 1] = '\0';
6081 	*offset += size;
6082 }
6083 
6084 /* Returns a pointer to the specified offset (in bytes) of the specified buffer.
6085  * If the specified buffer in NULL, a temporary buffer pointer is returned.
6086  */
6087 static char *qed_get_buf_ptr(void *buf, u32 offset)
6088 {
6089 	return buf ? (char *)buf + offset : s_temp_buf;
6090 }
6091 
6092 /* Reads a param from the specified buffer. Returns the number of dwords read.
6093  * If the returned str_param is NULL, the param is numeric and its value is
6094  * returned in num_param.
6095  * Otheriwise, the param is a string and its pointer is returned in str_param.
6096  */
6097 static u32 qed_read_param(u32 *dump_buf,
6098 			  const char **param_name,
6099 			  const char **param_str_val, u32 *param_num_val)
6100 {
6101 	char *char_buf = (char *)dump_buf;
6102 	size_t offset = 0;
6103 
6104 	/* Extract param name */
6105 	*param_name = char_buf;
6106 	offset += strlen(*param_name) + 1;
6107 
6108 	/* Check param type */
6109 	if (*(char_buf + offset++)) {
6110 		/* String param */
6111 		*param_str_val = char_buf + offset;
6112 		offset += strlen(*param_str_val) + 1;
6113 		if (offset & 0x3)
6114 			offset += (4 - (offset & 0x3));
6115 	} else {
6116 		/* Numeric param */
6117 		*param_str_val = NULL;
6118 		if (offset & 0x3)
6119 			offset += (4 - (offset & 0x3));
6120 		*param_num_val = *(u32 *)(char_buf + offset);
6121 		offset += 4;
6122 	}
6123 
6124 	return offset / 4;
6125 }
6126 
6127 /* Reads a section header from the specified buffer.
6128  * Returns the number of dwords read.
6129  */
6130 static u32 qed_read_section_hdr(u32 *dump_buf,
6131 				const char **section_name,
6132 				u32 *num_section_params)
6133 {
6134 	const char *param_str_val;
6135 
6136 	return qed_read_param(dump_buf,
6137 			      section_name, &param_str_val, num_section_params);
6138 }
6139 
6140 /* Reads section params from the specified buffer and prints them to the results
6141  * buffer. Returns the number of dwords read.
6142  */
6143 static u32 qed_print_section_params(u32 *dump_buf,
6144 				    u32 num_section_params,
6145 				    char *results_buf, u32 *num_chars_printed)
6146 {
6147 	u32 i, dump_offset = 0, results_offset = 0;
6148 
6149 	for (i = 0; i < num_section_params; i++) {
6150 		const char *param_name, *param_str_val;
6151 		u32 param_num_val = 0;
6152 
6153 		dump_offset += qed_read_param(dump_buf + dump_offset,
6154 					      &param_name,
6155 					      &param_str_val, &param_num_val);
6156 
6157 		if (param_str_val)
6158 			results_offset +=
6159 				sprintf(qed_get_buf_ptr(results_buf,
6160 							results_offset),
6161 					"%s: %s\n", param_name, param_str_val);
6162 		else if (strcmp(param_name, "fw-timestamp"))
6163 			results_offset +=
6164 				sprintf(qed_get_buf_ptr(results_buf,
6165 							results_offset),
6166 					"%s: %d\n", param_name, param_num_val);
6167 	}
6168 
6169 	results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
6170 				  "\n");
6171 
6172 	*num_chars_printed = results_offset;
6173 
6174 	return dump_offset;
6175 }
6176 
6177 /* Parses the idle check rules and returns the number of characters printed.
6178  * In case of parsing error, returns 0.
6179  */
6180 static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
6181 					 u32 *dump_buf,
6182 					 u32 *dump_buf_end,
6183 					 u32 num_rules,
6184 					 bool print_fw_idle_chk,
6185 					 char *results_buf,
6186 					 u32 *num_errors, u32 *num_warnings)
6187 {
6188 	/* Offset in results_buf in bytes */
6189 	u32 results_offset = 0;
6190 
6191 	u32 rule_idx;
6192 	u16 i, j;
6193 
6194 	*num_errors = 0;
6195 	*num_warnings = 0;
6196 
6197 	/* Go over dumped results */
6198 	for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
6199 	     rule_idx++) {
6200 		const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
6201 		struct dbg_idle_chk_result_hdr *hdr;
6202 		const char *parsing_str, *lsi_msg;
6203 		u32 parsing_str_offset;
6204 		bool has_fw_msg;
6205 		u8 curr_reg_id;
6206 
6207 		hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
6208 		rule_parsing_data =
6209 			(const struct dbg_idle_chk_rule_parsing_data *)
6210 			&s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].
6211 			ptr[hdr->rule_id];
6212 		parsing_str_offset =
6213 			GET_FIELD(rule_parsing_data->data,
6214 				  DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
6215 		has_fw_msg =
6216 			GET_FIELD(rule_parsing_data->data,
6217 				DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
6218 		parsing_str =
6219 			&((const char *)
6220 			s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
6221 			[parsing_str_offset];
6222 		lsi_msg = parsing_str;
6223 		curr_reg_id = 0;
6224 
6225 		if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
6226 			return 0;
6227 
6228 		/* Skip rule header */
6229 		dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
6230 
6231 		/* Update errors/warnings count */
6232 		if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
6233 		    hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
6234 			(*num_errors)++;
6235 		else
6236 			(*num_warnings)++;
6237 
6238 		/* Print rule severity */
6239 		results_offset +=
6240 		    sprintf(qed_get_buf_ptr(results_buf,
6241 					    results_offset), "%s: ",
6242 			    s_idle_chk_severity_str[hdr->severity]);
6243 
6244 		/* Print rule message */
6245 		if (has_fw_msg)
6246 			parsing_str += strlen(parsing_str) + 1;
6247 		results_offset +=
6248 		    sprintf(qed_get_buf_ptr(results_buf,
6249 					    results_offset), "%s.",
6250 			    has_fw_msg &&
6251 			    print_fw_idle_chk ? parsing_str : lsi_msg);
6252 		parsing_str += strlen(parsing_str) + 1;
6253 
6254 		/* Print register values */
6255 		results_offset +=
6256 		    sprintf(qed_get_buf_ptr(results_buf,
6257 					    results_offset), " Registers:");
6258 		for (i = 0;
6259 		     i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
6260 		     i++) {
6261 			struct dbg_idle_chk_result_reg_hdr *reg_hdr;
6262 			bool is_mem;
6263 			u8 reg_id;
6264 
6265 			reg_hdr =
6266 				(struct dbg_idle_chk_result_reg_hdr *)dump_buf;
6267 			is_mem = GET_FIELD(reg_hdr->data,
6268 					   DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
6269 			reg_id = GET_FIELD(reg_hdr->data,
6270 					   DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
6271 
6272 			/* Skip reg header */
6273 			dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
6274 
6275 			/* Skip register names until the required reg_id is
6276 			 * reached.
6277 			 */
6278 			for (; reg_id > curr_reg_id;
6279 			     curr_reg_id++,
6280 			     parsing_str += strlen(parsing_str) + 1);
6281 
6282 			results_offset +=
6283 			    sprintf(qed_get_buf_ptr(results_buf,
6284 						    results_offset), " %s",
6285 				    parsing_str);
6286 			if (i < hdr->num_dumped_cond_regs && is_mem)
6287 				results_offset +=
6288 				    sprintf(qed_get_buf_ptr(results_buf,
6289 							    results_offset),
6290 					    "[%d]", hdr->mem_entry_id +
6291 					    reg_hdr->start_entry);
6292 			results_offset +=
6293 			    sprintf(qed_get_buf_ptr(results_buf,
6294 						    results_offset), "=");
6295 			for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
6296 				results_offset +=
6297 				    sprintf(qed_get_buf_ptr(results_buf,
6298 							    results_offset),
6299 					    "0x%x", *dump_buf);
6300 				if (j < reg_hdr->size - 1)
6301 					results_offset +=
6302 					    sprintf(qed_get_buf_ptr
6303 						    (results_buf,
6304 						     results_offset), ",");
6305 			}
6306 		}
6307 
6308 		results_offset +=
6309 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6310 	}
6311 
6312 	/* Check if end of dump buffer was exceeded */
6313 	if (dump_buf > dump_buf_end)
6314 		return 0;
6315 
6316 	return results_offset;
6317 }
6318 
6319 /* Parses an idle check dump buffer.
6320  * If result_buf is not NULL, the idle check results are printed to it.
6321  * In any case, the required results buffer size is assigned to
6322  * parsed_results_bytes.
6323  * The parsing status is returned.
6324  */
6325 static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
6326 					       u32 *dump_buf,
6327 					       u32 num_dumped_dwords,
6328 					       char *results_buf,
6329 					       u32 *parsed_results_bytes,
6330 					       u32 *num_errors,
6331 					       u32 *num_warnings)
6332 {
6333 	const char *section_name, *param_name, *param_str_val;
6334 	u32 *dump_buf_end = dump_buf + num_dumped_dwords;
6335 	u32 num_section_params = 0, num_rules;
6336 
6337 	/* Offset in results_buf in bytes */
6338 	u32 results_offset = 0;
6339 
6340 	*parsed_results_bytes = 0;
6341 	*num_errors = 0;
6342 	*num_warnings = 0;
6343 
6344 	if (!s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
6345 	    !s_user_dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
6346 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
6347 
6348 	/* Read global_params section */
6349 	dump_buf += qed_read_section_hdr(dump_buf,
6350 					 &section_name, &num_section_params);
6351 	if (strcmp(section_name, "global_params"))
6352 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6353 
6354 	/* Print global params */
6355 	dump_buf += qed_print_section_params(dump_buf,
6356 					     num_section_params,
6357 					     results_buf, &results_offset);
6358 
6359 	/* Read idle_chk section */
6360 	dump_buf += qed_read_section_hdr(dump_buf,
6361 					 &section_name, &num_section_params);
6362 	if (strcmp(section_name, "idle_chk") || num_section_params != 1)
6363 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6364 	dump_buf += qed_read_param(dump_buf,
6365 				   &param_name, &param_str_val, &num_rules);
6366 	if (strcmp(param_name, "num_rules"))
6367 		return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6368 
6369 	if (num_rules) {
6370 		u32 rules_print_size;
6371 
6372 		/* Print FW output */
6373 		results_offset +=
6374 		    sprintf(qed_get_buf_ptr(results_buf,
6375 					    results_offset),
6376 			    "FW_IDLE_CHECK:\n");
6377 		rules_print_size =
6378 			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
6379 						      dump_buf_end, num_rules,
6380 						      true,
6381 						      results_buf ?
6382 						      results_buf +
6383 						      results_offset : NULL,
6384 						      num_errors, num_warnings);
6385 		results_offset += rules_print_size;
6386 		if (!rules_print_size)
6387 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6388 
6389 		/* Print LSI output */
6390 		results_offset +=
6391 		    sprintf(qed_get_buf_ptr(results_buf,
6392 					    results_offset),
6393 			    "\nLSI_IDLE_CHECK:\n");
6394 		rules_print_size =
6395 			qed_parse_idle_chk_dump_rules(p_hwfn, dump_buf,
6396 						      dump_buf_end, num_rules,
6397 						      false,
6398 						      results_buf ?
6399 						      results_buf +
6400 						      results_offset : NULL,
6401 						      num_errors, num_warnings);
6402 		results_offset += rules_print_size;
6403 		if (!rules_print_size)
6404 			return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
6405 	}
6406 
6407 	/* Print errors/warnings count */
6408 	if (*num_errors)
6409 		results_offset +=
6410 		    sprintf(qed_get_buf_ptr(results_buf,
6411 					    results_offset),
6412 			    "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
6413 			    *num_errors, *num_warnings);
6414 	else if (*num_warnings)
6415 		results_offset +=
6416 		    sprintf(qed_get_buf_ptr(results_buf,
6417 					    results_offset),
6418 			    "\nIdle Check completed successfully (with %d warnings)\n",
6419 			    *num_warnings);
6420 	else
6421 		results_offset +=
6422 		    sprintf(qed_get_buf_ptr(results_buf,
6423 					    results_offset),
6424 			    "\nIdle Check completed successfully\n");
6425 
6426 	/* Add 1 for string NULL termination */
6427 	*parsed_results_bytes = results_offset + 1;
6428 
6429 	return DBG_STATUS_OK;
6430 }
6431 
6432 /* Frees the specified MCP Trace meta data */
6433 static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn,
6434 				    struct mcp_trace_meta *meta)
6435 {
6436 	u32 i;
6437 
6438 	/* Release modules */
6439 	if (meta->modules) {
6440 		for (i = 0; i < meta->modules_num; i++)
6441 			kfree(meta->modules[i]);
6442 		kfree(meta->modules);
6443 	}
6444 
6445 	/* Release formats */
6446 	if (meta->formats) {
6447 		for (i = 0; i < meta->formats_num; i++)
6448 			kfree(meta->formats[i].format_str);
6449 		kfree(meta->formats);
6450 	}
6451 }
6452 
6453 /* Allocates and fills MCP Trace meta data based on the specified meta data
6454  * dump buffer.
6455  * Returns debug status code.
6456  */
6457 static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn,
6458 						const u32 *meta_buf,
6459 						struct mcp_trace_meta *meta)
6460 {
6461 	u8 *meta_buf_bytes = (u8 *)meta_buf;
6462 	u32 offset = 0, signature, i;
6463 
6464 	memset(meta, 0, sizeof(*meta));
6465 
6466 	/* Read first signature */
6467 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6468 	if (signature != NVM_MAGIC_VALUE)
6469 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6470 
6471 	/* Read no. of modules and allocate memory for their pointers */
6472 	meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6473 	meta->modules = kzalloc(meta->modules_num * sizeof(char *), GFP_KERNEL);
6474 	if (!meta->modules)
6475 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6476 
6477 	/* Allocate and read all module strings */
6478 	for (i = 0; i < meta->modules_num; i++) {
6479 		u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
6480 
6481 		*(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
6482 		if (!(*(meta->modules + i))) {
6483 			/* Update number of modules to be released */
6484 			meta->modules_num = i ? i - 1 : 0;
6485 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6486 		}
6487 
6488 		qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
6489 				      *(meta->modules + i));
6490 		if (module_len > MCP_TRACE_MAX_MODULE_LEN)
6491 			(*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
6492 	}
6493 
6494 	/* Read second signature */
6495 	signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6496 	if (signature != NVM_MAGIC_VALUE)
6497 		return DBG_STATUS_INVALID_TRACE_SIGNATURE;
6498 
6499 	/* Read number of formats and allocate memory for all formats */
6500 	meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
6501 	meta->formats = kzalloc(meta->formats_num *
6502 				sizeof(struct mcp_trace_format),
6503 				GFP_KERNEL);
6504 	if (!meta->formats)
6505 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6506 
6507 	/* Allocate and read all strings */
6508 	for (i = 0; i < meta->formats_num; i++) {
6509 		struct mcp_trace_format *format_ptr = &meta->formats[i];
6510 		u8 format_len;
6511 
6512 		format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
6513 							   &offset);
6514 		format_len =
6515 		    (format_ptr->data &
6516 		     MCP_TRACE_FORMAT_LEN_MASK) >> MCP_TRACE_FORMAT_LEN_SHIFT;
6517 		format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
6518 		if (!format_ptr->format_str) {
6519 			/* Update number of modules to be released */
6520 			meta->formats_num = i ? i - 1 : 0;
6521 			return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
6522 		}
6523 
6524 		qed_read_str_from_buf(meta_buf_bytes,
6525 				      &offset,
6526 				      format_len, format_ptr->format_str);
6527 	}
6528 
6529 	return DBG_STATUS_OK;
6530 }
6531 
6532 /* Parses an MCP Trace dump buffer.
6533  * If result_buf is not NULL, the MCP Trace results are printed to it.
6534  * In any case, the required results buffer size is assigned to
6535  * parsed_results_bytes.
6536  * The parsing status is returned.
6537  */
6538 static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
6539 						u32 *dump_buf,
6540 						u32 num_dumped_dwords,
6541 						char *results_buf,
6542 						u32 *parsed_results_bytes)
6543 {
6544 	u32 end_offset, bytes_left, trace_data_dwords, trace_meta_dwords;
6545 	u32 param_mask, param_shift, param_num_val, num_section_params;
6546 	const char *section_name, *param_name, *param_str_val;
6547 	u32 offset, results_offset = 0;
6548 	struct mcp_trace_meta meta;
6549 	struct mcp_trace *trace;
6550 	enum dbg_status status;
6551 	const u32 *meta_buf;
6552 	u8 *trace_buf;
6553 
6554 	*parsed_results_bytes = 0;
6555 
6556 	/* Read global_params section */
6557 	dump_buf += qed_read_section_hdr(dump_buf,
6558 					 &section_name, &num_section_params);
6559 	if (strcmp(section_name, "global_params"))
6560 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6561 
6562 	/* Print global params */
6563 	dump_buf += qed_print_section_params(dump_buf,
6564 					     num_section_params,
6565 					     results_buf, &results_offset);
6566 
6567 	/* Read trace_data section */
6568 	dump_buf += qed_read_section_hdr(dump_buf,
6569 					 &section_name, &num_section_params);
6570 	if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
6571 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6572 	dump_buf += qed_read_param(dump_buf,
6573 				   &param_name, &param_str_val, &param_num_val);
6574 	if (strcmp(param_name, "size"))
6575 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6576 	trace_data_dwords = param_num_val;
6577 
6578 	/* Prepare trace info */
6579 	trace = (struct mcp_trace *)dump_buf;
6580 	trace_buf = (u8 *)dump_buf + sizeof(*trace);
6581 	offset = trace->trace_oldest;
6582 	end_offset = trace->trace_prod;
6583 	bytes_left = qed_cyclic_sub(end_offset, offset, trace->size);
6584 	dump_buf += trace_data_dwords;
6585 
6586 	/* Read meta_data section */
6587 	dump_buf += qed_read_section_hdr(dump_buf,
6588 					 &section_name, &num_section_params);
6589 	if (strcmp(section_name, "mcp_trace_meta"))
6590 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6591 	dump_buf += qed_read_param(dump_buf,
6592 				   &param_name, &param_str_val, &param_num_val);
6593 	if (strcmp(param_name, "size"))
6594 		return DBG_STATUS_MCP_TRACE_BAD_DATA;
6595 	trace_meta_dwords = param_num_val;
6596 
6597 	/* Choose meta data buffer */
6598 	if (!trace_meta_dwords) {
6599 		/* Dump doesn't include meta data */
6600 		if (!s_mcp_trace_meta.ptr)
6601 			return DBG_STATUS_MCP_TRACE_NO_META;
6602 		meta_buf = s_mcp_trace_meta.ptr;
6603 	} else {
6604 		/* Dump includes meta data */
6605 		meta_buf = dump_buf;
6606 	}
6607 
6608 	/* Allocate meta data memory */
6609 	status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &meta);
6610 	if (status != DBG_STATUS_OK)
6611 		goto free_mem;
6612 
6613 	/* Ignore the level and modules masks - just print everything that is
6614 	 * already in the buffer.
6615 	 */
6616 	while (bytes_left) {
6617 		struct mcp_trace_format *format_ptr;
6618 		u8 format_level, format_module;
6619 		u32 params[3] = { 0, 0, 0 };
6620 		u32 header, format_idx, i;
6621 
6622 		if (bytes_left < MFW_TRACE_ENTRY_SIZE) {
6623 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6624 			goto free_mem;
6625 		}
6626 
6627 		header = qed_read_from_cyclic_buf(trace_buf,
6628 						  &offset,
6629 						  trace->size,
6630 						  MFW_TRACE_ENTRY_SIZE);
6631 		bytes_left -= MFW_TRACE_ENTRY_SIZE;
6632 		format_idx = header & MFW_TRACE_EVENTID_MASK;
6633 
6634 		/* Skip message if its  index doesn't exist in the meta data */
6635 		if (format_idx > meta.formats_num) {
6636 			u8 format_size =
6637 			    (u8)((header &
6638 				  MFW_TRACE_PRM_SIZE_MASK) >>
6639 				 MFW_TRACE_PRM_SIZE_SHIFT);
6640 
6641 			if (bytes_left < format_size) {
6642 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6643 				goto free_mem;
6644 			}
6645 
6646 			offset = qed_cyclic_add(offset,
6647 						format_size, trace->size);
6648 			bytes_left -= format_size;
6649 			continue;
6650 		}
6651 
6652 		format_ptr = &meta.formats[format_idx];
6653 
6654 		for (i = 0,
6655 		     param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
6656 		     MCP_TRACE_FORMAT_P1_SIZE_SHIFT;
6657 		     i < MCP_TRACE_FORMAT_MAX_PARAMS;
6658 		     i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
6659 		     param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
6660 			/* Extract param size (0..3) */
6661 			u8 param_size =
6662 			    (u8)((format_ptr->data &
6663 				  param_mask) >> param_shift);
6664 
6665 			/* If the param size is zero, there are no other
6666 			 * parameters.
6667 			 */
6668 			if (!param_size)
6669 				break;
6670 
6671 			/* Size is encoded using 2 bits, where 3 is used to
6672 			 * encode 4.
6673 			 */
6674 			if (param_size == 3)
6675 				param_size = 4;
6676 
6677 			if (bytes_left < param_size) {
6678 				status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6679 				goto free_mem;
6680 			}
6681 
6682 			params[i] = qed_read_from_cyclic_buf(trace_buf,
6683 							     &offset,
6684 							     trace->size,
6685 							     param_size);
6686 
6687 			bytes_left -= param_size;
6688 		}
6689 
6690 		format_level =
6691 		    (u8)((format_ptr->data &
6692 			  MCP_TRACE_FORMAT_LEVEL_MASK) >>
6693 			 MCP_TRACE_FORMAT_LEVEL_SHIFT);
6694 		format_module =
6695 		    (u8)((format_ptr->data &
6696 			  MCP_TRACE_FORMAT_MODULE_MASK) >>
6697 			 MCP_TRACE_FORMAT_MODULE_SHIFT);
6698 		if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) {
6699 			status = DBG_STATUS_MCP_TRACE_BAD_DATA;
6700 			goto free_mem;
6701 		}
6702 
6703 		/* Print current message to results buffer */
6704 		results_offset +=
6705 		    sprintf(qed_get_buf_ptr(results_buf,
6706 					    results_offset), "%s %-8s: ",
6707 			    s_mcp_trace_level_str[format_level],
6708 			    meta.modules[format_module]);
6709 		results_offset +=
6710 		    sprintf(qed_get_buf_ptr(results_buf,
6711 					    results_offset),
6712 			    format_ptr->format_str, params[0], params[1],
6713 			    params[2]);
6714 	}
6715 
6716 free_mem:
6717 	*parsed_results_bytes = results_offset + 1;
6718 	qed_mcp_trace_free_meta(p_hwfn, &meta);
6719 	return status;
6720 }
6721 
6722 /* Parses a Reg FIFO dump buffer.
6723  * If result_buf is not NULL, the Reg FIFO results are printed to it.
6724  * In any case, the required results buffer size is assigned to
6725  * parsed_results_bytes.
6726  * The parsing status is returned.
6727  */
6728 static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
6729 					       u32 *dump_buf,
6730 					       u32 num_dumped_dwords,
6731 					       char *results_buf,
6732 					       u32 *parsed_results_bytes)
6733 {
6734 	const char *section_name, *param_name, *param_str_val;
6735 	u32 param_num_val, num_section_params, num_elements;
6736 	struct reg_fifo_element *elements;
6737 	u8 i, j, err_val, vf_val;
6738 	u32 results_offset = 0;
6739 	char vf_str[4];
6740 
6741 	/* Read global_params section */
6742 	dump_buf += qed_read_section_hdr(dump_buf,
6743 					 &section_name, &num_section_params);
6744 	if (strcmp(section_name, "global_params"))
6745 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6746 
6747 	/* Print global params */
6748 	dump_buf += qed_print_section_params(dump_buf,
6749 					     num_section_params,
6750 					     results_buf, &results_offset);
6751 
6752 	/* Read reg_fifo_data section */
6753 	dump_buf += qed_read_section_hdr(dump_buf,
6754 					 &section_name, &num_section_params);
6755 	if (strcmp(section_name, "reg_fifo_data"))
6756 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6757 	dump_buf += qed_read_param(dump_buf,
6758 				   &param_name, &param_str_val, &param_num_val);
6759 	if (strcmp(param_name, "size"))
6760 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6761 	if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
6762 		return DBG_STATUS_REG_FIFO_BAD_DATA;
6763 	num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
6764 	elements = (struct reg_fifo_element *)dump_buf;
6765 
6766 	/* Decode elements */
6767 	for (i = 0; i < num_elements; i++) {
6768 		bool err_printed = false;
6769 
6770 		/* Discover if element belongs to a VF or a PF */
6771 		vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
6772 		if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
6773 			sprintf(vf_str, "%s", "N/A");
6774 		else
6775 			sprintf(vf_str, "%d", vf_val);
6776 
6777 		/* Add parsed element to parsed buffer */
6778 		results_offset +=
6779 		    sprintf(qed_get_buf_ptr(results_buf,
6780 					    results_offset),
6781 			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
6782 			    elements[i].data,
6783 			    (u32)GET_FIELD(elements[i].data,
6784 					   REG_FIFO_ELEMENT_ADDRESS) *
6785 			    REG_FIFO_ELEMENT_ADDR_FACTOR,
6786 			    s_access_strs[GET_FIELD(elements[i].data,
6787 						    REG_FIFO_ELEMENT_ACCESS)],
6788 			    (u32)GET_FIELD(elements[i].data,
6789 					   REG_FIFO_ELEMENT_PF),
6790 			    vf_str,
6791 			    (u32)GET_FIELD(elements[i].data,
6792 					   REG_FIFO_ELEMENT_PORT),
6793 			    s_privilege_strs[GET_FIELD(elements[i].data,
6794 						REG_FIFO_ELEMENT_PRIVILEGE)],
6795 			    s_protection_strs[GET_FIELD(elements[i].data,
6796 						REG_FIFO_ELEMENT_PROTECTION)],
6797 			    s_master_strs[GET_FIELD(elements[i].data,
6798 						REG_FIFO_ELEMENT_MASTER)]);
6799 
6800 		/* Print errors */
6801 		for (j = 0,
6802 		     err_val = GET_FIELD(elements[i].data,
6803 					 REG_FIFO_ELEMENT_ERROR);
6804 		     j < ARRAY_SIZE(s_reg_fifo_error_strs);
6805 		     j++, err_val >>= 1) {
6806 			if (err_val & 0x1) {
6807 				if (err_printed)
6808 					results_offset +=
6809 					    sprintf(qed_get_buf_ptr
6810 						    (results_buf,
6811 						     results_offset), ", ");
6812 				results_offset +=
6813 				    sprintf(qed_get_buf_ptr
6814 					    (results_buf, results_offset), "%s",
6815 					    s_reg_fifo_error_strs[j]);
6816 				err_printed = true;
6817 			}
6818 		}
6819 
6820 		results_offset +=
6821 		    sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
6822 	}
6823 
6824 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
6825 						  results_offset),
6826 				  "fifo contained %d elements", num_elements);
6827 
6828 	/* Add 1 for string NULL termination */
6829 	*parsed_results_bytes = results_offset + 1;
6830 
6831 	return DBG_STATUS_OK;
6832 }
6833 
6834 static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
6835 						  *element, char
6836 						  *results_buf,
6837 						  u32 *results_offset,
6838 						  u32 *parsed_results_bytes)
6839 {
6840 	const struct igu_fifo_addr_data *found_addr = NULL;
6841 	u8 source, err_type, i, is_cleanup;
6842 	char parsed_addr_data[32];
6843 	char parsed_wr_data[256];
6844 	u32 wr_data, prod_cons;
6845 	bool is_wr_cmd, is_pf;
6846 	u16 cmd_addr;
6847 	u64 dword12;
6848 
6849 	/* Dword12 (dword index 1 and 2) contains bits 32..95 of the
6850 	 * FIFO element.
6851 	 */
6852 	dword12 = ((u64)element->dword2 << 32) | element->dword1;
6853 	is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
6854 	is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
6855 	cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
6856 	source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
6857 	err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
6858 
6859 	if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
6860 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6861 	if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
6862 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6863 
6864 	/* Find address data */
6865 	for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
6866 		const struct igu_fifo_addr_data *curr_addr =
6867 			&s_igu_fifo_addr_data[i];
6868 
6869 		if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
6870 		    curr_addr->end_addr)
6871 			found_addr = curr_addr;
6872 	}
6873 
6874 	if (!found_addr)
6875 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6876 
6877 	/* Prepare parsed address data */
6878 	switch (found_addr->type) {
6879 	case IGU_ADDR_TYPE_MSIX_MEM:
6880 		sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
6881 		break;
6882 	case IGU_ADDR_TYPE_WRITE_INT_ACK:
6883 	case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
6884 		sprintf(parsed_addr_data,
6885 			" SB = 0x%x", cmd_addr - found_addr->start_addr);
6886 		break;
6887 	default:
6888 		parsed_addr_data[0] = '\0';
6889 	}
6890 
6891 	if (!is_wr_cmd) {
6892 		parsed_wr_data[0] = '\0';
6893 		goto out;
6894 	}
6895 
6896 	/* Prepare parsed write data */
6897 	wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
6898 	prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
6899 	is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
6900 
6901 	if (source == IGU_SRC_ATTN) {
6902 		sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
6903 	} else {
6904 		if (is_cleanup) {
6905 			u8 cleanup_val, cleanup_type;
6906 
6907 			cleanup_val =
6908 				GET_FIELD(wr_data,
6909 					  IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
6910 			cleanup_type =
6911 			    GET_FIELD(wr_data,
6912 				      IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
6913 
6914 			sprintf(parsed_wr_data,
6915 				"cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
6916 				cleanup_val ? "set" : "clear",
6917 				cleanup_type);
6918 		} else {
6919 			u8 update_flag, en_dis_int_for_sb, segment;
6920 			u8 timer_mask;
6921 
6922 			update_flag = GET_FIELD(wr_data,
6923 						IGU_FIFO_WR_DATA_UPDATE_FLAG);
6924 			en_dis_int_for_sb =
6925 				GET_FIELD(wr_data,
6926 					  IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
6927 			segment = GET_FIELD(wr_data,
6928 					    IGU_FIFO_WR_DATA_SEGMENT);
6929 			timer_mask = GET_FIELD(wr_data,
6930 					       IGU_FIFO_WR_DATA_TIMER_MASK);
6931 
6932 			sprintf(parsed_wr_data,
6933 				"cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
6934 				prod_cons,
6935 				update_flag ? "update" : "nop",
6936 				en_dis_int_for_sb
6937 				? (en_dis_int_for_sb == 1 ? "disable" : "nop")
6938 				: "enable",
6939 				segment ? "attn" : "regular",
6940 				timer_mask);
6941 		}
6942 	}
6943 out:
6944 	/* Add parsed element to parsed buffer */
6945 	*results_offset += sprintf(qed_get_buf_ptr(results_buf,
6946 						   *results_offset),
6947 				   "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
6948 				   element->dword2, element->dword1,
6949 				   element->dword0,
6950 				   is_pf ? "pf" : "vf",
6951 				   GET_FIELD(element->dword0,
6952 					     IGU_FIFO_ELEMENT_DWORD0_FID),
6953 				   s_igu_fifo_source_strs[source],
6954 				   is_wr_cmd ? "wr" : "rd",
6955 				   cmd_addr,
6956 				   (!is_pf && found_addr->vf_desc)
6957 				   ? found_addr->vf_desc
6958 				   : found_addr->desc,
6959 				   parsed_addr_data,
6960 				   parsed_wr_data,
6961 				   s_igu_fifo_error_strs[err_type]);
6962 
6963 	return DBG_STATUS_OK;
6964 }
6965 
6966 /* Parses an IGU FIFO dump buffer.
6967  * If result_buf is not NULL, the IGU FIFO results are printed to it.
6968  * In any case, the required results buffer size is assigned to
6969  * parsed_results_bytes.
6970  * The parsing status is returned.
6971  */
6972 static enum dbg_status qed_parse_igu_fifo_dump(struct qed_hwfn *p_hwfn,
6973 					       u32 *dump_buf,
6974 					       u32 num_dumped_dwords,
6975 					       char *results_buf,
6976 					       u32 *parsed_results_bytes)
6977 {
6978 	const char *section_name, *param_name, *param_str_val;
6979 	u32 param_num_val, num_section_params, num_elements;
6980 	struct igu_fifo_element *elements;
6981 	enum dbg_status status;
6982 	u32 results_offset = 0;
6983 	u8 i;
6984 
6985 	/* Read global_params section */
6986 	dump_buf += qed_read_section_hdr(dump_buf,
6987 					 &section_name, &num_section_params);
6988 	if (strcmp(section_name, "global_params"))
6989 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
6990 
6991 	/* Print global params */
6992 	dump_buf += qed_print_section_params(dump_buf,
6993 					     num_section_params,
6994 					     results_buf, &results_offset);
6995 
6996 	/* Read igu_fifo_data section */
6997 	dump_buf += qed_read_section_hdr(dump_buf,
6998 					 &section_name, &num_section_params);
6999 	if (strcmp(section_name, "igu_fifo_data"))
7000 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7001 	dump_buf += qed_read_param(dump_buf,
7002 				   &param_name, &param_str_val, &param_num_val);
7003 	if (strcmp(param_name, "size"))
7004 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7005 	if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
7006 		return DBG_STATUS_IGU_FIFO_BAD_DATA;
7007 	num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
7008 	elements = (struct igu_fifo_element *)dump_buf;
7009 
7010 	/* Decode elements */
7011 	for (i = 0; i < num_elements; i++) {
7012 		status = qed_parse_igu_fifo_element(&elements[i],
7013 						    results_buf,
7014 						    &results_offset,
7015 						    parsed_results_bytes);
7016 		if (status != DBG_STATUS_OK)
7017 			return status;
7018 	}
7019 
7020 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7021 						  results_offset),
7022 				  "fifo contained %d elements", num_elements);
7023 
7024 	/* Add 1 for string NULL termination */
7025 	*parsed_results_bytes = results_offset + 1;
7026 
7027 	return DBG_STATUS_OK;
7028 }
7029 
7030 static enum dbg_status
7031 qed_parse_protection_override_dump(struct qed_hwfn *p_hwfn,
7032 				   u32 *dump_buf,
7033 				   u32 num_dumped_dwords,
7034 				   char *results_buf,
7035 				   u32 *parsed_results_bytes)
7036 {
7037 	const char *section_name, *param_name, *param_str_val;
7038 	u32 param_num_val, num_section_params, num_elements;
7039 	struct protection_override_element *elements;
7040 	u32 results_offset = 0;
7041 	u8 i;
7042 
7043 	/* Read global_params section */
7044 	dump_buf += qed_read_section_hdr(dump_buf,
7045 					 &section_name, &num_section_params);
7046 	if (strcmp(section_name, "global_params"))
7047 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7048 
7049 	/* Print global params */
7050 	dump_buf += qed_print_section_params(dump_buf,
7051 					     num_section_params,
7052 					     results_buf, &results_offset);
7053 
7054 	/* Read protection_override_data section */
7055 	dump_buf += qed_read_section_hdr(dump_buf,
7056 					 &section_name, &num_section_params);
7057 	if (strcmp(section_name, "protection_override_data"))
7058 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7059 	dump_buf += qed_read_param(dump_buf,
7060 				   &param_name, &param_str_val, &param_num_val);
7061 	if (strcmp(param_name, "size"))
7062 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7063 	if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
7064 		return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
7065 	num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
7066 	elements = (struct protection_override_element *)dump_buf;
7067 
7068 	/* Decode elements */
7069 	for (i = 0; i < num_elements; i++) {
7070 		u32 address = GET_FIELD(elements[i].data,
7071 					PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
7072 			      PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
7073 
7074 		results_offset +=
7075 		    sprintf(qed_get_buf_ptr(results_buf,
7076 					    results_offset),
7077 			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
7078 			    i, address,
7079 			    (u32)GET_FIELD(elements[i].data,
7080 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
7081 			    (u32)GET_FIELD(elements[i].data,
7082 				      PROTECTION_OVERRIDE_ELEMENT_READ),
7083 			    (u32)GET_FIELD(elements[i].data,
7084 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
7085 			    s_protection_strs[GET_FIELD(elements[i].data,
7086 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
7087 			    s_protection_strs[GET_FIELD(elements[i].data,
7088 				PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
7089 	}
7090 
7091 	results_offset += sprintf(qed_get_buf_ptr(results_buf,
7092 						  results_offset),
7093 				  "protection override contained %d elements",
7094 				  num_elements);
7095 
7096 	/* Add 1 for string NULL termination */
7097 	*parsed_results_bytes = results_offset + 1;
7098 
7099 	return DBG_STATUS_OK;
7100 }
7101 
7102 /* Parses a FW Asserts dump buffer.
7103  * If result_buf is not NULL, the FW Asserts results are printed to it.
7104  * In any case, the required results buffer size is assigned to
7105  * parsed_results_bytes.
7106  * The parsing status is returned.
7107  */
7108 static enum dbg_status qed_parse_fw_asserts_dump(struct qed_hwfn *p_hwfn,
7109 						 u32 *dump_buf,
7110 						 u32 num_dumped_dwords,
7111 						 char *results_buf,
7112 						 u32 *parsed_results_bytes)
7113 {
7114 	u32 num_section_params, param_num_val, i, results_offset = 0;
7115 	const char *param_name, *param_str_val, *section_name;
7116 	bool last_section_found = false;
7117 
7118 	*parsed_results_bytes = 0;
7119 
7120 	/* Read global_params section */
7121 	dump_buf += qed_read_section_hdr(dump_buf,
7122 					 &section_name, &num_section_params);
7123 	if (strcmp(section_name, "global_params"))
7124 		return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7125 
7126 	/* Print global params */
7127 	dump_buf += qed_print_section_params(dump_buf,
7128 					     num_section_params,
7129 					     results_buf, &results_offset);
7130 
7131 	while (!last_section_found) {
7132 		dump_buf += qed_read_section_hdr(dump_buf,
7133 						 &section_name,
7134 						 &num_section_params);
7135 		if (!strcmp(section_name, "fw_asserts")) {
7136 			/* Extract params */
7137 			const char *storm_letter = NULL;
7138 			u32 storm_dump_size = 0;
7139 
7140 			for (i = 0; i < num_section_params; i++) {
7141 				dump_buf += qed_read_param(dump_buf,
7142 							   &param_name,
7143 							   &param_str_val,
7144 							   &param_num_val);
7145 				if (!strcmp(param_name, "storm"))
7146 					storm_letter = param_str_val;
7147 				else if (!strcmp(param_name, "size"))
7148 					storm_dump_size = param_num_val;
7149 				else
7150 					return
7151 					    DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7152 			}
7153 
7154 			if (!storm_letter || !storm_dump_size)
7155 				return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7156 
7157 			/* Print data */
7158 			results_offset +=
7159 			    sprintf(qed_get_buf_ptr(results_buf,
7160 						    results_offset),
7161 				    "\n%sSTORM_ASSERT: size=%d\n",
7162 				    storm_letter, storm_dump_size);
7163 			for (i = 0; i < storm_dump_size; i++, dump_buf++)
7164 				results_offset +=
7165 				    sprintf(qed_get_buf_ptr(results_buf,
7166 							    results_offset),
7167 					    "%08x\n", *dump_buf);
7168 		} else if (!strcmp(section_name, "last")) {
7169 			last_section_found = true;
7170 		} else {
7171 			return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
7172 		}
7173 	}
7174 
7175 	/* Add 1 for string NULL termination */
7176 	*parsed_results_bytes = results_offset + 1;
7177 
7178 	return DBG_STATUS_OK;
7179 }
7180 
7181 /***************************** Public Functions *******************************/
7182 
7183 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
7184 {
7185 	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
7186 	u8 buf_id;
7187 
7188 	/* Convert binary data to debug arrays */
7189 	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
7190 		s_user_dbg_arrays[buf_id].ptr =
7191 			(u32 *)(bin_ptr + buf_array[buf_id].offset);
7192 		s_user_dbg_arrays[buf_id].size_in_dwords =
7193 			BYTES_TO_DWORDS(buf_array[buf_id].length);
7194 	}
7195 
7196 	return DBG_STATUS_OK;
7197 }
7198 
7199 const char *qed_dbg_get_status_str(enum dbg_status status)
7200 {
7201 	return (status <
7202 		MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
7203 }
7204 
7205 enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
7206 						  u32 *dump_buf,
7207 						  u32 num_dumped_dwords,
7208 						  u32 *results_buf_size)
7209 {
7210 	u32 num_errors, num_warnings;
7211 
7212 	return qed_parse_idle_chk_dump(p_hwfn,
7213 				       dump_buf,
7214 				       num_dumped_dwords,
7215 				       NULL,
7216 				       results_buf_size,
7217 				       &num_errors, &num_warnings);
7218 }
7219 
7220 enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
7221 					   u32 *dump_buf,
7222 					   u32 num_dumped_dwords,
7223 					   char *results_buf,
7224 					   u32 *num_errors, u32 *num_warnings)
7225 {
7226 	u32 parsed_buf_size;
7227 
7228 	return qed_parse_idle_chk_dump(p_hwfn,
7229 				       dump_buf,
7230 				       num_dumped_dwords,
7231 				       results_buf,
7232 				       &parsed_buf_size,
7233 				       num_errors, num_warnings);
7234 }
7235 
7236 void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size)
7237 {
7238 	s_mcp_trace_meta.ptr = data;
7239 	s_mcp_trace_meta.size_in_dwords = size;
7240 }
7241 
7242 enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
7243 						   u32 *dump_buf,
7244 						   u32 num_dumped_dwords,
7245 						   u32 *results_buf_size)
7246 {
7247 	return qed_parse_mcp_trace_dump(p_hwfn,
7248 					dump_buf,
7249 					num_dumped_dwords,
7250 					NULL, results_buf_size);
7251 }
7252 
7253 enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
7254 					    u32 *dump_buf,
7255 					    u32 num_dumped_dwords,
7256 					    char *results_buf)
7257 {
7258 	u32 parsed_buf_size;
7259 
7260 	return qed_parse_mcp_trace_dump(p_hwfn,
7261 					dump_buf,
7262 					num_dumped_dwords,
7263 					results_buf, &parsed_buf_size);
7264 }
7265 
7266 enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7267 						  u32 *dump_buf,
7268 						  u32 num_dumped_dwords,
7269 						  u32 *results_buf_size)
7270 {
7271 	return qed_parse_reg_fifo_dump(p_hwfn,
7272 				       dump_buf,
7273 				       num_dumped_dwords,
7274 				       NULL, results_buf_size);
7275 }
7276 
7277 enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
7278 					   u32 *dump_buf,
7279 					   u32 num_dumped_dwords,
7280 					   char *results_buf)
7281 {
7282 	u32 parsed_buf_size;
7283 
7284 	return qed_parse_reg_fifo_dump(p_hwfn,
7285 				       dump_buf,
7286 				       num_dumped_dwords,
7287 				       results_buf, &parsed_buf_size);
7288 }
7289 
7290 enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
7291 						  u32 *dump_buf,
7292 						  u32 num_dumped_dwords,
7293 						  u32 *results_buf_size)
7294 {
7295 	return qed_parse_igu_fifo_dump(p_hwfn,
7296 				       dump_buf,
7297 				       num_dumped_dwords,
7298 				       NULL, results_buf_size);
7299 }
7300 
7301 enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
7302 					   u32 *dump_buf,
7303 					   u32 num_dumped_dwords,
7304 					   char *results_buf)
7305 {
7306 	u32 parsed_buf_size;
7307 
7308 	return qed_parse_igu_fifo_dump(p_hwfn,
7309 				       dump_buf,
7310 				       num_dumped_dwords,
7311 				       results_buf, &parsed_buf_size);
7312 }
7313 
7314 enum dbg_status
7315 qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
7316 					     u32 *dump_buf,
7317 					     u32 num_dumped_dwords,
7318 					     u32 *results_buf_size)
7319 {
7320 	return qed_parse_protection_override_dump(p_hwfn,
7321 						  dump_buf,
7322 						  num_dumped_dwords,
7323 						  NULL, results_buf_size);
7324 }
7325 
7326 enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
7327 						      u32 *dump_buf,
7328 						      u32 num_dumped_dwords,
7329 						      char *results_buf)
7330 {
7331 	u32 parsed_buf_size;
7332 
7333 	return qed_parse_protection_override_dump(p_hwfn,
7334 						  dump_buf,
7335 						  num_dumped_dwords,
7336 						  results_buf,
7337 						  &parsed_buf_size);
7338 }
7339 
7340 enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
7341 						    u32 *dump_buf,
7342 						    u32 num_dumped_dwords,
7343 						    u32 *results_buf_size)
7344 {
7345 	return qed_parse_fw_asserts_dump(p_hwfn,
7346 					 dump_buf,
7347 					 num_dumped_dwords,
7348 					 NULL, results_buf_size);
7349 }
7350 
7351 enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
7352 					     u32 *dump_buf,
7353 					     u32 num_dumped_dwords,
7354 					     char *results_buf)
7355 {
7356 	u32 parsed_buf_size;
7357 
7358 	return qed_parse_fw_asserts_dump(p_hwfn,
7359 					 dump_buf,
7360 					 num_dumped_dwords,
7361 					 results_buf, &parsed_buf_size);
7362 }
7363 
7364 enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
7365 				   struct dbg_attn_block_result *results)
7366 {
7367 	struct user_dbg_array *block_attn, *pstrings;
7368 	const u32 *block_attn_name_offsets;
7369 	enum dbg_attn_type attn_type;
7370 	const char *block_name;
7371 	u8 num_regs, i, j;
7372 
7373 	num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
7374 	attn_type = (enum dbg_attn_type)
7375 		    GET_FIELD(results->data,
7376 			      DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
7377 	block_name = s_block_info_arr[results->block_id].name;
7378 
7379 	if (!s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
7380 	    !s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
7381 	    !s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
7382 		return DBG_STATUS_DBG_ARRAY_NOT_SET;
7383 
7384 	block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS];
7385 	block_attn_name_offsets = &block_attn->ptr[results->names_offset];
7386 
7387 	/* Go over registers with a non-zero attention status */
7388 	for (i = 0; i < num_regs; i++) {
7389 		struct dbg_attn_reg_result *reg_result;
7390 		struct dbg_attn_bit_mapping *mapping;
7391 		u8 num_reg_attn, bit_idx = 0;
7392 
7393 		reg_result = &results->reg_results[i];
7394 		num_reg_attn = GET_FIELD(reg_result->data,
7395 					 DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
7396 		block_attn = &s_user_dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES];
7397 		mapping = &((struct dbg_attn_bit_mapping *)
7398 			    block_attn->ptr)[reg_result->block_attn_offset];
7399 
7400 		pstrings = &s_user_dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS];
7401 
7402 		/* Go over attention status bits */
7403 		for (j = 0; j < num_reg_attn; j++) {
7404 			u16 attn_idx_val = GET_FIELD(mapping[j].data,
7405 						     DBG_ATTN_BIT_MAPPING_VAL);
7406 			const char *attn_name, *attn_type_str, *masked_str;
7407 			u32 name_offset, sts_addr;
7408 
7409 			/* Check if bit mask should be advanced (due to unused
7410 			 * bits).
7411 			 */
7412 			if (GET_FIELD(mapping[j].data,
7413 				      DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
7414 				bit_idx += (u8)attn_idx_val;
7415 				continue;
7416 			}
7417 
7418 			/* Check current bit index */
7419 			if (!(reg_result->sts_val & BIT(bit_idx))) {
7420 				bit_idx++;
7421 				continue;
7422 			}
7423 
7424 			/* Find attention name */
7425 			name_offset = block_attn_name_offsets[attn_idx_val];
7426 			attn_name = &((const char *)
7427 				      pstrings->ptr)[name_offset];
7428 			attn_type_str = attn_type == ATTN_TYPE_INTERRUPT ?
7429 					"Interrupt" : "Parity";
7430 			masked_str = reg_result->mask_val & BIT(bit_idx) ?
7431 				     " [masked]" : "";
7432 			sts_addr = GET_FIELD(reg_result->data,
7433 					     DBG_ATTN_REG_RESULT_STS_ADDRESS);
7434 			DP_NOTICE(p_hwfn,
7435 				  "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
7436 				  block_name, attn_type_str, attn_name,
7437 				  sts_addr, bit_idx, masked_str);
7438 
7439 			bit_idx++;
7440 		}
7441 	}
7442 
7443 	return DBG_STATUS_OK;
7444 }
7445 
7446 /* Wrapper for unifying the idle_chk and mcp_trace api */
7447 static enum dbg_status
7448 qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
7449 				   u32 *dump_buf,
7450 				   u32 num_dumped_dwords,
7451 				   char *results_buf)
7452 {
7453 	u32 num_errors, num_warnnings;
7454 
7455 	return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
7456 					  results_buf, &num_errors,
7457 					  &num_warnnings);
7458 }
7459 
7460 /* Feature meta data lookup table */
7461 static struct {
7462 	char *name;
7463 	enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
7464 				    struct qed_ptt *p_ptt, u32 *size);
7465 	enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
7466 					struct qed_ptt *p_ptt, u32 *dump_buf,
7467 					u32 buf_size, u32 *dumped_dwords);
7468 	enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
7469 					 u32 *dump_buf, u32 num_dumped_dwords,
7470 					 char *results_buf);
7471 	enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
7472 					    u32 *dump_buf,
7473 					    u32 num_dumped_dwords,
7474 					    u32 *results_buf_size);
7475 } qed_features_lookup[] = {
7476 	{
7477 	"grc", qed_dbg_grc_get_dump_buf_size,
7478 		    qed_dbg_grc_dump, NULL, NULL}, {
7479 	"idle_chk",
7480 		    qed_dbg_idle_chk_get_dump_buf_size,
7481 		    qed_dbg_idle_chk_dump,
7482 		    qed_print_idle_chk_results_wrapper,
7483 		    qed_get_idle_chk_results_buf_size}, {
7484 	"mcp_trace",
7485 		    qed_dbg_mcp_trace_get_dump_buf_size,
7486 		    qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
7487 		    qed_get_mcp_trace_results_buf_size}, {
7488 	"reg_fifo",
7489 		    qed_dbg_reg_fifo_get_dump_buf_size,
7490 		    qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
7491 		    qed_get_reg_fifo_results_buf_size}, {
7492 	"igu_fifo",
7493 		    qed_dbg_igu_fifo_get_dump_buf_size,
7494 		    qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
7495 		    qed_get_igu_fifo_results_buf_size}, {
7496 	"protection_override",
7497 		    qed_dbg_protection_override_get_dump_buf_size,
7498 		    qed_dbg_protection_override_dump,
7499 		    qed_print_protection_override_results,
7500 		    qed_get_protection_override_results_buf_size}, {
7501 	"fw_asserts",
7502 		    qed_dbg_fw_asserts_get_dump_buf_size,
7503 		    qed_dbg_fw_asserts_dump,
7504 		    qed_print_fw_asserts_results,
7505 		    qed_get_fw_asserts_results_buf_size},};
7506 
7507 static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
7508 {
7509 	u32 i, precision = 80;
7510 
7511 	if (!p_text_buf)
7512 		return;
7513 
7514 	pr_notice("\n%.*s", precision, p_text_buf);
7515 	for (i = precision; i < text_size; i += precision)
7516 		pr_cont("%.*s", precision, p_text_buf + i);
7517 	pr_cont("\n");
7518 }
7519 
7520 #define QED_RESULTS_BUF_MIN_SIZE 16
7521 /* Generic function for decoding debug feature info */
7522 static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
7523 				      enum qed_dbg_features feature_idx)
7524 {
7525 	struct qed_dbg_feature *feature =
7526 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7527 	u32 text_size_bytes, null_char_pos, i;
7528 	enum dbg_status rc;
7529 	char *text_buf;
7530 
7531 	/* Check if feature supports formatting capability */
7532 	if (!qed_features_lookup[feature_idx].results_buf_size)
7533 		return DBG_STATUS_OK;
7534 
7535 	/* Obtain size of formatted output */
7536 	rc = qed_features_lookup[feature_idx].
7537 		results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
7538 				 feature->dumped_dwords, &text_size_bytes);
7539 	if (rc != DBG_STATUS_OK)
7540 		return rc;
7541 
7542 	/* Make sure that the allocated size is a multiple of dword (4 bytes) */
7543 	null_char_pos = text_size_bytes - 1;
7544 	text_size_bytes = (text_size_bytes + 3) & ~0x3;
7545 
7546 	if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
7547 		DP_NOTICE(p_hwfn->cdev,
7548 			  "formatted size of feature was too small %d. Aborting\n",
7549 			  text_size_bytes);
7550 		return DBG_STATUS_INVALID_ARGS;
7551 	}
7552 
7553 	/* Allocate temp text buf */
7554 	text_buf = vzalloc(text_size_bytes);
7555 	if (!text_buf)
7556 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7557 
7558 	/* Decode feature opcodes to string on temp buf */
7559 	rc = qed_features_lookup[feature_idx].
7560 		print_results(p_hwfn, (u32 *)feature->dump_buf,
7561 			      feature->dumped_dwords, text_buf);
7562 	if (rc != DBG_STATUS_OK) {
7563 		vfree(text_buf);
7564 		return rc;
7565 	}
7566 
7567 	/* Replace the original null character with a '\n' character.
7568 	 * The bytes that were added as a result of the dword alignment are also
7569 	 * padded with '\n' characters.
7570 	 */
7571 	for (i = null_char_pos; i < text_size_bytes; i++)
7572 		text_buf[i] = '\n';
7573 
7574 	/* Dump printable feature to log */
7575 	if (p_hwfn->cdev->dbg_params.print_data)
7576 		qed_dbg_print_feature(text_buf, text_size_bytes);
7577 
7578 	/* Free the old dump_buf and point the dump_buf to the newly allocagted
7579 	 * and formatted text buffer.
7580 	 */
7581 	vfree(feature->dump_buf);
7582 	feature->dump_buf = text_buf;
7583 	feature->buf_size = text_size_bytes;
7584 	feature->dumped_dwords = text_size_bytes / 4;
7585 	return rc;
7586 }
7587 
7588 /* Generic function for performing the dump of a debug feature. */
7589 static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
7590 				    struct qed_ptt *p_ptt,
7591 				    enum qed_dbg_features feature_idx)
7592 {
7593 	struct qed_dbg_feature *feature =
7594 	    &p_hwfn->cdev->dbg_params.features[feature_idx];
7595 	u32 buf_size_dwords;
7596 	enum dbg_status rc;
7597 
7598 	DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
7599 		  qed_features_lookup[feature_idx].name);
7600 
7601 	/* Dump_buf was already allocated need to free (this can happen if dump
7602 	 * was called but file was never read).
7603 	 * We can't use the buffer as is since size may have changed.
7604 	 */
7605 	if (feature->dump_buf) {
7606 		vfree(feature->dump_buf);
7607 		feature->dump_buf = NULL;
7608 	}
7609 
7610 	/* Get buffer size from hsi, allocate accordingly, and perform the
7611 	 * dump.
7612 	 */
7613 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
7614 						       &buf_size_dwords);
7615 	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7616 		return rc;
7617 	feature->buf_size = buf_size_dwords * sizeof(u32);
7618 	feature->dump_buf = vmalloc(feature->buf_size);
7619 	if (!feature->dump_buf)
7620 		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
7621 
7622 	rc = qed_features_lookup[feature_idx].
7623 		perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
7624 			     feature->buf_size / sizeof(u32),
7625 			     &feature->dumped_dwords);
7626 
7627 	/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
7628 	 * In this case the buffer holds valid binary data, but we wont able
7629 	 * to parse it (since parsing relies on data in NVRAM which is only
7630 	 * accessible when MFW is responsive). skip the formatting but return
7631 	 * success so that binary data is provided.
7632 	 */
7633 	if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
7634 		return DBG_STATUS_OK;
7635 
7636 	if (rc != DBG_STATUS_OK)
7637 		return rc;
7638 
7639 	/* Format output */
7640 	rc = format_feature(p_hwfn, feature_idx);
7641 	return rc;
7642 }
7643 
7644 int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7645 {
7646 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
7647 }
7648 
7649 int qed_dbg_grc_size(struct qed_dev *cdev)
7650 {
7651 	return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
7652 }
7653 
7654 int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7655 {
7656 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
7657 			       num_dumped_bytes);
7658 }
7659 
7660 int qed_dbg_idle_chk_size(struct qed_dev *cdev)
7661 {
7662 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
7663 }
7664 
7665 int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7666 {
7667 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
7668 			       num_dumped_bytes);
7669 }
7670 
7671 int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
7672 {
7673 	return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
7674 }
7675 
7676 int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
7677 {
7678 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
7679 			       num_dumped_bytes);
7680 }
7681 
7682 int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
7683 {
7684 	return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
7685 }
7686 
7687 int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
7688 				u32 *num_dumped_bytes)
7689 {
7690 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
7691 			       num_dumped_bytes);
7692 }
7693 
7694 int qed_dbg_protection_override_size(struct qed_dev *cdev)
7695 {
7696 	return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
7697 }
7698 
7699 int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
7700 		       u32 *num_dumped_bytes)
7701 {
7702 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
7703 			       num_dumped_bytes);
7704 }
7705 
7706 int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
7707 {
7708 	return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
7709 }
7710 
7711 int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
7712 		      u32 *num_dumped_bytes)
7713 {
7714 	return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
7715 			       num_dumped_bytes);
7716 }
7717 
7718 int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
7719 {
7720 	return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
7721 }
7722 
7723 /* Defines the amount of bytes allocated for recording the length of debugfs
7724  * feature buffer.
7725  */
7726 #define REGDUMP_HEADER_SIZE			sizeof(u32)
7727 #define REGDUMP_HEADER_FEATURE_SHIFT		24
7728 #define REGDUMP_HEADER_ENGINE_SHIFT		31
7729 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT	30
7730 enum debug_print_features {
7731 	OLD_MODE = 0,
7732 	IDLE_CHK = 1,
7733 	GRC_DUMP = 2,
7734 	MCP_TRACE = 3,
7735 	REG_FIFO = 4,
7736 	PROTECTION_OVERRIDE = 5,
7737 	IGU_FIFO = 6,
7738 	PHY = 7,
7739 	FW_ASSERTS = 8,
7740 };
7741 
7742 static u32 qed_calc_regdump_header(enum debug_print_features feature,
7743 				   int engine, u32 feature_size, u8 omit_engine)
7744 {
7745 	/* Insert the engine, feature and mode inside the header and combine it
7746 	 * with feature size.
7747 	 */
7748 	return feature_size | (feature << REGDUMP_HEADER_FEATURE_SHIFT) |
7749 	       (omit_engine << REGDUMP_HEADER_OMIT_ENGINE_SHIFT) |
7750 	       (engine << REGDUMP_HEADER_ENGINE_SHIFT);
7751 }
7752 
7753 int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
7754 {
7755 	u8 cur_engine, omit_engine = 0, org_engine;
7756 	u32 offset = 0, feature_size;
7757 	int rc;
7758 
7759 	if (cdev->num_hwfns == 1)
7760 		omit_engine = 1;
7761 
7762 	org_engine = qed_get_debug_engine(cdev);
7763 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7764 		/* Collect idle_chks and grcDump for each hw function */
7765 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7766 			   "obtaining idle_chk and grcdump for current engine\n");
7767 		qed_set_debug_engine(cdev, cur_engine);
7768 
7769 		/* First idle_chk */
7770 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7771 				      REGDUMP_HEADER_SIZE, &feature_size);
7772 		if (!rc) {
7773 			*(u32 *)((u8 *)buffer + offset) =
7774 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7775 						    feature_size, omit_engine);
7776 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7777 		} else {
7778 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7779 		}
7780 
7781 		/* Second idle_chk */
7782 		rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
7783 				      REGDUMP_HEADER_SIZE, &feature_size);
7784 		if (!rc) {
7785 			*(u32 *)((u8 *)buffer + offset) =
7786 			    qed_calc_regdump_header(IDLE_CHK, cur_engine,
7787 						    feature_size, omit_engine);
7788 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7789 		} else {
7790 			DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
7791 		}
7792 
7793 		/* reg_fifo dump */
7794 		rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
7795 				      REGDUMP_HEADER_SIZE, &feature_size);
7796 		if (!rc) {
7797 			*(u32 *)((u8 *)buffer + offset) =
7798 			    qed_calc_regdump_header(REG_FIFO, cur_engine,
7799 						    feature_size, omit_engine);
7800 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7801 		} else {
7802 			DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
7803 		}
7804 
7805 		/* igu_fifo dump */
7806 		rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
7807 				      REGDUMP_HEADER_SIZE, &feature_size);
7808 		if (!rc) {
7809 			*(u32 *)((u8 *)buffer + offset) =
7810 			    qed_calc_regdump_header(IGU_FIFO, cur_engine,
7811 						    feature_size, omit_engine);
7812 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7813 		} else {
7814 			DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
7815 		}
7816 
7817 		/* protection_override dump */
7818 		rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
7819 						 REGDUMP_HEADER_SIZE,
7820 						 &feature_size);
7821 		if (!rc) {
7822 			*(u32 *)((u8 *)buffer + offset) =
7823 			    qed_calc_regdump_header(PROTECTION_OVERRIDE,
7824 						    cur_engine,
7825 						    feature_size, omit_engine);
7826 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7827 		} else {
7828 			DP_ERR(cdev,
7829 			       "qed_dbg_protection_override failed. rc = %d\n",
7830 			       rc);
7831 		}
7832 
7833 		/* fw_asserts dump */
7834 		rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
7835 					REGDUMP_HEADER_SIZE, &feature_size);
7836 		if (!rc) {
7837 			*(u32 *)((u8 *)buffer + offset) =
7838 			    qed_calc_regdump_header(FW_ASSERTS, cur_engine,
7839 						    feature_size, omit_engine);
7840 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7841 		} else {
7842 			DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
7843 			       rc);
7844 		}
7845 
7846 		/* GRC dump - must be last because when mcp stuck it will
7847 		 * clutter idle_chk, reg_fifo, ...
7848 		 */
7849 		rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
7850 				 REGDUMP_HEADER_SIZE, &feature_size);
7851 		if (!rc) {
7852 			*(u32 *)((u8 *)buffer + offset) =
7853 			    qed_calc_regdump_header(GRC_DUMP, cur_engine,
7854 						    feature_size, omit_engine);
7855 			offset += (feature_size + REGDUMP_HEADER_SIZE);
7856 		} else {
7857 			DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
7858 		}
7859 	}
7860 
7861 	/* mcp_trace */
7862 	rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
7863 			       REGDUMP_HEADER_SIZE, &feature_size);
7864 	if (!rc) {
7865 		*(u32 *)((u8 *)buffer + offset) =
7866 		    qed_calc_regdump_header(MCP_TRACE, cur_engine,
7867 					    feature_size, omit_engine);
7868 		offset += (feature_size + REGDUMP_HEADER_SIZE);
7869 	} else {
7870 		DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
7871 	}
7872 
7873 	qed_set_debug_engine(cdev, org_engine);
7874 
7875 	return 0;
7876 }
7877 
7878 int qed_dbg_all_data_size(struct qed_dev *cdev)
7879 {
7880 	u8 cur_engine, org_engine;
7881 	u32 regs_len = 0;
7882 
7883 	org_engine = qed_get_debug_engine(cdev);
7884 	for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
7885 		/* Engine specific */
7886 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
7887 			   "calculating idle_chk and grcdump register length for current engine\n");
7888 		qed_set_debug_engine(cdev, cur_engine);
7889 		regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7890 			    REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
7891 			    REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
7892 			    REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
7893 			    REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
7894 			    REGDUMP_HEADER_SIZE +
7895 			    qed_dbg_protection_override_size(cdev) +
7896 			    REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
7897 	}
7898 
7899 	/* Engine common */
7900 	regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
7901 	qed_set_debug_engine(cdev, org_engine);
7902 
7903 	return regs_len;
7904 }
7905 
7906 int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
7907 		    enum qed_dbg_features feature, u32 *num_dumped_bytes)
7908 {
7909 	struct qed_hwfn *p_hwfn =
7910 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7911 	struct qed_dbg_feature *qed_feature =
7912 		&cdev->dbg_params.features[feature];
7913 	enum dbg_status dbg_rc;
7914 	struct qed_ptt *p_ptt;
7915 	int rc = 0;
7916 
7917 	/* Acquire ptt */
7918 	p_ptt = qed_ptt_acquire(p_hwfn);
7919 	if (!p_ptt)
7920 		return -EINVAL;
7921 
7922 	/* Get dump */
7923 	dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
7924 	if (dbg_rc != DBG_STATUS_OK) {
7925 		DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
7926 			   qed_dbg_get_status_str(dbg_rc));
7927 		*num_dumped_bytes = 0;
7928 		rc = -EINVAL;
7929 		goto out;
7930 	}
7931 
7932 	DP_VERBOSE(cdev, QED_MSG_DEBUG,
7933 		   "copying debugfs feature to external buffer\n");
7934 	memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
7935 	*num_dumped_bytes = cdev->dbg_params.features[feature].dumped_dwords *
7936 			    4;
7937 
7938 out:
7939 	qed_ptt_release(p_hwfn, p_ptt);
7940 	return rc;
7941 }
7942 
7943 int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
7944 {
7945 	struct qed_hwfn *p_hwfn =
7946 		&cdev->hwfns[cdev->dbg_params.engine_for_debug];
7947 	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
7948 	struct qed_dbg_feature *qed_feature =
7949 		&cdev->dbg_params.features[feature];
7950 	u32 buf_size_dwords;
7951 	enum dbg_status rc;
7952 
7953 	if (!p_ptt)
7954 		return -EINVAL;
7955 
7956 	rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
7957 						   &buf_size_dwords);
7958 	if (rc != DBG_STATUS_OK)
7959 		buf_size_dwords = 0;
7960 
7961 	qed_ptt_release(p_hwfn, p_ptt);
7962 	qed_feature->buf_size = buf_size_dwords * sizeof(u32);
7963 	return qed_feature->buf_size;
7964 }
7965 
7966 u8 qed_get_debug_engine(struct qed_dev *cdev)
7967 {
7968 	return cdev->dbg_params.engine_for_debug;
7969 }
7970 
7971 void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
7972 {
7973 	DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
7974 		   engine_number);
7975 	cdev->dbg_params.engine_for_debug = engine_number;
7976 }
7977 
7978 void qed_dbg_pf_init(struct qed_dev *cdev)
7979 {
7980 	const u8 *dbg_values;
7981 
7982 	/* Debug values are after init values.
7983 	 * The offset is the first dword of the file.
7984 	 */
7985 	dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
7986 	qed_dbg_set_bin_ptr((u8 *)dbg_values);
7987 	qed_dbg_user_set_bin_ptr((u8 *)dbg_values);
7988 }
7989 
7990 void qed_dbg_pf_exit(struct qed_dev *cdev)
7991 {
7992 	struct qed_dbg_feature *feature = NULL;
7993 	enum qed_dbg_features feature_idx;
7994 
7995 	/* Debug features' buffers may be allocated if debug feature was used
7996 	 * but dump wasn't called.
7997 	 */
7998 	for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
7999 		feature = &cdev->dbg_params.features[feature_idx];
8000 		if (feature->dump_buf) {
8001 			vfree(feature->dump_buf);
8002 			feature->dump_buf = NULL;
8003 		}
8004 	}
8005 }
8006