xref: /illumos-gate/usr/src/uts/common/io/nxge/npi/npi_rxdma.c (revision 9b6224883056ca9db111541974efeb6a4de0c074)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <npi_rxdma.h>
27 #include <npi_rx_rd64.h>
28 #include <npi_rx_wr64.h>
29 #include <nxge_common.h>
30 
31 #define	 RXDMA_RESET_TRY_COUNT	4
32 #define	 RXDMA_RESET_DELAY	5
33 
34 #define	 RXDMA_OP_DISABLE	0
35 #define	 RXDMA_OP_ENABLE	1
36 #define	 RXDMA_OP_RESET	2
37 
38 #define	 RCR_TIMEOUT_ENABLE	1
39 #define	 RCR_TIMEOUT_DISABLE	2
40 #define	 RCR_THRESHOLD	4
41 
42 /* assume weight is in byte frames unit */
43 #define	WEIGHT_FACTOR 3/2
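/*
 * Note: WEIGHT_FACTOR relies on plain textual substitution, so it is only
 * safe to the right of a multiplication: (weight * WEIGHT_FACTOR) expands
 * to (weight * 3 / 2), whereas written the other way around 3/2 would
 * truncate to 1 before the multiply.
 */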
44 
45 uint64_t rdc_dmc_offset[] = {
46 	RXDMA_CFIG1_REG, RXDMA_CFIG2_REG, RBR_CFIG_A_REG, RBR_CFIG_B_REG,
47 	RBR_KICK_REG, RBR_STAT_REG, RBR_HDH_REG, RBR_HDL_REG,
48 	RCRCFIG_A_REG, RCRCFIG_B_REG, RCRSTAT_A_REG, RCRSTAT_B_REG,
49 	RCRSTAT_C_REG, RX_DMA_ENT_MSK_REG, RX_DMA_CTL_STAT_REG, RCR_FLSH_REG,
50 	RXMISC_DISCARD_REG
51 };
52 
53 const char *rdc_dmc_name[] = {
54 	"RXDMA_CFIG1", "RXDMA_CFIG2", "RBR_CFIG_A", "RBR_CFIG_B",
55 	"RBR_KICK", "RBR_STAT", "RBR_HDH", "RBR_HDL",
56 	"RCRCFIG_A", "RCRCFIG_B", "RCRSTAT_A", "RCRSTAT_B",
57 	"RCRSTAT_C", "RX_DMA_ENT_MSK", "RX_DMA_CTL_STAT", "RCR_FLSH",
58 	"RXMISC_DISCARD"
59 };
60 
61 uint64_t rdc_fzc_offset [] = {
62 	RX_LOG_PAGE_VLD_REG, RX_LOG_PAGE_MASK1_REG, RX_LOG_PAGE_VAL1_REG,
63 	RX_LOG_PAGE_MASK2_REG, RX_LOG_PAGE_VAL2_REG, RX_LOG_PAGE_RELO1_REG,
64 	RX_LOG_PAGE_RELO2_REG, RX_LOG_PAGE_HDL_REG, RDC_RED_PARA_REG,
65 	RED_DIS_CNT_REG
66 };
67 
68 
69 const char *rdc_fzc_name [] = {
70 	"RX_LOG_PAGE_VLD", "RX_LOG_PAGE_MASK1", "RX_LOG_PAGE_VAL1",
71 	"RX_LOG_PAGE_MASK2", "RX_LOG_PAGE_VAL2", "RX_LOG_PAGE_RELO1",
72 	"RX_LOG_PAGE_RELO2", "RX_LOG_PAGE_HDL", "RDC_RED_PARA", "RED_DIS_CNT"
73 };
74 
75 
76 /*
77  * Dump the MEM_ADD register first so all the data registers
78  * will have valid data buffer pointers.
79  */
80 uint64_t rx_fzc_offset[] = {
81 	RX_DMA_CK_DIV_REG, DEF_PT0_RDC_REG, DEF_PT1_RDC_REG, DEF_PT2_RDC_REG,
82 	DEF_PT3_RDC_REG, RX_ADDR_MD_REG, PT_DRR_WT0_REG, PT_DRR_WT1_REG,
83 	PT_DRR_WT2_REG, PT_DRR_WT3_REG, PT_USE0_REG, PT_USE1_REG,
84 	PT_USE2_REG, PT_USE3_REG, RED_RAN_INIT_REG, RX_ADDR_MD_REG,
85 	RDMC_PRE_PAR_ERR_REG, RDMC_SHA_PAR_ERR_REG,
86 	RDMC_MEM_DATA4_REG, RDMC_MEM_DATA3_REG, RDMC_MEM_DATA2_REG,
87 	RDMC_MEM_DATA1_REG, RDMC_MEM_DATA0_REG,
88 	RDMC_MEM_ADDR_REG,
89 	RX_CTL_DAT_FIFO_STAT_REG, RX_CTL_DAT_FIFO_MASK_REG,
90 	RX_CTL_DAT_FIFO_STAT_DBG_REG,
91 	RDMC_TRAINING_VECTOR_REG,
92 };
93 
94 
95 const char *rx_fzc_name[] = {
96 	"RX_DMA_CK_DIV", "DEF_PT0_RDC", "DEF_PT1_RDC", "DEF_PT2_RDC",
97 	"DEF_PT3_RDC", "RX_ADDR_MD", "PT_DRR_WT0", "PT_DRR_WT1",
98 	"PT_DRR_WT2", "PT_DRR_WT3", "PT_USE0", "PT_USE1",
99 	"PT_USE2", "PT_USE3", "RED_RAN_INIT", "RX_ADDR_MD",
100 	"RDMC_PRE_PAR_ERR", "RDMC_SHA_PAR_ERR",
101 	"RDMC_MEM_DATA4", "RDMC_MEM_DATA3", "RDMC_MEM_DATA2",
102 	"RDMC_MEM_DATA1", "RDMC_MEM_DATA0",
103 	"RDMC_MEM_ADDR",
104 	"RX_CTL_DAT_FIFO_STAT", "RX_CTL_DAT_FIFO_MASK",
105 	"RX_CTL_DAT_FIFO_STAT_DBG",
106 	"RDMC_TRAINING_VECTOR"
107 };
108 
109 
110 npi_status_t
111 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op);
112 npi_status_t
113 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op,
114 				uint16_t param);
115 
116 
117 /*
118  * npi_rxdma_dump_rdc_regs
119  * Dumps the contents of the per-channel RDC (DMC) CSRs
120  *
121  * Input:
122  *      handle:	opaque handle interpreted by the underlying OS
123  *         rdc:      RX DMA number
124  *
125  * return:
126  *     NPI_SUCCESS
127  *     NPI_RXDMA_RDC_INVALID
128  *
129  */
130 npi_status_t
131 npi_rxdma_dump_rdc_regs(npi_handle_t handle, uint8_t rdc)
132 {
133 
134 	uint64_t value, offset;
135 	int num_regs, i;
136 #ifdef NPI_DEBUG
137 	extern uint64_t npi_debug_level;
138 	uint64_t old_npi_debug_level = npi_debug_level;
139 #endif
140 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
141 	if (!RXDMA_CHANNEL_VALID(rdc)) {
142 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
143 		    "npi_rxdma_dump_rdc_regs"
144 		    " Illegal RDC number %d \n",
145 		    rdc));
146 		return (NPI_RXDMA_RDC_INVALID);
147 	}
148 #ifdef NPI_DEBUG
149 	npi_debug_level |= DUMP_ALWAYS;
150 #endif
151 	num_regs = sizeof (rdc_dmc_offset) / sizeof (uint64_t);
152 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
153 	    "\nDMC Register Dump for Channel %d\n",
154 	    rdc));
155 	for (i = 0; i < num_regs; i++) {
156 		RXDMA_REG_READ64(handle, rdc_dmc_offset[i], rdc, &value);
157 		offset = NXGE_RXDMA_OFFSET(rdc_dmc_offset[i], handle.is_vraddr,
158 		    rdc);
159 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
160 		    "%08llx %s\t %08llx \n",
161 		    offset, rdc_dmc_name[i], value));
162 	}
163 
164 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
165 	    "\n Register Dump for Channel %d done\n",
166 	    rdc));
167 #ifdef NPI_DEBUG
168 	npi_debug_level = old_npi_debug_level;
169 #endif
170 	return (NPI_SUCCESS);
171 }
172 
173 /*
174  * npi_rxdma_dump_fzc_regs
175  * Dumps the contents of the shared FZC_DMC registers
176  *
177  * Input:
178  *      handle:	opaque handle interpreted by the underlying OS
179  *
180  * return:
181  *     NPI_SUCCESS
182  */
183 npi_status_t
184 npi_rxdma_dump_fzc_regs(npi_handle_t handle)
185 {
186 
187 	uint64_t value;
188 	int num_regs, i;
189 
190 
191 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
192 	    "\nFZC_DMC Common Register Dump\n"));
193 	num_regs = sizeof (rx_fzc_offset) / sizeof (uint64_t);
194 
195 	for (i = 0; i < num_regs; i++) {
196 		NXGE_REG_RD64(handle, rx_fzc_offset[i], &value);
197 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
198 		    "0x%08llx %s\t 0x%08llx \n",
199 		    rx_fzc_offset[i],
200 		    rx_fzc_name[i], value));
201 	}
202 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
203 	    "\n FZC_DMC Register Dump Done \n"));
204 
205 	return (NPI_SUCCESS);
206 }
207 
208 
209 
210 /*
211  * per rdc config functions
212  */
213 npi_status_t
214 npi_rxdma_cfg_logical_page_disable(npi_handle_t handle, uint8_t rdc,
215 				    uint8_t page_num)
216 {
217 	log_page_vld_t page_vld;
218 	uint64_t valid_offset;
219 
220 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
221 	if (!RXDMA_CHANNEL_VALID(rdc)) {
222 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
223 		    "rxdma_cfg_logical_page_disable"
224 		    " Illegal RDC number %d \n",
225 		    rdc));
226 		return (NPI_RXDMA_RDC_INVALID);
227 	}
228 
229 	ASSERT(RXDMA_PAGE_VALID(page_num));
230 	if (!RXDMA_PAGE_VALID(page_num)) {
231 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
232 		    "rxdma_cfg_logical_page_disable"
233 		    " Illegal page number %d \n",
234 		    page_num));
235 		return (NPI_RXDMA_PAGE_INVALID);
236 	}
237 
238 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
239 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
240 
241 	if (page_num == 0)
242 		page_vld.bits.ldw.page0 = 0;
243 
244 	if (page_num == 1)
245 		page_vld.bits.ldw.page1 = 0;
246 
247 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
248 	return (NPI_SUCCESS);
249 
250 }
251 
252 npi_status_t
253 npi_rxdma_cfg_logical_page(npi_handle_t handle, uint8_t rdc,
254 			    dma_log_page_t *pg_cfg)
255 {
256 	log_page_vld_t page_vld;
257 	log_page_mask_t page_mask;
258 	log_page_value_t page_value;
259 	log_page_relo_t page_reloc;
260 	uint64_t value_offset, reloc_offset, mask_offset;
261 	uint64_t valid_offset;
262 
263 	value_offset = 0;
264 	reloc_offset = 0;
265 	mask_offset = 0;
266 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
267 	if (!RXDMA_CHANNEL_VALID(rdc)) {
268 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
269 		    " rxdma_cfg_logical_page"
270 		    " Illegal RDC number %d \n",
271 		    rdc));
272 		return (NPI_RXDMA_RDC_INVALID);
273 	}
274 
275 	ASSERT(RXDMA_PAGE_VALID(pg_cfg->page_num));
276 	if (!RXDMA_PAGE_VALID(pg_cfg->page_num)) {
277 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
278 		    " rxdma_cfg_logical_page"
279 		    " Illegal page number %d \n",
280 		    pg_cfg->page_num));
281 		return (NPI_RXDMA_PAGE_INVALID);
282 	}
283 
284 	valid_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VLD_REG, rdc);
285 	NXGE_REG_RD64(handle, valid_offset, &page_vld.value);
286 
287 	if (!pg_cfg->valid) {
288 		if (pg_cfg->page_num == 0)
289 			page_vld.bits.ldw.page0 = 0;
290 
291 		if (pg_cfg->page_num == 1)
292 			page_vld.bits.ldw.page1 = 0;
293 		NXGE_REG_WR64(handle, valid_offset, page_vld.value);
294 		return (NPI_SUCCESS);
295 	}
296 
297 	if (pg_cfg->page_num == 0) {
298 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK1_REG, rdc);
299 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL1_REG, rdc);
300 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO1_REG, rdc);
301 		page_vld.bits.ldw.page0 = 1;
302 	}
303 
304 	if (pg_cfg->page_num == 1) {
305 		mask_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_MASK2_REG, rdc);
306 		value_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_VAL2_REG, rdc);
307 		reloc_offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_RELO2_REG, rdc);
308 		page_vld.bits.ldw.page1 = 1;
309 	}
310 
311 
312 	page_vld.bits.ldw.func = pg_cfg->func_num;
313 
314 	page_mask.value = 0;
315 	page_value.value = 0;
316 	page_reloc.value = 0;
317 
318 
319 	page_mask.bits.ldw.mask = pg_cfg->mask >> LOG_PAGE_ADDR_SHIFT;
320 	page_value.bits.ldw.value = pg_cfg->value >> LOG_PAGE_ADDR_SHIFT;
321 	page_reloc.bits.ldw.relo = pg_cfg->reloc >> LOG_PAGE_ADDR_SHIFT;
322 
323 
324 	NXGE_REG_WR64(handle, mask_offset, page_mask.value);
325 	NXGE_REG_WR64(handle, value_offset, page_value.value);
326 	NXGE_REG_WR64(handle, reloc_offset, page_reloc.value);
327 
328 
329 /* enable the logical page */
330 	NXGE_REG_WR64(handle, valid_offset, page_vld.value);
331 	return (NPI_SUCCESS);
332 }
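
/*
 * Usage sketch (illustrative only; NPI_RXDMA_EXAMPLES is a hypothetical
 * guard used just for these sketches, so the code below is normally
 * compiled out): program and enable logical page 0 of an RDC.  The
 * mask/value/reloc fields are passed as full addresses; the function
 * above shifts them right by LOG_PAGE_ADDR_SHIFT before writing the
 * registers.  The zero mask/value/reloc used here are placeholders.
 */
#ifdef	NPI_RXDMA_EXAMPLES
static npi_status_t
npi_rxdma_example_logical_page0(npi_handle_t handle, uint8_t rdc,
    uint8_t func_num)
{
	dma_log_page_t pg;

	bzero(&pg, sizeof (pg));
	pg.page_num = 0;	/* page 0 or 1 */
	pg.valid = 1;		/* enable the page */
	pg.func_num = func_num;
	pg.mask = 0;		/* placeholder mask/value/reloc */
	pg.value = 0;
	pg.reloc = 0;
	return (npi_rxdma_cfg_logical_page(handle, rdc, &pg));
}
#endif	/* NPI_RXDMA_EXAMPLES */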
333 
334 npi_status_t
335 npi_rxdma_cfg_logical_page_handle(npi_handle_t handle, uint8_t rdc,
336 				    uint64_t page_handle)
337 {
338 	uint64_t offset;
339 	log_page_hdl_t page_hdl;
340 
341 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
342 	if (!RXDMA_CHANNEL_VALID(rdc)) {
343 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
344 		    "rxdma_cfg_logical_page_handle"
345 		    " Illegal RDC number %d \n", rdc));
346 		return (NPI_RXDMA_RDC_INVALID);
347 	}
348 
349 	page_hdl.value = 0;
350 
351 	page_hdl.bits.ldw.handle = (uint32_t)page_handle;
352 	offset = REG_FZC_RDC_OFFSET(RX_LOG_PAGE_HDL_REG, rdc);
353 	NXGE_REG_WR64(handle, offset, page_hdl.value);
354 
355 	return (NPI_SUCCESS);
356 }
357 
358 /*
359  * RX DMA functions
360  */
361 npi_status_t
362 npi_rxdma_cfg_rdc_ctl(npi_handle_t handle, uint8_t rdc, uint8_t op)
363 {
364 
365 	rxdma_cfig1_t cfg;
366 	uint32_t count = RXDMA_RESET_TRY_COUNT;
367 	uint32_t delay_time = RXDMA_RESET_DELAY;
368 	uint32_t error = NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RESET_ERR, rdc);
369 
370 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
371 	if (!RXDMA_CHANNEL_VALID(rdc)) {
372 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
373 		    "npi_rxdma_cfg_rdc_ctl"
374 		    " Illegal RDC number %d \n", rdc));
375 		return (NPI_RXDMA_RDC_INVALID);
376 	}
377 
378 
379 	switch (op) {
380 		case RXDMA_OP_ENABLE:
381 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
382 			    &cfg.value);
383 			cfg.bits.ldw.en = 1;
384 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
385 			    rdc, cfg.value);
386 
387 			NXGE_DELAY(delay_time);
388 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
389 			    &cfg.value);
390 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
391 				NXGE_DELAY(delay_time);
392 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
393 				    &cfg.value);
394 			}
395 
396 			if (cfg.bits.ldw.qst == 0) {
397 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
398 				    " npi_rxdma_cfg_rdc_ctl"
399 				    " RXDMA_OP_ENABLE Failed for RDC %d \n",
400 				    rdc));
401 				return (error);
402 			}
403 
404 			break;
405 		case RXDMA_OP_DISABLE:
406 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
407 			    &cfg.value);
408 			cfg.bits.ldw.en = 0;
409 			RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG,
410 			    rdc, cfg.value);
411 
412 			NXGE_DELAY(delay_time);
413 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
414 			    &cfg.value);
415 			while ((count--) && (cfg.bits.ldw.qst == 0)) {
416 				NXGE_DELAY(delay_time);
417 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
418 				    &cfg.value);
419 			}
420 			if (cfg.bits.ldw.qst == 0) {
421 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
422 				    " npi_rxdma_cfg_rdc_ctl"
423 				    " RXDMA_OP_DISABLE Failed for RDC %d \n",
424 				    rdc));
425 				return (error);
426 			}
427 
428 			break;
429 		case RXDMA_OP_RESET:
430 			cfg.value = 0;
431 			cfg.bits.ldw.rst = 1;
432 			RXDMA_REG_WRITE64(handle,
433 			    RXDMA_CFIG1_REG,
434 			    rdc, cfg.value);
435 			NXGE_DELAY(delay_time);
436 			RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
437 			    &cfg.value);
438 			while ((count--) && (cfg.bits.ldw.rst)) {
439 				NXGE_DELAY(delay_time);
440 				RXDMA_REG_READ64(handle, RXDMA_CFIG1_REG, rdc,
441 				    &cfg.value);
442 			}
443 			if (count == 0) {
444 				NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
445 				    " npi_rxdma_cfg_rdc_ctl"
446 				    " Reset Failed for RDC %d \n",
447 				    rdc));
448 				return (error);
449 			}
450 			break;
451 		default:
452 			return (NPI_RXDMA_SW_PARAM_ERROR);
453 	}
454 
455 	return (NPI_SUCCESS);
456 }
457 
458 npi_status_t
459 npi_rxdma_cfg_rdc_enable(npi_handle_t handle, uint8_t rdc)
460 {
461 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_ENABLE));
462 }
463 
464 npi_status_t
465 npi_rxdma_cfg_rdc_disable(npi_handle_t handle, uint8_t rdc)
466 {
467 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_DISABLE));
468 }
469 
470 npi_status_t
471 npi_rxdma_cfg_rdc_reset(npi_handle_t handle, uint8_t rdc)
472 {
473 	return (npi_rxdma_cfg_rdc_ctl(handle, rdc, RXDMA_OP_RESET));
474 }
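
/*
 * Usage sketch (illustrative only; NPI_RXDMA_EXAMPLES is a hypothetical
 * guard, so this is normally compiled out): the enable/disable/reset
 * wrappers above all funnel into npi_rxdma_cfg_rdc_ctl(), which writes
 * RXDMA_CFIG1 and then polls the QST bit up to RXDMA_RESET_TRY_COUNT
 * times with RXDMA_RESET_DELAY between polls.  A typical restart resets
 * the channel, reprograms the rings (npi_rxdma_cfg_rdc_ring(), below)
 * and only then re-enables it.
 */
#ifdef	NPI_RXDMA_EXAMPLES
static npi_status_t
npi_rxdma_example_restart(npi_handle_t handle, uint8_t rdc)
{
	npi_status_t rs;

	if ((rs = npi_rxdma_cfg_rdc_reset(handle, rdc)) != NPI_SUCCESS)
		return (rs);
	/* ring and mailbox programming would normally go here */
	return (npi_rxdma_cfg_rdc_enable(handle, rdc));
}
#endif	/* NPI_RXDMA_EXAMPLES */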
475 
476 /*
477  * npi_rxdma_cfg_default_port_rdc()
478  * Set the default rdc for the port
479  *
480  * Inputs:
481  *	handle:		register handle interpreted by the underlying OS
482  *	portnm:		Physical Port Number
483  *	rdc:	RX DMA Channel number
484  *
485  * Return:
486  * NPI_SUCCESS
487  * NPI_RXDMA_RDC_INVALID
488  * NPI_RXDMA_PORT_INVALID
489  *
490  */
491 npi_status_t npi_rxdma_cfg_default_port_rdc(npi_handle_t handle,
492 				    uint8_t portnm, uint8_t rdc)
493 {
494 
495 	uint64_t offset;
496 	def_pt_rdc_t cfg;
497 
498 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
499 	if (!RXDMA_CHANNEL_VALID(rdc)) {
500 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
501 		    "rxdma_cfg_default_port_rdc"
502 		    " Illegal RDC number %d \n",
503 		    rdc));
504 		return (NPI_RXDMA_RDC_INVALID);
505 	}
506 
507 	ASSERT(RXDMA_PORT_VALID(portnm));
508 	if (!RXDMA_PORT_VALID(portnm)) {
509 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
510 		    "rxdma_cfg_default_port_rdc"
511 		    " Illegal Port number %d \n",
512 		    portnm));
513 		return (NPI_RXDMA_PORT_INVALID);
514 	}
515 
516 	offset = DEF_PT_RDC_REG(portnm);
517 	cfg.value = 0;
518 	cfg.bits.ldw.rdc = rdc;
519 	NXGE_REG_WR64(handle, offset, cfg.value);
520 	return (NPI_SUCCESS);
521 }
522 
523 npi_status_t
524 npi_rxdma_cfg_rdc_rcr_ctl(npi_handle_t handle, uint8_t rdc,
525 			    uint8_t op, uint16_t param)
526 {
527 	rcrcfig_b_t rcr_cfgb;
528 
529 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
530 	if (!RXDMA_CHANNEL_VALID(rdc)) {
531 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
532 		    "rxdma_cfg_rdc_rcr_ctl"
533 		    " Illegal RDC number %d \n",
534 		    rdc));
535 		return (NPI_RXDMA_RDC_INVALID);
536 	}
537 
538 
539 	RXDMA_REG_READ64(handle, RCRCFIG_B_REG, rdc, &rcr_cfgb.value);
540 
541 	switch (op) {
542 		case RCR_TIMEOUT_ENABLE:
543 			rcr_cfgb.bits.ldw.timeout = (uint8_t)param;
544 			rcr_cfgb.bits.ldw.entout = 1;
545 			break;
546 
547 		case RCR_THRESHOLD:
548 			rcr_cfgb.bits.ldw.pthres = param;
549 			break;
550 
551 		case RCR_TIMEOUT_DISABLE:
552 			rcr_cfgb.bits.ldw.entout = 0;
553 			break;
554 
555 		default:
556 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
557 		    "rxdma_cfg_rdc_rcr_ctl"
558 		    " Illegal opcode %x \n",
559 		    op));
560 		return (NPI_RXDMA_OPCODE_INVALID(rdc));
561 	}
562 
563 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
564 	return (NPI_SUCCESS);
565 }
566 
567 npi_status_t
568 npi_rxdma_cfg_rdc_rcr_timeout_disable(npi_handle_t handle, uint8_t rdc)
569 {
570 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
571 	    RCR_TIMEOUT_DISABLE, 0));
572 }
573 
574 npi_status_t
575 npi_rxdma_cfg_rdc_rcr_threshold(npi_handle_t handle, uint8_t rdc,
576 				    uint16_t rcr_threshold)
577 {
578 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
579 	    RCR_THRESHOLD, rcr_threshold));
580 
581 }
582 
583 npi_status_t
584 npi_rxdma_cfg_rdc_rcr_timeout(npi_handle_t handle, uint8_t rdc,
585 			    uint8_t rcr_timeout)
586 {
587 	return (npi_rxdma_cfg_rdc_rcr_ctl(handle, rdc,
588 	    RCR_TIMEOUT_ENABLE, rcr_timeout));
589 
590 }
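
/*
 * Usage sketch (illustrative only; NPI_RXDMA_EXAMPLES is a hypothetical
 * guard): pair a completion-count threshold with a timeout so that,
 * roughly, an RCR interrupt is raised either after `pkts' completions or
 * after the timeout tick value expires, whichever comes first.  The tick
 * units are hardware specific; both values here are placeholders.
 */
#ifdef	NPI_RXDMA_EXAMPLES
static npi_status_t
npi_rxdma_example_coalesce(npi_handle_t handle, uint8_t rdc,
    uint16_t pkts, uint8_t ticks)
{
	npi_status_t rs;

	if ((rs = npi_rxdma_cfg_rdc_rcr_threshold(handle, rdc, pkts)) !=
	    NPI_SUCCESS)
		return (rs);
	return (npi_rxdma_cfg_rdc_rcr_timeout(handle, rdc, ticks));
}
#endif	/* NPI_RXDMA_EXAMPLES */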
591 
592 /*
593  * npi_rxdma_cfg_rdc_ring()
594  * Configure The RDC channel Rcv Buffer Ring
595  */
596 npi_status_t
597 npi_rxdma_cfg_rdc_ring(npi_handle_t handle, uint8_t rdc,
598 			    rdc_desc_cfg_t *rdc_desc_cfg, boolean_t new_off)
599 {
600 	rbr_cfig_a_t cfga;
601 	rbr_cfig_b_t cfgb;
602 	rxdma_cfig1_t cfg1;
603 	rxdma_cfig2_t cfg2;
604 	rcrcfig_a_t rcr_cfga;
605 	rcrcfig_b_t rcr_cfgb;
606 
607 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
608 	if (!RXDMA_CHANNEL_VALID(rdc)) {
609 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
610 		    "rxdma_cfg_rdc_ring"
611 		    " Illegal RDC number %d \n",
612 		    rdc));
613 		return (NPI_RXDMA_RDC_INVALID);
614 	}
615 
616 
617 	cfga.value = 0;
618 	cfgb.value = 0;
619 	cfg1.value = 0;
620 	cfg2.value = 0;
621 
622 	if (rdc_desc_cfg->mbox_enable == 1) {
623 		cfg1.bits.ldw.mbaddr_h =
624 		    (rdc_desc_cfg->mbox_addr >> 32) & 0xfff;
625 		cfg2.bits.ldw.mbaddr =
626 		    ((rdc_desc_cfg->mbox_addr &
627 		    RXDMA_CFIG2_MBADDR_L_MASK) >>
628 		    RXDMA_CFIG2_MBADDR_L_SHIFT);
629 
630 
631 		/*
632 		 * Enable the RDC only after all the configurations are
633 		 * set; otherwise a configuration fatal error will be
634 		 * returned (especially if the Hypervisor has set up the
635 		 * logical pages with non-zero values).
636 		 * This NPI function only sets up the configuration.
637 		 */
638 	}
639 
640 
641 	if (rdc_desc_cfg->full_hdr == 1)
642 		cfg2.bits.ldw.full_hdr = 1;
643 
644 	if (new_off) {
645 		if (RXDMA_RF_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
646 			switch (rdc_desc_cfg->offset) {
647 			case SW_OFFSET_NO_OFFSET:
648 			case SW_OFFSET_64:
649 			case SW_OFFSET_128:
650 			case SW_OFFSET_192:
651 				cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
652 				cfg2.bits.ldw.offset256 = 0;
653 				break;
654 			case SW_OFFSET_256:
655 			case SW_OFFSET_320:
656 			case SW_OFFSET_384:
657 			case SW_OFFSET_448:
658 				cfg2.bits.ldw.offset =
659 				    rdc_desc_cfg->offset & 0x3;
660 				cfg2.bits.ldw.offset256 = 1;
661 				break;
662 			default:
663 				cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
664 				cfg2.bits.ldw.offset256 = 0;
665 			}
666 		} else {
667 			cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
668 			cfg2.bits.ldw.offset256 = 0;
669 		}
670 	} else {
671 		if (RXDMA_BUFF_OFFSET_VALID(rdc_desc_cfg->offset)) {
672 			cfg2.bits.ldw.offset = rdc_desc_cfg->offset;
673 		} else {
674 			cfg2.bits.ldw.offset = SW_OFFSET_NO_OFFSET;
675 		}
676 	}
677 
678 		/* rbr config */
679 
680 	cfga.value = (rdc_desc_cfg->rbr_addr & (RBR_CFIG_A_STDADDR_MASK |
681 	    RBR_CFIG_A_STDADDR_BASE_MASK));
682 
683 	if ((rdc_desc_cfg->rbr_len < RBR_DEFAULT_MIN_LEN) ||
684 	    (rdc_desc_cfg->rbr_len > RBR_DEFAULT_MAX_LEN)) {
685 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
686 		    "npi_rxdma_cfg_rdc_ring"
687 		    " Illegal RBR Queue Length %d \n",
688 		    rdc_desc_cfg->rbr_len));
689 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RBRSIZE_INVALID, rdc));
690 	}
691 
692 
693 	cfga.bits.hdw.len = rdc_desc_cfg->rbr_len;
694 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
695 	    "npi_rxdma_cfg_rdc_ring"
696 	    " CFGA 0x%llx hdw.len %d (RBR LEN %d)\n",
697 	    cfga.value, cfga.bits.hdw.len,
698 	    rdc_desc_cfg->rbr_len));
699 
700 	if (rdc_desc_cfg->page_size == SIZE_4KB)
701 		cfgb.bits.ldw.bksize = RBR_BKSIZE_4K;
702 	else if (rdc_desc_cfg->page_size == SIZE_8KB)
703 		cfgb.bits.ldw.bksize = RBR_BKSIZE_8K;
704 	else if (rdc_desc_cfg->page_size == SIZE_16KB)
705 		cfgb.bits.ldw.bksize = RBR_BKSIZE_16K;
706 	else if (rdc_desc_cfg->page_size == SIZE_32KB)
707 		cfgb.bits.ldw.bksize = RBR_BKSIZE_32K;
708 	else {
709 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
710 		    "rxdma_cfg_rdc_ring"
711 		    " blksize: Illegal buffer size %d \n",
712 		    rdc_desc_cfg->page_size));
713 		return (NPI_RXDMA_BUFSIZE_INVALID);
714 	}
715 
716 	if (rdc_desc_cfg->valid0) {
717 
718 		if (rdc_desc_cfg->size0 == SIZE_256B)
719 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_256B;
720 		else if (rdc_desc_cfg->size0 == SIZE_512B)
721 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_512B;
722 		else if (rdc_desc_cfg->size0 == SIZE_1KB)
723 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_1K;
724 		else if (rdc_desc_cfg->size0 == SIZE_2KB)
725 			cfgb.bits.ldw.bufsz0 = RBR_BUFSZ0_2K;
726 		else {
727 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
728 			    " rxdma_cfg_rdc_ring"
729 			    " blksize0: Illegal buffer size %x \n",
730 			    rdc_desc_cfg->size0));
731 			return (NPI_RXDMA_BUFSIZE_INVALID);
732 		}
733 		cfgb.bits.ldw.vld0 = 1;
734 	} else {
735 		cfgb.bits.ldw.vld0 = 0;
736 	}
737 
738 
739 	if (rdc_desc_cfg->valid1) {
740 		if (rdc_desc_cfg->size1 == SIZE_1KB)
741 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_1K;
742 		else if (rdc_desc_cfg->size1 == SIZE_2KB)
743 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_2K;
744 		else if (rdc_desc_cfg->size1 == SIZE_4KB)
745 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_4K;
746 		else if (rdc_desc_cfg->size1 == SIZE_8KB)
747 			cfgb.bits.ldw.bufsz1 = RBR_BUFSZ1_8K;
748 		else {
749 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
750 			    " rxdma_cfg_rdc_ring"
751 			    " blksize1: Illegal buffer size %x \n",
752 			    rdc_desc_cfg->size1));
753 			return (NPI_RXDMA_BUFSIZE_INVALID);
754 		}
755 		cfgb.bits.ldw.vld1 = 1;
756 	} else {
757 		cfgb.bits.ldw.vld1 = 0;
758 	}
759 
760 
761 	if (rdc_desc_cfg->valid2) {
762 		if (rdc_desc_cfg->size2 == SIZE_2KB)
763 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_2K;
764 		else if (rdc_desc_cfg->size2 == SIZE_4KB)
765 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_4K;
766 		else if (rdc_desc_cfg->size2 == SIZE_8KB)
767 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_8K;
768 		else if (rdc_desc_cfg->size2 == SIZE_16KB)
769 			cfgb.bits.ldw.bufsz2 = RBR_BUFSZ2_16K;
770 		else {
771 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
772 			    " rxdma_cfg_rdc_ring"
773 			    " blksize2: Illegal buffer size %x \n",
774 			    rdc_desc_cfg->size2));
775 			return (NPI_RXDMA_BUFSIZE_INVALID);
776 		}
777 		cfgb.bits.ldw.vld2 = 1;
778 	} else {
779 		cfgb.bits.ldw.vld2 = 0;
780 	}
781 
782 
783 	rcr_cfga.value = (rdc_desc_cfg->rcr_addr &
784 	    (RCRCFIG_A_STADDR_MASK |
785 	    RCRCFIG_A_STADDR_BASE_MASK));
786 
787 
788 	if ((rdc_desc_cfg->rcr_len < RCR_DEFAULT_MIN_LEN) ||
789 	    (rdc_desc_cfg->rcr_len > NXGE_RCR_MAX)) {
790 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
791 		    " rxdma_cfg_rdc_ring"
792 		    " Illegal RCR Queue Length %d \n",
793 		    rdc_desc_cfg->rcr_len));
794 		return (NPI_RXDMA_ERROR_ENCODE(NPI_RXDMA_RCRSIZE_INVALID, rdc));
795 	}
796 
797 	rcr_cfga.bits.hdw.len = rdc_desc_cfg->rcr_len;
798 
799 
800 	rcr_cfgb.value = 0;
801 	if (rdc_desc_cfg->rcr_timeout_enable == 1) {
802 		/* check if the rcr timeout value is valid */
803 
804 		if (RXDMA_RCR_TO_VALID(rdc_desc_cfg->rcr_timeout)) {
805 			rcr_cfgb.bits.ldw.timeout = rdc_desc_cfg->rcr_timeout;
806 			rcr_cfgb.bits.ldw.entout = 1;
807 		} else {
808 			NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
809 			    " rxdma_cfg_rdc_ring"
810 			    " Illegal RCR Timeout value %d \n",
811 			    rdc_desc_cfg->rcr_timeout));
812 			rcr_cfgb.bits.ldw.entout = 0;
813 		}
814 	} else {
815 		rcr_cfgb.bits.ldw.entout = 0;
816 	}
817 
818 		/* check if the rcr threshold value is valid */
819 	if (RXDMA_RCR_THRESH_VALID(rdc_desc_cfg->rcr_threshold)) {
820 		rcr_cfgb.bits.ldw.pthres = rdc_desc_cfg->rcr_threshold;
821 	} else {
822 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
823 		    " rxdma_cfg_rdc_ring"
824 		    " Illegal RCR Threshold value %d \n",
825 		    rdc_desc_cfg->rcr_threshold));
826 		rcr_cfgb.bits.ldw.pthres = 1;
827 	}
828 
829 		/* now do the actual HW configuration */
830 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG1_REG, rdc, cfg1.value);
831 	RXDMA_REG_WRITE64(handle, RXDMA_CFIG2_REG, rdc, cfg2.value);
832 
833 
834 	RXDMA_REG_WRITE64(handle, RBR_CFIG_A_REG, rdc, cfga.value);
835 	RXDMA_REG_WRITE64(handle, RBR_CFIG_B_REG, rdc, cfgb.value);
836 
837 	RXDMA_REG_WRITE64(handle, RCRCFIG_A_REG, rdc, rcr_cfga.value);
838 	RXDMA_REG_WRITE64(handle, RCRCFIG_B_REG, rdc, rcr_cfgb.value);
839 
840 	return (NPI_SUCCESS);
841 
842 }
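
/*
 * Usage sketch (illustrative only; NPI_RXDMA_EXAMPLES is a hypothetical
 * guard): minimal rdc_desc_cfg_t set-up for npi_rxdma_cfg_rdc_ring().
 * The ring base addresses and lengths are placeholders; real callers
 * derive them from their DMA allocations and must keep rbr_len/rcr_len
 * inside the limits checked above.
 */
#ifdef	NPI_RXDMA_EXAMPLES
static npi_status_t
npi_rxdma_example_ring_cfg(npi_handle_t handle, uint8_t rdc,
    uint64_t rbr_iova, uint32_t rbr_entries,
    uint64_t rcr_iova, uint32_t rcr_entries)
{
	rdc_desc_cfg_t cfg;

	bzero(&cfg, sizeof (cfg));
	cfg.mbox_enable = 0;			/* no mailbox in this sketch */
	cfg.offset = SW_OFFSET_NO_OFFSET;	/* no SW packet offset */
	cfg.page_size = SIZE_4KB;		/* RBR block size */
	cfg.valid0 = 1;				/* one buffer size enabled */
	cfg.size0 = SIZE_2KB;
	cfg.rbr_addr = rbr_iova;
	cfg.rbr_len = rbr_entries;
	cfg.rcr_addr = rcr_iova;
	cfg.rcr_len = rcr_entries;
	cfg.rcr_timeout_enable = 0;		/* threshold-only coalescing */
	cfg.rcr_threshold = 1;
	return (npi_rxdma_cfg_rdc_ring(handle, rdc, &cfg, B_FALSE));
}
#endif	/* NPI_RXDMA_EXAMPLES */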
843 
844 /*
845  * npi_rxdma_red_discard_stat_get
846  * Gets the current discard count due to RED
847  * The counter overflow bit is cleared, if it has been set.
848  *
849  * Inputs:
850  *      handle:	opaque handle interpreted by the underlying OS
851  *	rdc:		RX DMA Channel number
852  *	cnt:	Ptr to structure to write current RDC discard stat
853  *
854  * Return:
855  * NPI_SUCCESS
856  * NPI_RXDMA_RDC_INVALID
857  *
858  */
859 npi_status_t
860 npi_rxdma_red_discard_stat_get(npi_handle_t handle, uint8_t rdc,
861 				    rx_disc_cnt_t *cnt)
862 {
863 	uint64_t offset;
864 
865 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
866 	if (!RXDMA_CHANNEL_VALID(rdc)) {
867 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
868 		    " npi_rxdma_red_discard_stat_get"
869 		    " Illegal RDC Number %d \n",
870 		    rdc));
871 		return (NPI_RXDMA_RDC_INVALID);
872 	}
873 
874 	offset = RDC_RED_RDC_DISC_REG(rdc);
875 	NXGE_REG_RD64(handle, offset, &cnt->value);
876 	if (cnt->bits.ldw.oflow) {
877 		NPI_DEBUG_MSG((handle.function, NPI_ERR_CTL,
878 		    " npi_rxdma_red_discard_stat_get"
879 		    " Counter overflow for channel %d "
880 		    " ..... clearing \n",
881 		    rdc));
882 		cnt->bits.ldw.oflow = 0;
883 		NXGE_REG_WR64(handle, offset, cnt->value);
884 		cnt->bits.ldw.oflow = 1;
885 	}
886 
887 	return (NPI_SUCCESS);
888 }
889 
890 /*
891  * npi_rxdma_red_discard_oflow_clear
892  * Clear RED discard counter overflow bit
893  *
894  * Inputs:
895  *      handle:	opaque handle interpreted by the underlying OS
896  *	rdc:		RX DMA Channel number
897  *
898  * Return:
899  * NPI_SUCCESS
900  * NPI_RXDMA_RDC_INVALID
901  *
902  */
903 npi_status_t
904 npi_rxdma_red_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
905 
906 {
907 	uint64_t offset;
908 	rx_disc_cnt_t cnt;
909 
910 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
911 	if (!RXDMA_CHANNEL_VALID(rdc)) {
912 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
913 			    " npi_rxdma_red_discard_oflow_clear"
914 			    " Illegal RDC Number %d \n",
915 			    rdc));
916 		return (NPI_RXDMA_RDC_INVALID);
917 	}
918 
919 	offset = RDC_RED_RDC_DISC_REG(rdc);
920 	NXGE_REG_RD64(handle, offset, &cnt.value);
921 	if (cnt.bits.ldw.oflow) {
922 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
923 			    " npi_rxdma_red_discard_oflow_clear"
924 			    " Counter overflow for channel %d "
925 			    " ..... clearing \n",
926 			    rdc));
927 		cnt.bits.ldw.oflow = 0;
928 		NXGE_REG_WR64(handle, offset, cnt.value);
929 	}
930 	return (NPI_SUCCESS);
931 }
932 
933 /*
934  * npi_rxdma_misc_discard_stat_get
935  * Gets the current discard count for the rdc due to
936  * buffer pool empty
937  * The counter overflow bit is cleared, if it has been set.
938  *
939  * Inputs:
940  *      handle:	opaque handle interpreted by the underlying OS
941  *	rdc:		RX DMA Channel number
942  *	cnt:	Ptr to structure to write current RDC discard stat
943  *
944  * Return:
945  * NPI_SUCCESS
946  * NPI_RXDMA_RDC_INVALID
947  *
948  */
949 npi_status_t
950 npi_rxdma_misc_discard_stat_get(npi_handle_t handle, uint8_t rdc,
951 				    rx_disc_cnt_t *cnt)
952 {
953 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
954 	if (!RXDMA_CHANNEL_VALID(rdc)) {
955 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
956 		    " npi_rxdma_misc_discard_stat_get"
957 		    " Illegal RDC Number %d \n",
958 		    rdc));
959 		return (NPI_RXDMA_RDC_INVALID);
960 	}
961 
962 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt->value);
963 	if (cnt->bits.ldw.oflow) {
964 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
965 		    " npi_rxdma_misc_discard_stat_get"
966 		    " Counter overflow for channel %d "
967 		    " ..... clearing \n",
968 		    rdc));
969 		cnt->bits.ldw.oflow = 0;
970 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt->value);
971 		cnt->bits.ldw.oflow = 1;
972 	}
973 
974 	return (NPI_SUCCESS);
975 }
976 
977 /*
978  * npi_rxdma_misc_discard_oflow_clear
979  * Clear the overflow bit of the buffer pool empty discard
980  * counter (RXMISC_DISCARD_REG)
981  * for the rdc
982  *
983  * Inputs:
984  *      handle:	opaque handle interpreted by the underlying OS
985  *	rdc:		RX DMA Channel number
986  *
987  * Return:
988  * NPI_SUCCESS
989  * NPI_RXDMA_RDC_INVALID
990  *
991  */
992 npi_status_t
993 npi_rxdma_misc_discard_oflow_clear(npi_handle_t handle, uint8_t rdc)
994 {
995 	rx_disc_cnt_t cnt;
996 
997 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
998 	if (!RXDMA_CHANNEL_VALID(rdc)) {
999 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1000 		    " npi_rxdma_misc_discard_oflow_clear"
1001 		    " Illegal RDC Number %d \n",
1002 		    rdc));
1003 		return (NPI_RXDMA_RDC_INVALID);
1004 	}
1005 
1006 	RXDMA_REG_READ64(handle, RXMISC_DISCARD_REG, rdc, &cnt.value);
1007 	if (cnt.bits.ldw.oflow) {
1008 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1009 		    " npi_rxdma_misc_discard_oflow_clear"
1010 		    " Counter overflow for channel %d "
1011 		    " ..... clearing \n",
1012 		    rdc));
1013 		cnt.bits.ldw.oflow = 0;
1014 		RXDMA_REG_WRITE64(handle, RXMISC_DISCARD_REG, rdc, cnt.value);
1015 	}
1016 
1017 	return (NPI_SUCCESS);
1018 }
1019 
1020 /*
1021  * npi_rxdma_ring_perr_stat_get
1022  * Gets the current RDC prefetch and shadow memory parity error logs.
1023  * The error bits are cleared if they have been set.
1024  *
1025  * Inputs:
1026  * handle:	opaque handle interpreted by the underlying OS
1027  * pre_log:	Structure to write current RDC Prefetch memory
1028  *		Parity Error stat
1029  * sha_log:	Structure to write current RDC Shadow memory
1030  *		Parity Error stat
1031  *
1032  * Return:
1033  * NPI_SUCCESS
1034  *
1035  */
1036 npi_status_t
1037 npi_rxdma_ring_perr_stat_get(npi_handle_t handle,
1038 			    rdmc_par_err_log_t *pre_log,
1039 			    rdmc_par_err_log_t *sha_log)
1040 {
1041 	uint64_t pre_offset, sha_offset;
1042 	rdmc_par_err_log_t clr;
1043 	int clr_bits = 0;
1044 
1045 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1046 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1047 	NXGE_REG_RD64(handle, pre_offset, &pre_log->value);
1048 	NXGE_REG_RD64(handle, sha_offset, &sha_log->value);
1049 
1050 	clr.value = pre_log->value;
1051 	if (pre_log->bits.ldw.err) {
1052 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1053 		    " npi_rxdma_ring_perr_stat_get"
1054 		    " PRE ERR Bit set ..... clearing \n"));
1055 		clr.bits.ldw.err = 0;
1056 		clr_bits++;
1057 	}
1058 
1059 	if (pre_log->bits.ldw.merr) {
1060 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1061 		    " npi_rxdma_ring_perr_stat_get"
1062 		    " PRE MERR Bit set ..... clearing \n"));
1063 		clr.bits.ldw.merr = 0;
1064 		clr_bits++;
1065 	}
1066 
1067 	if (clr_bits) {
1068 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1069 	}
1070 
1071 	clr_bits = 0;
1072 	clr.value = sha_log->value;
1073 	if (sha_log->bits.ldw.err) {
1074 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1075 		    " npi_rxdma_ring_perr_stat_get"
1076 		    " SHA ERR Bit set ..... clearing \n"));
1077 		clr.bits.ldw.err = 0;
1078 		clr_bits++;
1079 	}
1080 
1081 	if (sha_log->bits.ldw.merr) {
1082 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1083 		    " npi_rxdma_ring_perr_stat_get"
1084 		    " SHA MERR Bit set ..... clearing \n"));
1085 		clr.bits.ldw.merr = 0;
1086 		clr_bits++;
1087 	}
1088 
1089 	if (clr_bits) {
1090 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1091 	}
1092 
1093 	return (NPI_SUCCESS);
1094 }
1095 
1096 /*
1097  * npi_rxdma_ring_perr_stat_clear
1098  * Clear the RDC memory parity error log bits
1099  *
1100  * Inputs:
1101  *      handle:	opaque handle interpreted by the underlying OS
1102  * Return:
1103  * NPI_SUCCESS
1104  *
1105  */
1106 npi_status_t
1107 npi_rxdma_ring_perr_stat_clear(npi_handle_t handle)
1108 {
1109 	uint64_t pre_offset, sha_offset;
1110 	rdmc_par_err_log_t clr;
1111 	int clr_bits = 0;
1112 	pre_offset = RDMC_PRE_PAR_ERR_REG;
1113 	sha_offset = RDMC_SHA_PAR_ERR_REG;
1114 
1115 	NXGE_REG_RD64(handle, pre_offset, &clr.value);
1116 
1117 	if (clr.bits.ldw.err) {
1118 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1119 		    " npi_rxdma_ring_perr_stat_clear"
1120 		    " PRE ERR Bit set ..... clearing \n"));
1121 		clr.bits.ldw.err = 0;
1122 		clr_bits++;
1123 	}
1124 
1125 	if (clr.bits.ldw.merr) {
1126 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1127 		    " npi_rxdma_ring_perr_stat_clear"
1128 		    " PRE MERR Bit set ..... clearing \n"));
1129 		clr.bits.ldw.merr = 0;
1130 		clr_bits++;
1131 	}
1132 
1133 	if (clr_bits) {
1134 		NXGE_REG_WR64(handle, pre_offset, clr.value);
1135 	}
1136 
1137 	clr_bits = 0;
1138 	NXGE_REG_RD64(handle, sha_offset, &clr.value);
1139 	if (clr.bits.ldw.err) {
1140 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1141 		    " npi_rxdma_ring_perr_stat_clear"
1142 		    " SHA ERR Bit set ..... clearing \n"));
1143 		clr.bits.ldw.err = 0;
1144 		clr_bits++;
1145 	}
1146 
1147 	if (clr.bits.ldw.merr) {
1148 		NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1149 		    " npi_rxdma_ring_perr_stat_clear"
1150 		    " SHA MERR Bit set ..... clearing \n"));
1151 		clr.bits.ldw.merr = 0;
1152 		clr_bits++;
1153 	}
1154 
1155 	if (clr_bits) {
1156 		NXGE_REG_WR64(handle, sha_offset, clr.value);
1157 	}
1158 
1159 	return (NPI_SUCCESS);
1160 }
1161 
1162 /*
1163  * Access the RDMC Memory: used for debugging
1164  */
1165 npi_status_t
1166 npi_rxdma_rdmc_memory_io(npi_handle_t handle,
1167 			    rdmc_mem_access_t *data, uint8_t op)
1168 {
1169 	uint64_t d0_offset, d1_offset, d2_offset, d3_offset, d4_offset;
1170 	uint64_t addr_offset;
1171 	rdmc_mem_addr_t addr;
1172 	rdmc_mem_data_t d0, d1, d2, d3, d4;
1173 	d0.value = 0;
1174 	d1.value = 0;
1175 	d2.value = 0;
1176 	d3.value = 0;
1177 	d4.value = 0;
1178 	addr.value = 0;
1179 
1180 
1181 	if ((data->location != RDMC_MEM_ADDR_PREFETCH) &&
1182 	    (data->location != RDMC_MEM_ADDR_SHADOW)) {
1183 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1184 		    " npi_rxdma_rdmc_memory_io"
1185 		    " Illegal memory Type %x \n",
1186 		    data->location));
1187 		return (NPI_RXDMA_OPCODE_INVALID(0));
1188 	}
1189 
1190 	addr_offset = RDMC_MEM_ADDR_REG;
1191 	addr.bits.ldw.addr = data->addr;
1192 	addr.bits.ldw.pre_shad = data->location;
1193 
1194 	d0_offset = RDMC_MEM_DATA0_REG;
1195 	d1_offset = RDMC_MEM_DATA1_REG;
1196 	d2_offset = RDMC_MEM_DATA2_REG;
1197 	d3_offset = RDMC_MEM_DATA3_REG;
1198 	d4_offset = RDMC_MEM_DATA4_REG;
1199 
1200 
1201 	if (op == RDMC_MEM_WRITE) {
1202 		d0.bits.ldw.data = data->data[0];
1203 		d1.bits.ldw.data = data->data[1];
1204 		d2.bits.ldw.data = data->data[2];
1205 		d3.bits.ldw.data = data->data[3];
1206 		d4.bits.ldw.data = data->data[4];
1207 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1208 		NXGE_REG_WR64(handle, d0_offset, d0.value);
1209 		NXGE_REG_WR64(handle, d1_offset, d1.value);
1210 		NXGE_REG_WR64(handle, d2_offset, d2.value);
1211 		NXGE_REG_WR64(handle, d3_offset, d3.value);
1212 		NXGE_REG_WR64(handle, d4_offset, d4.value);
1213 	} else if (op == RDMC_MEM_READ) {
1216 		NXGE_REG_WR64(handle, addr_offset, addr.value);
1217 		NXGE_REG_RD64(handle, d4_offset, &d4.value);
1218 		NXGE_REG_RD64(handle, d3_offset, &d3.value);
1219 		NXGE_REG_RD64(handle, d2_offset, &d2.value);
1220 		NXGE_REG_RD64(handle, d1_offset, &d1.value);
1221 		NXGE_REG_RD64(handle, d0_offset, &d0.value);
1222 
1223 		data->data[0] = d0.bits.ldw.data;
1224 		data->data[1] = d1.bits.ldw.data;
1225 		data->data[2] = d2.bits.ldw.data;
1226 		data->data[3] = d3.bits.ldw.data;
1227 		data->data[4] = d4.bits.ldw.data;
1228 	} else {
1229 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1230 		    " npi_rxdma_rdmc_memory_io"
1231 		    " Illegal opcode %x \n",
1232 		    op));
1233 		return (NPI_RXDMA_OPCODE_INVALID(0));
1234 
1235 	}
1236 
1237 	return (NPI_SUCCESS);
1238 }
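
/*
 * Usage sketch (illustrative only; NPI_RXDMA_EXAMPLES is a hypothetical
 * guard): read one shadow-memory entry for debugging.  The five data
 * words are returned in acc->data[] by the function above.
 */
#ifdef	NPI_RXDMA_EXAMPLES
static npi_status_t
npi_rxdma_example_shadow_peek(npi_handle_t handle, uint32_t entry,
    rdmc_mem_access_t *acc)
{
	acc->location = RDMC_MEM_ADDR_SHADOW;
	acc->addr = entry;
	return (npi_rxdma_rdmc_memory_io(handle, acc, RDMC_MEM_READ));
}
#endif	/* NPI_RXDMA_EXAMPLES */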
1239 
1240 /*
1241  * system wide conf functions
1242  */
1243 npi_status_t
1244 npi_rxdma_cfg_clock_div_set(npi_handle_t handle, uint16_t count)
1245 {
1246 	uint64_t offset;
1247 	rx_dma_ck_div_t clk_div;
1248 
1249 	offset = RX_DMA_CK_DIV_REG;
1250 
1251 	clk_div.value = 0;
1252 	clk_div.bits.ldw.cnt = count;
1253 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1254 	    " npi_rxdma_cfg_clock_div_set: add 0x%llx "
1255 	    "handle 0x%llx value 0x%llx",
1256 	    handle.regp, handle.regh, clk_div.value));
1257 
1258 	NXGE_REG_WR64(handle, offset, clk_div.value);
1259 
1260 	return (NPI_SUCCESS);
1261 }
1262 
1263 npi_status_t
1264 npi_rxdma_cfg_red_rand_init(npi_handle_t handle, uint16_t init_value)
1265 {
1266 	uint64_t offset;
1267 	red_ran_init_t rand_reg;
1268 
1269 	offset = RED_RAN_INIT_REG;
1270 
1271 	rand_reg.value = 0;
1272 	rand_reg.bits.ldw.init = init_value;
1273 	rand_reg.bits.ldw.enable = 1;
1274 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1275 
1276 	return (NPI_SUCCESS);
1277 
1278 }
1279 
1280 npi_status_t
1281 npi_rxdma_cfg_red_rand_disable(npi_handle_t handle)
1282 {
1283 	uint64_t offset;
1284 	red_ran_init_t rand_reg;
1285 
1286 	offset = RED_RAN_INIT_REG;
1287 
1288 	NXGE_REG_RD64(handle, offset, &rand_reg.value);
1289 	rand_reg.bits.ldw.enable = 0;
1290 	NXGE_REG_WR64(handle, offset, rand_reg.value);
1291 
1292 	return (NPI_SUCCESS);
1293 
1294 }
1295 
1296 npi_status_t
1297 npi_rxdma_cfg_32bitmode_enable(npi_handle_t handle)
1298 {
1299 	uint64_t offset;
1300 	rx_addr_md_t md_reg;
1301 	offset = RX_ADDR_MD_REG;
1302 	md_reg.value = 0;
1303 	md_reg.bits.ldw.mode32 = 1;
1304 
1305 	NXGE_REG_WR64(handle, offset, md_reg.value);
1306 	return (NPI_SUCCESS);
1307 
1308 }
1309 
1310 npi_status_t
1311 npi_rxdma_cfg_32bitmode_disable(npi_handle_t handle)
1312 {
1313 	uint64_t offset;
1314 	rx_addr_md_t md_reg;
1315 	offset = RX_ADDR_MD_REG;
1316 	md_reg.value = 0;
1317 
1318 	NXGE_REG_WR64(handle, offset, md_reg.value);
1319 	return (NPI_SUCCESS);
1320 
1321 }
1322 
1323 npi_status_t
1324 npi_rxdma_cfg_ram_access_enable(npi_handle_t handle)
1325 {
1326 	uint64_t offset;
1327 	rx_addr_md_t md_reg;
1328 	offset = RX_ADDR_MD_REG;
1329 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1330 	md_reg.bits.ldw.ram_acc = 1;
1331 	NXGE_REG_WR64(handle, offset, md_reg.value);
1332 	return (NPI_SUCCESS);
1333 
1334 }
1335 
1336 npi_status_t
1337 npi_rxdma_cfg_ram_access_disable(npi_handle_t handle)
1338 {
1339 	uint64_t offset;
1340 	rx_addr_md_t md_reg;
1341 	offset = RX_ADDR_MD_REG;
1342 	NXGE_REG_RD64(handle, offset, &md_reg.value);
1343 	md_reg.bits.ldw.ram_acc = 0;
1344 	NXGE_REG_WR64(handle, offset, md_reg.value);
1345 	return (NPI_SUCCESS);
1346 
1347 }
1348 
1349 npi_status_t
1350 npi_rxdma_cfg_port_ddr_weight(npi_handle_t handle,
1351 				    uint8_t portnm, uint32_t weight)
1352 {
1353 
1354 	pt_drr_wt_t wt_reg;
1355 	uint64_t offset;
1356 
1357 	ASSERT(RXDMA_PORT_VALID(portnm));
1358 	if (!RXDMA_PORT_VALID(portnm)) {
1359 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1360 		    " rxdma_cfg_port_ddr_weight"
1361 		    " Illegal Port Number %d \n",
1362 		    portnm));
1363 		return (NPI_RXDMA_PORT_INVALID);
1364 	}
1365 
1366 	offset = PT_DRR_WT_REG(portnm);
1367 	wt_reg.value = 0;
1368 	wt_reg.bits.ldw.wt = weight;
1369 	NXGE_REG_WR64(handle, offset, wt_reg.value);
1370 	return (NPI_SUCCESS);
1371 }
1372 
1373 npi_status_t
1374 npi_rxdma_port_usage_get(npi_handle_t handle,
1375 				    uint8_t portnm, uint32_t *blocks)
1376 {
1377 
1378 	pt_use_t use_reg;
1379 	uint64_t offset;
1380 
1381 	ASSERT(RXDMA_PORT_VALID(portnm));
1382 	if (!RXDMA_PORT_VALID(portnm)) {
1383 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1384 		    " rxdma_port_usage_get"
1385 		    " Illegal Port Number %d \n",
1386 		    portnm));
1387 		return (NPI_RXDMA_PORT_INVALID);
1388 	}
1389 
1390 	offset = PT_USE_REG(portnm);
1391 	NXGE_REG_RD64(handle, offset, &use_reg.value);
1392 	*blocks = use_reg.bits.ldw.cnt;
1393 	return (NPI_SUCCESS);
1394 
1395 }
1396 
1397 npi_status_t
1398 npi_rxdma_cfg_wred_param(npi_handle_t handle, uint8_t rdc,
1399 				    rdc_red_para_t *wred_params)
1400 {
1401 	rdc_red_para_t wred_reg;
1402 	uint64_t offset;
1403 
1404 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1405 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1406 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1407 		    " rxdma_cfg_wred_param"
1408 		    " Illegal RDC Number %d \n",
1409 		    rdc));
1410 		return (NPI_RXDMA_RDC_INVALID);
1411 	}
1412 
1413 	/*
1414 	 * need to update RDC_RED_PARA_REG as well as bit defs in
1415 	 * the hw header file
1416 	 */
1417 	offset = RDC_RED_RDC_PARA_REG(rdc);
1418 
1419 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1420 	    " npi_rxdma_cfg_wred_param: "
1421 	    "set RED_PARA: passed value 0x%llx "
1422 	    "win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1423 	    wred_params->value,
1424 	    wred_params->bits.ldw.win,
1425 	    wred_params->bits.ldw.thre,
1426 	    wred_params->bits.ldw.win_syn,
1427 	    wred_params->bits.ldw.thre_sync));
1428 
1429 	wred_reg.value = 0;
1430 	wred_reg.bits.ldw.win = wred_params->bits.ldw.win;
1431 	wred_reg.bits.ldw.thre = wred_params->bits.ldw.thre;
1432 	wred_reg.bits.ldw.win_syn = wred_params->bits.ldw.win_syn;
1433 	wred_reg.bits.ldw.thre_sync = wred_params->bits.ldw.thre_sync;
1434 	NXGE_REG_WR64(handle, offset, wred_reg.value);
1435 
1436 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1437 	    "set RED_PARA: value 0x%llx "
1438 	    "win 0x%x thre 0x%x sync 0x%x thre_sync 0x%x",
1439 	    wred_reg.value,
1440 	    wred_reg.bits.ldw.win,
1441 	    wred_reg.bits.ldw.thre,
1442 	    wred_reg.bits.ldw.win_syn,
1443 	    wred_reg.bits.ldw.thre_sync));
1444 
1445 	return (NPI_SUCCESS);
1446 }
1447 
1448 /*
1449  * npi_rxdma_rdc_table_config()
1450  * Configure/populate the RDC table
1451  *
1452  * Inputs:
1453  *	handle:	register handle interpreted by the underlying OS
1454  *	table:	RDC Group Number
1455  *	map:	A bitmap of the RDCs to populate with.
1456  *	count:	A count of the RDCs expressed in <map>.
1457  *
1458  * Notes:
1459  *	This function assumes that we are not using the TCAM, but are
1460  *	hashing all fields of the incoming ethernet packet!
1461  *
1462  * Return:
1463  *	NPI_SUCCESS
1464  *	NPI_RXDMA_TABLE_INVALID
1465  *
1466  */
1467 npi_status_t
1468 npi_rxdma_rdc_table_config(
1469 	npi_handle_t handle,
1470 	uint8_t table,
1471 	dc_map_t rdc_map,
1472 	int count)
1473 {
1474 	int8_t set[NXGE_MAX_RDCS];
1475 	int i, cursor;
1476 
1477 	rdc_tbl_t rdc_tbl;
1478 	uint64_t offset;
1479 
1480 	ASSERT(RXDMA_TABLE_VALID(table));
1481 	if (!RXDMA_TABLE_VALID(table)) {
1482 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1483 		    " npi_rxdma_cfg_rdc_table"
1484 		    " Illegal RDC Table Number %d \n",
1485 		    table));
1486 		return (NPI_RXDMA_TABLE_INVALID);
1487 	}
1488 
1489 	if (count == 0)		/* This shouldn't happen */
1490 		return (NPI_SUCCESS);
1491 
1492 	for (i = 0, cursor = 0; i < NXGE_MAX_RDCS; i++) {
1493 		if ((1 << i) & rdc_map) {
1494 			set[cursor++] = (int8_t)i;
1495 			if (cursor == count)
1496 				break;
1497 		}
1498 	}
1499 
1500 	rdc_tbl.value = 0;
1501 	offset = REG_RDC_TABLE_OFFSET(table);
1502 
1503 	/* Now write ( NXGE_MAX_RDCS / count ) sets of RDC numbers. */
1504 	for (i = 0, cursor = 0; i < NXGE_MAX_RDCS; i++) {
1505 		rdc_tbl.bits.ldw.rdc = set[cursor++];
1506 		NXGE_REG_WR64(handle, offset, rdc_tbl.value);
1507 		offset += sizeof (rdc_tbl.value);
1508 		if (cursor == count)
1509 			cursor = 0;
1510 	}
1511 
1512 	/*
1513 	 * Here is what the resulting table looks like with:
1514 	 *
1515 	 *  0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
1516 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1517 	 * |v |w |x |y |z |v |w |x |y |z |v |w |x |y |z |v | 5 RDCs
1518 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1519 	 * |w |x |y |z |w |x |y |z |w |x |y |z |w |x |y |z | 4 RDCs
1520 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1521 	 * |x |y |z |x |y |z |x |y |z |x |y |z |x |y |z |x | 3 RDCs
1522 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1523 	 * |x |y |x |y |x |y |x |y |x |y |x |y |x |y |x |y | 2 RDCs
1524 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1525 	 * |x |x |x |x |x |x |x |x |x |x |x |x |x |x |x |x | 1 RDC
1526 	 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
1527 	 */
1528 
1529 	return (NPI_SUCCESS);
1530 }
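
/*
 * Usage sketch (illustrative only; NPI_RXDMA_EXAMPLES is a hypothetical
 * guard): populate an RDC group with RDCs 0-3.  Per the diagram above,
 * the four channels are repeated across all NXGE_MAX_RDCS table slots.
 * The count argument must match the number of bits set in the map.
 */
#ifdef	NPI_RXDMA_EXAMPLES
static npi_status_t
npi_rxdma_example_rdc_group(npi_handle_t handle, uint8_t table)
{
	dc_map_t map = 0x0f;	/* RDCs 0, 1, 2 and 3 */

	return (npi_rxdma_rdc_table_config(handle, table, map, 4));
}
#endif	/* NPI_RXDMA_EXAMPLES */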
1531 
1532 npi_status_t
1533 npi_rxdma_cfg_rdc_table_default_rdc(npi_handle_t handle,
1534 			    uint8_t table, uint8_t rdc)
1535 {
1536 	uint64_t offset;
1537 	rdc_tbl_t tbl_reg;
1538 	tbl_reg.value = 0;
1539 
1540 	ASSERT(RXDMA_TABLE_VALID(table));
1541 	if (!RXDMA_TABLE_VALID(table)) {
1542 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1543 		    " npi_rxdma_cfg_rdc_table"
1544 		    " Illegal RDC table Number %d \n",
1545 		    table));
1546 		return (NPI_RXDMA_TABLE_INVALID);
1547 	}
1548 
1549 	offset = REG_RDC_TABLE_OFFSET(table);
1550 	tbl_reg.bits.ldw.rdc = rdc;
1551 	NXGE_REG_WR64(handle, offset, tbl_reg.value);
1552 	return (NPI_SUCCESS);
1553 
1554 }
1555 
1556 npi_status_t
1557 npi_rxdma_dump_rdc_table(npi_handle_t handle,
1558 			    uint8_t table)
1559 {
1560 	uint64_t offset;
1561 	int tbl_offset;
1562 	uint64_t value;
1563 
1564 	ASSERT(RXDMA_TABLE_VALID(table));
1565 	if (!RXDMA_TABLE_VALID(table)) {
1566 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1567 		    " npi_rxdma_dump_rdc_table"
1568 		    " Illegal RDC Table Number %d \n",
1569 		    table));
1570 		return (NPI_RXDMA_TABLE_INVALID);
1571 	}
1572 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1573 	    "\n Register Dump for RDC Table %d \n",
1574 	    table));
1575 	offset = REG_RDC_TABLE_OFFSET(table);
1576 	for (tbl_offset = 0; tbl_offset < NXGE_MAX_RDCS; tbl_offset++) {
1577 		NXGE_REG_RD64(handle, offset, &value);
1578 		NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1579 		    " 0x%08llx 0x%08llx \n",
1580 		    offset, value));
1581 		offset += 8;
1582 	}
1583 	NPI_REG_DUMP_MSG((handle.function, NPI_REG_CTL,
1584 	    "\n Register Dump for RDC Table %d done\n",
1585 	    table));
1586 	return (NPI_SUCCESS);
1587 
1588 }
1589 
1590 npi_status_t
1591 npi_rxdma_rdc_rbr_stat_get(npi_handle_t handle, uint8_t rdc,
1592 			    rbr_stat_t *rbr_stat)
1593 {
1594 
1595 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1596 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1597 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1598 		    " rxdma_rdc_rbr_stat_get"
1599 		    " Illegal RDC Number %d \n",
1600 		    rdc));
1601 		return (NPI_RXDMA_RDC_INVALID);
1602 	}
1603 
1604 	RXDMA_REG_READ64(handle, RBR_STAT_REG, rdc, &rbr_stat->value);
1605 	return (NPI_SUCCESS);
1606 }
1607 
1608 /*
1609  * npi_rxdma_rdc_rbr_head_get
1610  * Gets the current rbr head pointer.
1611  *
1612  * Inputs:
1613  *      handle:	opaque handle interpreted by the underlying OS
1614  *	rdc:		RX DMA Channel number
1615  *	hdptr		ptr to write the rbr head value
1616  *
1617  * Return:
1618  * NPI_SUCCESS
1619  * NPI_RXDMA_RDC_INVALID
1620  */
1621 npi_status_t
1622 npi_rxdma_rdc_rbr_head_get(npi_handle_t handle,
1623 			    uint8_t rdc, addr44_t *hdptr)
1624 {
1625 	rbr_hdh_t hh_ptr;
1626 	rbr_hdl_t hl_ptr;
1627 
1628 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1629 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1630 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1631 		    " rxdma_rdc_rbr_head_get"
1632 		    " Illegal RDC Number %d \n",
1633 		    rdc));
1634 		return (NPI_RXDMA_RDC_INVALID);
1635 	}
1636 	hh_ptr.value = 0;
1637 	hl_ptr.value = 0;
1638 	RXDMA_REG_READ64(handle, RBR_HDH_REG, rdc, &hh_ptr.value);
1639 	RXDMA_REG_READ64(handle, RBR_HDL_REG, rdc, &hl_ptr.value);
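	/*
	 * The 44-bit head address is split across RBR_HDH/RBR_HDL; the
	 * low word is stored right-shifted by two bits (presumably
	 * because RBR addresses are 4-byte aligned), so shift it back
	 * into place when reassembling the address below.
	 */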
1640 	hdptr->bits.ldw = hl_ptr.bits.ldw.head_l << 2;
1641 	hdptr->bits.hdw = hh_ptr.bits.ldw.head_h;
1642 	return (NPI_SUCCESS);
1643 
1644 }
1645 
1646 npi_status_t
1647 npi_rxdma_rdc_rcr_qlen_get(npi_handle_t handle, uint8_t rdc,
1648 			    uint16_t *rcr_qlen)
1649 {
1650 
1651 	rcrstat_a_t stats;
1652 
1653 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1654 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1655 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1656 		    " rxdma_rdc_rcr_qlen_get"
1657 		    " Illegal RDC Number %d \n",
1658 		    rdc));
1659 		return (NPI_RXDMA_RDC_INVALID);
1660 	}
1661 
1662 	RXDMA_REG_READ64(handle, RCRSTAT_A_REG, rdc, &stats.value);
1663 	*rcr_qlen =  stats.bits.ldw.qlen;
1664 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1665 	    " rxdma_rdc_rcr_qlen_get"
1666 	    " RDC %d qlen %x qlen %x\n",
1667 	    rdc, *rcr_qlen, stats.bits.ldw.qlen));
1668 	return (NPI_SUCCESS);
1669 }
1670 
1671 npi_status_t
1672 npi_rxdma_rdc_rcr_tail_get(npi_handle_t handle,
1673 			    uint8_t rdc, addr44_t *tail_addr)
1674 {
1675 
1676 	rcrstat_b_t th_ptr;
1677 	rcrstat_c_t tl_ptr;
1678 
1679 	ASSERT(RXDMA_CHANNEL_VALID(rdc));
1680 	if (!RXDMA_CHANNEL_VALID(rdc)) {
1681 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1682 		    " rxdma_rdc_rcr_tail_get"
1683 		    " Illegal RDC Number %d \n",
1684 		    rdc));
1685 		return (NPI_RXDMA_RDC_INVALID);
1686 	}
1687 	th_ptr.value = 0;
1688 	tl_ptr.value = 0;
1689 	RXDMA_REG_READ64(handle, RCRSTAT_B_REG, rdc, &th_ptr.value);
1690 	RXDMA_REG_READ64(handle, RCRSTAT_C_REG, rdc, &tl_ptr.value);
1691 	tail_addr->bits.ldw = tl_ptr.bits.ldw.tlptr_l << 3;
1692 	tail_addr->bits.hdw = th_ptr.bits.ldw.tlptr_h;
1693 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1694 	    " rxdma_rdc_rcr_tail_get"
1695 	    " RDC %d rcr_tail %llx tl %x\n",
1696 	    rdc, tl_ptr.value,
1697 	    tl_ptr.bits.ldw.tlptr_l));
1698 
1699 	return (NPI_SUCCESS);
1700 
1701 
1702 }
1703 
1704 /*
1705  * npi_rxdma_rxctl_fifo_error_intr_set
1706  * Configure The RX ctrl fifo error interrupt generation
1707  *
1708  * Inputs:
1709  *      handle:	opaque handle interpreted by the underlying OS
1710  *	mask:	rx_ctl_dat_fifo_mask_t specifying the errors
1711  * valid fields in  rx_ctl_dat_fifo_mask_t structure are:
1712  * zcp_eop_err, ipp_eop_err, id_mismatch. If a field is set
1713  * to 1, we will enable interrupt generation for the
1714  * corresponding error condition. In the hardware, the bit(s)
1715  * have to be cleared to enable interrupt.
1716  *
1717  * Return:
1718  * NPI_SUCCESS
1719  *
1720  */
1721 npi_status_t
1722 npi_rxdma_rxctl_fifo_error_intr_set(npi_handle_t handle,
1723 				    rx_ctl_dat_fifo_mask_t *mask)
1724 {
1725 	uint64_t offset;
1726 	rx_ctl_dat_fifo_mask_t intr_mask;
1727 	offset = RX_CTL_DAT_FIFO_MASK_REG;
1728 	NXGE_REG_RD64(handle, offset, &intr_mask.value);
1729 
1730 	if (mask->bits.ldw.ipp_eop_err) {
1731 		intr_mask.bits.ldw.ipp_eop_err = 0;
1732 	}
1733 
1734 	if (mask->bits.ldw.zcp_eop_err) {
1735 		intr_mask.bits.ldw.zcp_eop_err = 0;
1736 	}
1737 
1738 	if (mask->bits.ldw.id_mismatch) {
1739 		intr_mask.bits.ldw.id_mismatch = 0;
1740 	}
1741 
1742 	NXGE_REG_WR64(handle, offset, intr_mask.value);
1743 	return (NPI_SUCCESS);
1744 }
1745 
1746 /*
1747  * npi_rxdma_rxctl_fifo_error_intr_get
1748  * Read The RX ctrl fifo error Status
1749  *
1750  * Inputs:
1751  *      handle:	opaque handle interpreted by the underlying OS
1752  *	stat:	rx_ctl_dat_fifo_stat_t to read the errors to
1753  * valid fields in  rx_ctl_dat_fifo_stat_t structure are:
1754  * zcp_eop_err, ipp_eop_err, id_mismatch.
1755  * Return:
1756  * NPI_SUCCESS
1757  *
1758  */
1759 npi_status_t
1760 npi_rxdma_rxctl_fifo_error_intr_get(npi_handle_t handle,
1761 			    rx_ctl_dat_fifo_stat_t *stat)
1762 {
1763 	uint64_t offset = RX_CTL_DAT_FIFO_STAT_REG;
1764 	NXGE_REG_RD64(handle, offset, &stat->value);
1765 	return (NPI_SUCCESS);
1766 }
1767 
1768 npi_status_t
1769 npi_rxdma_rdc_rcr_pktread_update(npi_handle_t handle, uint8_t channel,
1770 				    uint16_t pkts_read)
1771 {
1772 
1773 	rx_dma_ctl_stat_t	cs;
1774 	uint16_t min_read = 0;
1775 
1776 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1777 	if (!RXDMA_CHANNEL_VALID(channel)) {
1778 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1779 		    " npi_rxdma_rdc_rcr_pktread_update "
1780 		    " channel %d", channel));
1781 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1782 	}
1783 
1784 	if ((pkts_read < min_read) || (pkts_read > 512)) {
1785 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1786 		    " npi_rxdma_rdc_rcr_pktread_update "
1787 		    " pkts %d out of bound", pkts_read));
1788 		return (NPI_RXDMA_OPCODE_INVALID(pkts_read));
1789 	}
1790 
1791 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1792 	    &cs.value);
1793 	cs.bits.ldw.pktread = pkts_read;
1794 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1795 	    channel, cs.value);
1796 
1797 	return (NPI_SUCCESS);
1798 }
1799 
1800 npi_status_t
1801 npi_rxdma_rdc_rcr_bufread_update(npi_handle_t handle, uint8_t channel,
1802 					    uint16_t bufs_read)
1803 {
1804 
1805 	rx_dma_ctl_stat_t	cs;
1806 	uint16_t min_read = 0;
1807 
1808 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1809 	if (!RXDMA_CHANNEL_VALID(channel)) {
1810 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1811 		    " npi_rxdma_rdc_rcr_bufread_update ",
1812 		    " channel %d", channel));
1813 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1814 	}
1815 
1816 	if ((bufs_read < min_read) || (bufs_read > 512)) {
1817 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1818 		    " npi_rxdma_rdc_rcr_bufread_update ",
1819 		    " bufs read %d out of bound", bufs_read));
1820 		return (NPI_RXDMA_OPCODE_INVALID(bufs_read));
1821 	}
1822 
1823 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1824 	    &cs.value);
1825 	cs.bits.ldw.ptrread = bufs_read;
1826 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1827 	    channel, cs.value);
1828 
1829 	return (NPI_SUCCESS);
1830 }
1831 
1832 npi_status_t
1833 npi_rxdma_rdc_rcr_read_update(npi_handle_t handle, uint8_t channel,
1834 				    uint16_t pkts_read, uint16_t bufs_read)
1835 {
1836 
1837 	rx_dma_ctl_stat_t	cs;
1838 
1839 	ASSERT(RXDMA_CHANNEL_VALID(channel));
1840 	if (!RXDMA_CHANNEL_VALID(channel)) {
1841 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
1842 		    " npi_rxdma_rdc_rcr_read_update ",
1843 		    " channel %d", channel));
1844 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
1845 	}
1846 
1847 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1848 	    " npi_rxdma_rdc_rcr_read_update "
1849 	    " bufs read %d pkt read %d",
1850 	    bufs_read, pkts_read));
1851 
1852 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1853 	    &cs.value);
1854 
1855 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1856 	    " npi_rxdma_rdc_rcr_read_update: "
1857 	    " value: 0x%llx bufs read %d pkt read %d",
1858 	    cs.value,
1859 	    cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1860 
1861 	cs.bits.ldw.pktread = pkts_read;
1862 	cs.bits.ldw.ptrread = bufs_read;
1863 
1864 	RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
1865 	    channel, cs.value);
1866 
1867 	RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
1868 	    &cs.value);
1869 
1870 	NPI_DEBUG_MSG((handle.function, NPI_RDC_CTL,
1871 	    " npi_rxdma_rdc_rcr_read_update: read back after update "
1872 	    " value: 0x%llx bufs read %d pkt read %d",
1873 	    cs.value,
1874 	    cs.bits.ldw.ptrread, cs.bits.ldw.pktread));
1875 
1876 	return (NPI_SUCCESS);
1877 }
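
/*
 * Usage sketch (editorial addition): after servicing completion ring
 * entries, a caller might report the number of packets and buffer pointers
 * consumed so the hardware can reclaim RCR space.  The guard macro, the
 * function name and the counts shown are hypothetical/illustrative.
 */
#ifdef NPI_RXDMA_USAGE_EXAMPLES
static void
example_rcr_consumed(npi_handle_t handle, uint8_t channel)
{
	uint16_t pkts_serviced = 16;	/* packets processed this pass */
	uint16_t bufs_serviced = 16;	/* buffer pointers released */

	/* writes pktread/ptrread in RX_DMA_CTL_STAT_REG for this channel */
	(void) npi_rxdma_rdc_rcr_read_update(handle, channel,
	    pkts_serviced, bufs_serviced);
}
#endif	/* NPI_RXDMA_USAGE_EXAMPLES */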
1878 
1879 /*
1880  * npi_rxdma_channel_mex_set():
1881  *	This function is called to arm the DMA channel with
1882  *	mailbox updating capability. Software needs to rearm
1883  *	for each update by writing to the control and status register.
1884  *
1885  * Parameters:
1886  *	handle		- NPI handle (virtualization flag must be defined).
1887  *	channel		- logical RXDMA channel from 0 to 23.
1888  *			  (If virtualization flag is not set, then
1889  *			   logical channel is the same as the hardware
1890  *			   channel number).
1891  *
1892  * Return:
1893  *	NPI_SUCCESS		- If enabling the channel with mailbox
1894  *				  update completes successfully.
1895  *
1896  *	Error:
1897  *	NPI error status code
1898  */
1899 npi_status_t
1900 npi_rxdma_channel_mex_set(npi_handle_t handle, uint8_t channel)
1901 {
1902 	return (npi_rxdma_channel_control(handle, RXDMA_MEX_SET, channel));
1903 }
1904 
1905 /*
1906  * npi_rxdma_channel_rcrto_clear():
1907  *	This function is called to reset RCRTO bit to 0.
1908  *
1909  * Parameters:
1910  *	handle		- NPI handle (virtualization flag must be defined).
1911  *	channel		- logical RXDMA channel from 0 to 23.
1912  *			  (If virtualization flag is not set, then
1913  *			   logical channel is the same as the hardware
1914  *			   channel number).
1915  * Return:
1916  *	NPI_SUCCESS
1917  *
1918  *	Error:
1919  *	NPI error status code
1920  */
1921 npi_status_t
1922 npi_rxdma_channel_rcrto_clear(npi_handle_t handle, uint8_t channel)
1923 {
1924 	return (npi_rxdma_channel_control(handle, RXDMA_RCRTO_CLEAR, channel));
1925 }
1926 
1927 /*
1928  * npi_rxdma_channel_pt_drop_pkt_clear():
1929  *	This function is called to clear the port drop packet bit (debug).
1930  *
1931  * Parameters:
1932  *	handle		- NPI handle (virtualization flag must be defined).
1933  *	channel		- logical RXDMA channel from 0 to 23.
1934  *			  (If virtualization flag is not set, then
1935  *			   logical channel is the same as the hardware
1936  *			   channel number).
1937  * Return:
1938  *	NPI_SUCCESS
1939  *
1940  *	Error:
1941  *	NPI error status code
1942  */
1943 npi_status_t
1944 npi_rxdma_channel_pt_drop_pkt_clear(npi_handle_t handle, uint8_t channel)
1945 {
1946 	return (npi_rxdma_channel_control(handle, RXDMA_PT_DROP_PKT_CLEAR,
1947 	    channel));
1948 }
1949 
1950 /*
1951  * npi_rxdma_channel_wred_drop_clear():
1952  *	This function is called to clear the WRED drop bit (debug only).
1953  *
1954  * Parameters:
1955  *	handle		- NPI handle (virtualization flag must be defined).
1956  *	channel		- logical RXDMA channel from 0 to 23.
1957  *			  (If virtualization flag is not set, then
1958  *			   logical channel is the same as the hardware
1959  *			   channel number).
1960  * Return:
1961  *	NPI_SUCCESS
1962  *
1963  *	Error:
1964  *	NPI error status code
1965  */
1966 npi_status_t
1967 npi_rxdma_channel_wred_dop_clear(npi_handle_t handle, uint8_t channel)
1968 {
1969 	return (npi_rxdma_channel_control(handle, RXDMA_WRED_DROP_CLEAR,
1970 	    channel));
1971 }
1972 
1973 /*
1974  * npi_rxdma_channel_rcr_shfull_clear():
1975  *	This function is called to clear RCR shadow full bit.
1976  *
1977  * Parameters:
1978  *	handle		- NPI handle (virtualization flag must be defined).
1979  *	channel		- logical RXDMA channel from 0 to 23.
1980  *			  (If virtualization flag is not set, then
1981  *			   logical channel is the same as the hardware
1982  *			   channel number).
1983  * Return:
1984  *	NPI_SUCCESS
1985  *
1986  *	Error:
1987  *	NPI error status code
1988  */
1989 npi_status_t
1990 npi_rxdma_channel_rcr_shfull_clear(npi_handle_t handle, uint8_t channel)
1991 {
1992 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_SFULL_CLEAR,
1993 	    channel));
1994 }
1995 
1996 /*
1997  * npi_rxdma_channel_rcr_full_clear():
1998  *	This function is called to clear the RCR full bit.
1999  *
2000  * Parameters:
2001  *	handle		- NPI handle (virtualization flag must be defined).
2002  *	channel		- logical RXDMA channel from 0 to 23.
2003  *			  (If virtualization flag is not set, then
2004  *			   logical channel is the same as the hardware
2005  *			   channel number).
2006  * Return:
2007  *	NPI_SUCCESS
2008  *
2009  *	Error:
2010  *	NPI error status code
2011  */
2012 npi_status_t
2013 npi_rxdma_channel_rcr_full_clear(npi_handle_t handle, uint8_t channel)
2014 {
2015 	return (npi_rxdma_channel_control(handle, RXDMA_RCR_FULL_CLEAR,
2016 	    channel));
2017 }
2018 
2019 npi_status_t
2020 npi_rxdma_channel_rbr_empty_clear(npi_handle_t handle, uint8_t channel)
2021 {
2022 	return (npi_rxdma_channel_control(handle,
2023 	    RXDMA_RBR_EMPTY_CLEAR, channel));
2024 }
2025 
2026 npi_status_t
2027 npi_rxdma_channel_cs_clear_all(npi_handle_t handle, uint8_t channel)
2028 {
2029 	return (npi_rxdma_channel_control(handle, RXDMA_CS_CLEAR_ALL, channel));
2030 }
2031 
2032 /*
2033  * npi_rxdma_channel_control():
2034  *	This function is called to control a receive DMA channel
2035  *	for arming the channel with mailbox updates and for resetting
2036  *	various event status bits in the control and status register.
2037  *
2038  * Parameters:
2039  *	handle		- NPI handle (virtualization flag must be defined).
2040  *	control		- NPI defined control type supported:
2041  *				- RXDMA_MEX_SET
2042  * 				- RXDMA_RCRTO_CLEAR
2043  *				- RXDMA_PT_DROP_PKT_CLEAR
2044  *				- RXDMA_WRED_DROP_CLEAR
2045  *				- RXDMA_RCR_SFULL_CLEAR
2046  *				- RXDMA_RCR_FULL_CLEAR
2047  *				- RXDMA_RBR_PRE_EMPTY_CLEAR
2048  *				- RXDMA_RBR_EMPTY_CLEAR
2049  *	channel		- logical RXDMA channel from 0 to 23.
2050  *			  (If virtualization flag is not set, then
2051  *			   logical channel is the same as the hardware
2052  *			   channel number).
2052  * Return:
2053  *	NPI_SUCCESS
2054  *
2055  *	Error:
2056  *	NPI error status code
2057  */
2058 npi_status_t
2059 npi_rxdma_channel_control(npi_handle_t handle, rxdma_cs_cntl_t control,
2060 			uint8_t channel)
2061 {
2062 
2063 	rx_dma_ctl_stat_t	cs;
2064 
2065 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2066 	if (!RXDMA_CHANNEL_VALID(channel)) {
2067 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2068 		    " npi_rxdma_channel_control",
2069 		    " channel %d", channel));
2070 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2071 	}
2072 
2073 	switch (control) {
2074 	case RXDMA_MEX_SET:
2075 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2076 		    &cs.value);
2077 		cs.bits.hdw.mex = 1;
2078 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG,
2079 		    channel, cs.value);
2080 		break;
2081 
2082 	case RXDMA_RCRTO_CLEAR:
2083 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2084 		    &cs.value);
2085 		cs.bits.hdw.rcrto = 0;
2086 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2087 		    cs.value);
2088 		break;
2089 
2090 	case RXDMA_PT_DROP_PKT_CLEAR:
2091 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2092 		    &cs.value);
2093 		cs.bits.hdw.port_drop_pkt = 0;
2094 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2095 		    cs.value);
2096 		break;
2097 
2098 	case RXDMA_WRED_DROP_CLEAR:
2099 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2100 		    &cs.value);
2101 		cs.bits.hdw.wred_drop = 0;
2102 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2103 		    cs.value);
2104 		break;
2105 
2106 	case RXDMA_RCR_SFULL_CLEAR:
2107 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2108 		    &cs.value);
2109 		cs.bits.hdw.rcr_shadow_full = 0;
2110 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2111 		    cs.value);
2112 		break;
2113 
2114 	case RXDMA_RCR_FULL_CLEAR:
2115 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2116 		    &cs.value);
2117 		cs.bits.hdw.rcrfull = 0;
2118 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2119 		    cs.value);
2120 		break;
2121 
2122 	case RXDMA_RBR_PRE_EMPTY_CLEAR:
2123 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2124 		    &cs.value);
2125 		cs.bits.hdw.rbr_pre_empty = 0;
2126 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2127 		    cs.value);
2128 		break;
2129 
2130 	case RXDMA_RBR_EMPTY_CLEAR:
2131 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2132 		    &cs.value);
2133 		cs.bits.hdw.rbr_empty = 1;
2134 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2135 		    cs.value);
2136 		break;
2137 
2138 	case RXDMA_CS_CLEAR_ALL:
2139 		cs.value = 0;
2140 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2141 		    cs.value);
2142 		break;
2143 
2144 	default:
2145 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2146 		    "npi_rxdma_channel_control",
2147 		    "control %d", control));
2148 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2149 	}
2150 
2151 	return (NPI_SUCCESS);
2152 }
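
/*
 * Usage sketch (editorial addition): a typical interrupt-servicing sequence
 * might clear the RCR timeout indication and then rearm mailbox updates
 * using the thin wrappers defined above.  The guard macro and function name
 * are hypothetical.
 */
#ifdef NPI_RXDMA_USAGE_EXAMPLES
static void
example_rearm_channel(npi_handle_t handle, uint8_t channel)
{
	/* clear the RCR timeout status bit */
	(void) npi_rxdma_channel_rcrto_clear(handle, channel);

	/* rearm the channel for the next mailbox update */
	(void) npi_rxdma_channel_mex_set(handle, channel);
}
#endif	/* NPI_RXDMA_USAGE_EXAMPLES */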
2153 
2154 /*
2155  * npi_rxdma_control_status():
2156  *	This function is called to operate on the control
2157  *	and status register.
2158  *
2159  * Parameters:
2160  *	handle		- NPI handle
2161  *	op_mode		- OP_GET: get hardware control and status
2162  *			  OP_SET: set hardware control and status
2163  *			  OP_UPDATE: update hardware control and status.
2164  *			  (OP_CLEAR is not implemented and returns an error.)
2165  *	channel		- hardware RXDMA channel from 0 to 23.
2166  *	cs_p		- pointer to hardware defined control and status
2167  *			  structure.
2168  * Return:
2169  *	NPI_SUCCESS
2170  *
2171  *	Error:
2172  *	NPI error status code
2173  */
2174 npi_status_t
2175 npi_rxdma_control_status(npi_handle_t handle, io_op_t op_mode,
2176 			uint8_t channel, p_rx_dma_ctl_stat_t cs_p)
2177 {
2178 	int			status = NPI_SUCCESS;
2179 	rx_dma_ctl_stat_t	cs;
2180 
2181 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2182 	if (!RXDMA_CHANNEL_VALID(channel)) {
2183 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2184 		    "npi_rxdma_control_status",
2185 		    "channel %d", channel));
2186 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2187 	}
2188 
2189 	switch (op_mode) {
2190 	case OP_GET:
2191 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2192 		    &cs_p->value);
2193 		break;
2194 
2195 	case OP_SET:
2196 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2197 		    cs_p->value);
2198 		break;
2199 
2200 	case OP_UPDATE:
2201 		RXDMA_REG_READ64(handle, RX_DMA_CTL_STAT_REG, channel,
2202 		    &cs.value);
2203 		RXDMA_REG_WRITE64(handle, RX_DMA_CTL_STAT_REG, channel,
2204 		    cs_p->value | cs.value);
2205 		break;
2206 
2207 	default:
2208 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2209 		    "npi_rxdma_control_status",
2210 		    "control %d", op_mode));
2211 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2212 	}
2213 
2214 	return (status);
2215 }
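
/*
 * Usage sketch (editorial addition): reading the control and status
 * register with OP_GET and writing a caller-built value back with OP_SET.
 * The guard macro and function name are hypothetical.
 */
#ifdef NPI_RXDMA_USAGE_EXAMPLES
static void
example_control_status(npi_handle_t handle, uint8_t channel)
{
	rx_dma_ctl_stat_t cs;

	cs.value = 0;
	/* snapshot the current control and status register */
	(void) npi_rxdma_control_status(handle, OP_GET, channel, &cs);

	/* write the (possibly modified) value back to the hardware */
	(void) npi_rxdma_control_status(handle, OP_SET, channel, &cs);
}
#endif	/* NPI_RXDMA_USAGE_EXAMPLES */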
2216 
2217 /*
2218  * npi_rxdma_event_mask():
2219  *	This function is called to operate on the event mask
2220  *	register which is used for generating interrupts.
2221  *
2222  * Parameters:
2223  *	handle		- NPI handle
2224  *	op_mode		- OP_GET: get hardware event mask
2225  *			  OP_SET: set hardware interrupt event masks
2226  *			  OP_UPDATE: OR the given mask into the current mask.
2227  *	channel		- hardware RXDMA channel from 0 to 23.
2228  *	mask_p		- pointer to hardware defined event mask
2229  *			  structure.
2230  * Return:
2231  *	NPI_SUCCESS		- If the operation completes successfully.
2232  *
2233  *	Error:
2234  *	NPI error status code
2235  */
2236 npi_status_t
2237 npi_rxdma_event_mask(npi_handle_t handle, io_op_t op_mode,
2238 		uint8_t channel, p_rx_dma_ent_msk_t mask_p)
2239 {
2240 	int			status = NPI_SUCCESS;
2241 	rx_dma_ent_msk_t	mask;
2242 
2243 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2244 	if (!RXDMA_CHANNEL_VALID(channel)) {
2245 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2246 		    "npi_rxdma_event_mask",
2247 		    "channel %d", channel));
2248 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2249 	}
2250 
2251 	switch (op_mode) {
2252 	case OP_GET:
2253 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2254 		    &mask_p->value);
2255 		break;
2256 
2257 	case OP_SET:
2258 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2259 		    mask_p->value);
2260 		break;
2261 
2262 	case OP_UPDATE:
2263 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2264 		    &mask.value);
2265 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2266 		    mask_p->value | mask.value);
2267 		break;
2268 
2269 	default:
2270 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2271 		    "npi_rxdma_event_mask",
2272 		    "eventmask %d", op_mode));
2273 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2274 	}
2275 
2276 	return (status);
2277 }
2278 
2279 /*
2280  * npi_rxdma_event_mask_config():
2281  *	This function is called to operate on the event mask
2282  *	register which is used for generating interrupts
2283  *	and status register.
2284  *
2285  * Parameters:
2286  *	handle		- NPI handle
2287  *	op_mode		- OP_GET: get hardware event mask
2288  *			  OP_SET: set hardware interrupt event masks
2289  *			  OP_CLEAR: mask all events (writes CFG_RXDMA_MASK_ALL).
2290  *	channel		- hardware RXDMA channel from 0 to 23.
2291  *	mask_cfgp		- pointer to NPI defined event mask
2292  *			  enum data type.
2293  * Return:
2294  *	NPI_SUCCESS		- If the operation completes successfully.
2295  *
2296  *	Error:
2297  *	NPI error status code
2298  */
2299 npi_status_t
2300 npi_rxdma_event_mask_config(npi_handle_t handle, io_op_t op_mode,
2301 		uint8_t channel, rxdma_ent_msk_cfg_t *mask_cfgp)
2302 {
2303 	int		status = NPI_SUCCESS;
2304 	uint64_t	configuration = *mask_cfgp;
2305 	uint64_t	value;
2306 
2307 	ASSERT(RXDMA_CHANNEL_VALID(channel));
2308 	if (!RXDMA_CHANNEL_VALID(channel)) {
2309 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2310 		    "npi_rxdma_event_mask_config",
2311 		    "channel %d", channel));
2312 		return (NPI_FAILURE | NPI_RXDMA_CHANNEL_INVALID(channel));
2313 	}
2314 
2315 	switch (op_mode) {
2316 	case OP_GET:
2317 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel,
2318 		    (uint64_t *)mask_cfgp);
2319 		break;
2320 
2321 	case OP_SET:
2322 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2323 		    configuration);
2324 		break;
2325 
2326 	case OP_UPDATE:
2327 		RXDMA_REG_READ64(handle, RX_DMA_ENT_MSK_REG, channel, &value);
2328 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2329 		    configuration | value);
2330 		break;
2331 
2332 	case OP_CLEAR:
2333 		RXDMA_REG_WRITE64(handle, RX_DMA_ENT_MSK_REG, channel,
2334 		    CFG_RXDMA_MASK_ALL);
2335 		break;
2336 	default:
2337 		NPI_ERROR_MSG((handle.function, NPI_ERR_CTL,
2338 		    "npi_rxdma_event_mask_config",
2339 		    "eventmask %d", op_mode));
2340 		return (NPI_FAILURE | NPI_RXDMA_OPCODE_INVALID(channel));
2341 	}
2342 
2343 	return (status);
2344 }
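
/*
 * Usage sketch (editorial addition): masking all events on a channel with
 * OP_SET and CFG_RXDMA_MASK_ALL, mirroring the OP_CLEAR case above.  The
 * guard macro and function name are hypothetical.
 */
#ifdef NPI_RXDMA_USAGE_EXAMPLES
static void
example_mask_all_events(npi_handle_t handle, uint8_t channel)
{
	rxdma_ent_msk_cfg_t cfg = CFG_RXDMA_MASK_ALL;

	/* write the mask-all configuration into RX_DMA_ENT_MSK_REG */
	(void) npi_rxdma_event_mask_config(handle, OP_SET, channel, &cfg);
}
#endif	/* NPI_RXDMA_USAGE_EXAMPLES */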
2345