xref: /illumos-gate/usr/src/uts/common/io/nxge/nxge_fflp.c (revision 86ef0a63e1cfa5dc98606efef379365acca98063)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <npi_fflp.h>
28 #include <npi_mac.h>
29 #include <nxge_defs.h>
30 #include <nxge_flow.h>
31 #include <nxge_fflp.h>
32 #include <nxge_impl.h>
33 #include <nxge_fflp_hash.h>
34 #include <nxge_common.h>
35 
36 
37 /*
38  * Function prototypes
39  */
40 static nxge_status_t nxge_fflp_vlan_tbl_clear_all(p_nxge_t);
41 static nxge_status_t nxge_fflp_tcam_invalidate_all(p_nxge_t);
42 static nxge_status_t nxge_fflp_tcam_init(p_nxge_t);
43 static nxge_status_t nxge_fflp_fcram_invalidate_all(p_nxge_t);
44 static nxge_status_t nxge_fflp_fcram_init(p_nxge_t);
45 static int nxge_flow_need_hash_lookup(p_nxge_t, flow_resource_t *);
46 static void nxge_fill_tcam_entry_tcp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
47 static void nxge_fill_tcam_entry_udp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
48 static void nxge_fill_tcam_entry_sctp(p_nxge_t, flow_spec_t *, tcam_entry_t *);
49 static void nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t, flow_spec_t *,
50 	tcam_entry_t *);
51 static void nxge_fill_tcam_entry_udp_ipv6(p_nxge_t, flow_spec_t *,
52 	tcam_entry_t *);
53 static void nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t, flow_spec_t *,
54 	tcam_entry_t *);
55 static uint8_t nxge_get_rdc_offset(p_nxge_t, uint8_t, uint64_t);
56 static uint8_t nxge_get_rdc_group(p_nxge_t, uint8_t, uint64_t);
57 static uint16_t nxge_tcam_get_index(p_nxge_t, uint16_t);
58 static uint32_t nxge_tcam_cls_to_flow(uint32_t);
59 static uint8_t nxge_iptun_pkt_type_to_pid(uint8_t);
60 static npi_status_t nxge_set_iptun_usr_cls_reg(p_nxge_t, uint64_t,
61 					iptun_cfg_t *);
62 static boolean_t nxge_is_iptun_cls_present(p_nxge_t, uint8_t, int *);
63 
64 /*
65  * functions used outside this file
66  */
67 nxge_status_t nxge_fflp_config_vlan_table(p_nxge_t, uint16_t);
68 nxge_status_t nxge_fflp_ip_class_config_all(p_nxge_t);
69 nxge_status_t nxge_add_flow(p_nxge_t, flow_resource_t *);
70 static nxge_status_t nxge_tcam_handle_ip_fragment(p_nxge_t);
71 nxge_status_t nxge_add_tcam_entry(p_nxge_t, flow_resource_t *);
72 nxge_status_t nxge_add_fcram_entry(p_nxge_t, flow_resource_t *);
73 nxge_status_t nxge_flow_get_hash(p_nxge_t, flow_resource_t *,
74 	uint32_t *, uint16_t *);
75 int nxge_get_valid_tcam_cnt(p_nxge_t);
76 void nxge_get_tcam_entry_all(p_nxge_t, rx_class_cfg_t *);
77 void nxge_get_tcam_entry(p_nxge_t, flow_resource_t *);
78 void nxge_del_tcam_entry(p_nxge_t, uint32_t);
79 void nxge_add_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t *);
80 void nxge_cfg_iptun_hash(p_nxge_t, iptun_cfg_t *, uint8_t);
81 void nxge_del_iptun_class(p_nxge_t, uint8_t);
82 void nxge_get_iptun_class(p_nxge_t, iptun_cfg_t *, uint8_t);
83 void nxge_set_ip_cls_sym(p_nxge_t, uint8_t, uint8_t);
84 void nxge_get_ip_cls_sym(p_nxge_t, uint8_t, uint8_t *);
85 
86 
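/*
 * nxge_tcam_dump_entry
 * Reads the TCAM key/mask pair and the associated ASC RAM word at
 * the given location and logs them via NXGE_ERROR_MSG.
 * INPUT
 * nxge      soft state data structure
 * location  TCAM location to dump
 * Return
 *      NXGE_OK
 *      NXGE_ERROR
 */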
87 nxge_status_t
88 nxge_tcam_dump_entry(p_nxge_t nxgep, uint32_t location)
89 {
90 	tcam_entry_t tcam_rdptr;
91 	uint64_t asc_ram = 0;
92 	npi_handle_t handle;
93 	npi_status_t status;
94 
95 	handle = nxgep->npi_reg_handle;
96 
97 	bzero((char *)&tcam_rdptr, sizeof (struct tcam_entry));
98 	status = npi_fflp_tcam_entry_read(handle, (tcam_location_t)location,
99 	    (struct tcam_entry *)&tcam_rdptr);
100 	if (status & NPI_FAILURE) {
101 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
102 		    " nxge_tcam_dump_entry:"
103 		    "  tcam read failed at location %d ", location));
104 		return (NXGE_ERROR);
105 	}
106 	status = npi_fflp_tcam_asc_ram_entry_read(handle,
107 	    (tcam_location_t)location, &asc_ram);
108 
109 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "location %x\n"
110 	    " key:  %llx %llx %llx %llx \n"
111 	    " mask: %llx %llx %llx %llx \n"
112 	    " ASC RAM %llx \n", location,
113 	    tcam_rdptr.key0, tcam_rdptr.key1,
114 	    tcam_rdptr.key2, tcam_rdptr.key3,
115 	    tcam_rdptr.mask0, tcam_rdptr.mask1,
116 	    tcam_rdptr.mask2, tcam_rdptr.mask3, asc_ram));
117 	return (NXGE_OK);
118 }
119 
120 void
121 nxge_get_tcam(p_nxge_t nxgep, p_mblk_t mp)
122 {
123 	uint32_t tcam_loc;
124 	int *lptr;
125 	int location;
126 
127 	uint32_t start_location = 0;
128 	uint32_t stop_location = nxgep->classifier.tcam_size;
129 	lptr = (int *)mp->b_rptr;
130 	location = *lptr;
131 
132 	if ((location >= nxgep->classifier.tcam_size) || (location < -1)) {
133 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
134 		    "nxge_tcam_dump: Invalid location %d \n", location));
135 		return;
136 	}
137 	if (location == -1) {
138 		start_location = 0;
139 		stop_location = nxgep->classifier.tcam_size;
140 	} else {
141 		start_location = location;
142 		stop_location = location + 1;
143 	}
144 	for (tcam_loc = start_location; tcam_loc < stop_location; tcam_loc++)
145 		(void) nxge_tcam_dump_entry(nxgep, tcam_loc);
146 }
147 
148 /*
149  * nxge_fflp_vlan_tbl_clear_all
150  * Clears (invalidates) all of the VLAN RDC table entries.
151  * INPUT
152  * nxge    soft state data structure
153  * Return
154  *      NXGE_OK
155  *      NXGE_ERROR
156  *
157  */
158 
159 static nxge_status_t
160 nxge_fflp_vlan_tbl_clear_all(p_nxge_t nxgep)
161 {
162 	vlan_id_t vlan_id;
163 	npi_handle_t handle;
164 	npi_status_t rs = NPI_SUCCESS;
165 	vlan_id_t start = 0, stop = NXGE_MAX_VLANS;
166 
167 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_vlan_tbl_clear_all "));
168 	handle = nxgep->npi_reg_handle;
169 	for (vlan_id = start; vlan_id < stop; vlan_id++) {
170 		rs = npi_fflp_cfg_vlan_table_clear(handle, vlan_id);
171 		if (rs != NPI_SUCCESS) {
172 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
173 			    "VLAN Table invalidate failed for vlan id %d ",
174 			    vlan_id));
175 			return (NXGE_ERROR | rs);
176 		}
177 	}
178 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_vlan_tbl_clear_all "));
179 	return (NXGE_OK);
180 }
181 
182 /*
183  * The following functions are used by other modules to init
184  * the fflp module.
185  * These functions are the basic API used to initialize
186  * the fflp modules (TCAM, FCRAM, etc.).
187  *
188  * The TCAM search feature is disabled by default.
189  */
190 
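/*
 * nxge_fflp_tcam_init
 * Disables TCAM lookup, programs the TCAM access ratio from the
 * driver parameter array, and disables the programmable Ethernet
 * (ETYPE) and IP user classes.
 * Return
 *      NXGE_OK
 *      NXGE_ERROR | rs
 */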
191 static nxge_status_t
192 nxge_fflp_tcam_init(p_nxge_t nxgep)
193 {
194 	uint8_t access_ratio;
195 	tcam_class_t class;
196 	npi_status_t rs = NPI_SUCCESS;
197 	npi_handle_t handle;
198 
199 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_tcam_init"));
200 	handle = nxgep->npi_reg_handle;
201 
202 	rs = npi_fflp_cfg_tcam_disable(handle);
203 	if (rs != NPI_SUCCESS) {
204 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed TCAM Disable\n"));
205 		return (NXGE_ERROR | rs);
206 	}
207 
208 	access_ratio = nxgep->param_arr[param_tcam_access_ratio].value;
209 	rs = npi_fflp_cfg_tcam_access(handle, access_ratio);
210 	if (rs != NPI_SUCCESS) {
211 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
212 		    "failed TCAM Access cfg\n"));
213 		return (NXGE_ERROR | rs);
214 	}
215 
216 	/* disable configurable classes */
217 	/* disable the configurable ethernet classes; */
218 	for (class = TCAM_CLASS_ETYPE_1;
219 	    class <= TCAM_CLASS_ETYPE_2; class++) {
220 		rs = npi_fflp_cfg_enet_usr_cls_disable(handle, class);
221 		if (rs != NPI_SUCCESS) {
222 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
223 			    "TCAM USR Ether Class config failed."));
224 			return (NXGE_ERROR | rs);
225 		}
226 	}
227 
228 	/* disable the configurable ip classes; */
229 	for (class = TCAM_CLASS_IP_USER_4;
230 	    class <= TCAM_CLASS_IP_USER_7; class++) {
231 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
232 		if (rs != NPI_SUCCESS) {
233 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
234 			    "TCAM USR IP Class config failed."));
235 			return (NXGE_ERROR | rs);
236 		}
237 	}
238 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_tcam_init"));
239 	return (NXGE_OK);
240 }
241 
242 /*
243  * nxge_fflp_tcam_invalidate_all
244  * invalidates all the tcam entries.
245  * INPUT
246  * nxge    soft state data structure
247  * Return
248  *      NXGE_OK
249  *      NXGE_ERROR
250  *
251  */
252 
253 
254 static nxge_status_t
255 nxge_fflp_tcam_invalidate_all(p_nxge_t nxgep)
256 {
257 	uint16_t location;
258 	npi_status_t rs = NPI_SUCCESS;
259 	npi_handle_t handle;
260 	uint16_t start = 0, stop = nxgep->classifier.tcam_size;
261 	p_nxge_hw_list_t hw_p;
262 
263 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
264 	    "==> nxge_fflp_tcam_invalidate_all"));
265 	handle = nxgep->npi_reg_handle;
266 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
267 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
268 		    " nxge_fflp_tcam_invalidate_all:"
269 		    " common hardware not set (niu type 0x%x)", nxgep->niu_type));
270 		return (NXGE_ERROR);
271 	}
272 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
273 	for (location = start; location < stop; location++) {
274 		rs = npi_fflp_tcam_entry_invalidate(handle, location);
275 		if (rs != NPI_SUCCESS) {
276 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
277 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
278 			    "TCAM invalidate failed at loc %d ", location));
279 			return (NXGE_ERROR | rs);
280 		}
281 	}
282 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
283 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
284 	    "<== nxge_fflp_tcam_invalidate_all"));
285 	return (NXGE_OK);
286 }
287 
288 /*
289  * nxge_fflp_fcram_entry_invalidate_all
290  * invalidates all the FCRAM entries.
291  * INPUT
292  * nxge    soft state data structure
293  * Return
294  *      NXGE_OK
295  *      NXGE_ERROR
296  *
297  */
298 
299 static nxge_status_t
300 nxge_fflp_fcram_invalidate_all(p_nxge_t nxgep)
301 {
302 	npi_handle_t handle;
303 	npi_status_t rs = NPI_SUCCESS;
304 	part_id_t pid = 0;
305 	uint8_t base_mask, base_reloc;
306 	fcram_entry_t fc;
307 	uint32_t location;
308 	uint32_t increment, last_location;
309 
310 	/*
311 	 * (1) configure and enable partition 0 with no relocation
312 	 * (2) Assume the FCRAM is used as IPv4 exact match entry cells
313 	 * (3) Invalidate these cells by clearing the valid bit in
314 	 * the subareas 0 and 4
315 	 * (4) disable the partition
316 	 *
317 	 */
318 
319 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_invalidate_all"));
320 
321 	base_mask = base_reloc = 0x0;
322 	handle = nxgep->npi_reg_handle;
323 	rs = npi_fflp_cfg_fcram_partition(handle, pid, base_mask, base_reloc);
324 
325 	if (rs != NPI_SUCCESS) {
326 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed partition cfg\n"));
327 		return (NXGE_ERROR | rs);
328 	}
329 	rs = npi_fflp_cfg_fcram_partition_disable(handle, pid);
330 
331 	if (rs != NPI_SUCCESS) {
332 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
333 		    "failed partition disable\n"));
334 		return (NXGE_ERROR | rs);
335 	}
336 	fc.dreg[0].value = 0;
337 	fc.hash_hdr_valid = 0;
338 	fc.hash_hdr_ext = 1;	/* specify as IPV4 exact match entry */
339 	increment = sizeof (hash_ipv4_t);
340 	last_location = FCRAM_SIZE * 0x40;
341 
342 	for (location = 0; location < last_location; location += increment) {
343 		rs = npi_fflp_fcram_subarea_write(handle, pid,
344 		    location, fc.value[0]);
345 		if (rs != NPI_SUCCESS) {
346 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
347 			    "failed write at location %x ", location));
348 			return (NXGE_ERROR | rs);
349 		}
350 	}
351 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_invalidate_all"));
352 	return (NXGE_OK);
353 }
354 
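/*
 * nxge_fflp_fcram_init
 * Resets the FCRAM (normal output drive, QS mode), programs the
 * access ratio and refresh timers, and leaves all partitions
 * disabled until they are explicitly enabled.
 * Return
 *      NXGE_OK
 *      NXGE_ERROR | rs
 */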
355 static nxge_status_t
356 nxge_fflp_fcram_init(p_nxge_t nxgep)
357 {
358 	fflp_fcram_output_drive_t strength;
359 	fflp_fcram_qs_t qs;
360 	npi_status_t rs = NPI_SUCCESS;
361 	uint8_t access_ratio;
362 	int partition;
363 	npi_handle_t handle;
364 	uint32_t min_time, max_time, sys_time;
365 
366 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_fcram_init"));
367 
368 	/*
369 	 * Use the recommended default refresh timing values.
370 	 */
371 	min_time = FCRAM_REFRESH_DEFAULT_MIN_TIME;
372 	max_time = FCRAM_REFRESH_DEFAULT_MAX_TIME;
373 	sys_time = FCRAM_REFRESH_DEFAULT_SYS_TIME;
374 
375 	handle = nxgep->npi_reg_handle;
376 	strength = FCRAM_OUTDR_NORMAL;
377 	qs = FCRAM_QS_MODE_QS;
378 	rs = npi_fflp_cfg_fcram_reset(handle, strength, qs);
379 	if (rs != NPI_SUCCESS) {
380 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Reset. "));
381 		return (NXGE_ERROR | rs);
382 	}
383 
384 	access_ratio = nxgep->param_arr[param_fcram_access_ratio].value;
385 	rs = npi_fflp_cfg_fcram_access(handle, access_ratio);
386 	if (rs != NPI_SUCCESS) {
387 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "failed FCRAM Access ratio"
388 		    " configuration \n"));
389 		return (NXGE_ERROR | rs);
390 	}
391 	rs = npi_fflp_cfg_fcram_refresh_time(handle, min_time,
392 	    max_time, sys_time);
393 	if (rs != NPI_SUCCESS) {
394 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
395 		    "failed FCRAM refresh cfg"));
396 		return (NXGE_ERROR);
397 	}
398 
399 	/* disable all the partitions until explicitly enabled */
400 	for (partition = 0; partition < FFLP_FCRAM_MAX_PARTITION; partition++) {
401 		rs = npi_fflp_cfg_fcram_partition_disable(handle, partition);
402 		if (rs != NPI_SUCCESS) {
403 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
404 			    "failed FCRAM partition"
405 			    " disable for partition %d ", partition));
406 			return (NXGE_ERROR | rs);
407 		}
408 	}
409 
410 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_fcram_init"));
411 	return (NXGE_OK);
412 }
413 
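/*
 * nxge_logical_mac_assign_rdc_table
 * Programs the hostinfo entry for the given alternate MAC address
 * with the RDC table number and MAC preference taken from the
 * class configuration. Fails if the alternate MAC is unconfigured.
 */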
414 nxge_status_t
415 nxge_logical_mac_assign_rdc_table(p_nxge_t nxgep, uint8_t alt_mac)
416 {
417 	npi_status_t rs = NPI_SUCCESS;
418 	hostinfo_t mac_rdc;
419 	npi_handle_t handle;
420 	p_nxge_class_pt_cfg_t p_class_cfgp;
421 
422 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
423 	if (p_class_cfgp->mac_host_info[alt_mac].flag == 0) {
424 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
425 		    " nxge_logical_mac_assign_rdc_table"
426 		    " unconfigured alt MAC addr %d ", alt_mac));
427 		return (NXGE_ERROR);
428 	}
429 	handle = nxgep->npi_reg_handle;
430 	mac_rdc.value = 0;
431 	mac_rdc.bits.w0.rdc_tbl_num =
432 	    p_class_cfgp->mac_host_info[alt_mac].rdctbl;
433 	mac_rdc.bits.w0.mac_pref = p_class_cfgp->mac_host_info[alt_mac].mpr_npr;
434 
435 	rs = npi_mac_hostinfo_entry(handle, OP_SET,
436 	    nxgep->function_num, alt_mac, &mac_rdc);
437 
438 	if (rs != NPI_SUCCESS) {
439 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
440 		    "failed Assign RDC table"));
441 		return (NXGE_ERROR | rs);
442 	}
443 	return (NXGE_OK);
444 }
445 
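/*
 * nxge_main_mac_assign_rdc_table
 * Programs the hostinfo entry for the port's unique MAC address
 * (XMAC for functions 0/1, BMAC for functions 2/3) with the default
 * MAC RDC group; for BMAC the alternate address entries are
 * re-initialized as well.
 */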
446 nxge_status_t
447 nxge_main_mac_assign_rdc_table(p_nxge_t nxgep)
448 {
449 	npi_status_t rs = NPI_SUCCESS;
450 	hostinfo_t mac_rdc;
451 	npi_handle_t handle;
452 	int i;
453 
454 	handle = nxgep->npi_reg_handle;
455 	mac_rdc.value = 0;
456 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mac_rdcgrp;
457 	mac_rdc.bits.w0.mac_pref = 1;
458 	switch (nxgep->function_num) {
459 	case 0:
460 	case 1:
461 		/*
462 		 * Tests indicate that it is OK not to re-initialize the
463 		 * hostinfo registers for the XMAC's alternate MAC
464 		 * addresses. But that is necessary for BMAC (case 2
465 		 * and case 3 below)
466 		 */
467 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
468 		    nxgep->function_num, XMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
469 		break;
470 	case 2:
471 	case 3:
472 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
473 		    nxgep->function_num, BMAC_UNIQUE_HOST_INFO_ENTRY, &mac_rdc);
474 		for (i = 1; i <= BMAC_MAX_ALT_ADDR_ENTRY; i++)
475 			rs |= npi_mac_hostinfo_entry(handle, OP_SET,
476 			    nxgep->function_num, i, &mac_rdc);
477 		break;
478 	default:
479 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
480 		    "failed Assign RDC table (invalid function #)"));
481 		return (NXGE_ERROR);
482 	}
483 
484 	if (rs != NPI_SUCCESS) {
485 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
486 		    "failed Assign RDC table"));
487 		return (NXGE_ERROR | rs);
488 	}
489 	return (NXGE_OK);
490 }
491 
492 /*
493  * Initialize hostinfo registers for alternate MAC addresses and
494  * multicast MAC address.
495  */
496 nxge_status_t
497 nxge_alt_mcast_mac_assign_rdc_table(p_nxge_t nxgep)
498 {
499 	npi_status_t rs = NPI_SUCCESS;
500 	hostinfo_t mac_rdc;
501 	npi_handle_t handle;
502 
503 	handle = nxgep->npi_reg_handle;
504 	mac_rdc.value = 0;
505 	mac_rdc.bits.w0.rdc_tbl_num = nxgep->class_config.mcast_rdcgrp;
506 	mac_rdc.bits.w0.mac_pref = 1;
507 	switch (nxgep->function_num) {
508 	case 0:
509 	case 1:
510 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
511 		    nxgep->function_num, XMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
512 		break;
513 	case 2:
514 	case 3:
515 		rs = npi_mac_hostinfo_entry(handle, OP_SET,
516 		    nxgep->function_num, BMAC_MULTI_HOST_INFO_ENTRY, &mac_rdc);
517 		break;
518 	default:
519 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
520 		    "failed Assign RDC table (invalid function #)"));
521 		return (NXGE_ERROR);
522 	}
523 
524 	if (rs != NPI_SUCCESS) {
525 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
526 		    "failed Assign RDC table"));
527 		return (NXGE_ERROR | rs);
528 	}
529 	return (NXGE_OK);
530 }
531 
532 nxge_status_t
533 nxge_fflp_init_hostinfo(p_nxge_t nxgep)
534 {
535 	nxge_status_t status = NXGE_OK;
536 
537 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
538 	status |= nxge_main_mac_assign_rdc_table(nxgep);
539 	return (status);
540 }
541 
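/*
 * nxge_fflp_hw_reset
 * Brings the FFLP hardware to a known state: initializes the FCRAM
 * (Neptune only) and TCAM, enables LLC-SNAP parsing, disables CAM
 * error checking, zeroes the H1/H2 hash polynomials, and invalidates
 * the TCAM, FCRAM (Neptune only) and VLAN RDC tables.
 */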
542 nxge_status_t
543 nxge_fflp_hw_reset(p_nxge_t nxgep)
544 {
545 	npi_handle_t handle;
546 	npi_status_t rs = NPI_SUCCESS;
547 	nxge_status_t status = NXGE_OK;
548 
549 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_hw_reset"));
550 
551 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
552 		status = nxge_fflp_fcram_init(nxgep);
553 		if (status != NXGE_OK) {
554 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
555 			    " failed FCRAM init. "));
556 			return (status);
557 		}
558 	}
559 
560 	status = nxge_fflp_tcam_init(nxgep);
561 	if (status != NXGE_OK) {
562 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
563 		    "failed TCAM init."));
564 		return (status);
565 	}
566 
567 	handle = nxgep->npi_reg_handle;
568 	rs = npi_fflp_cfg_llcsnap_enable(handle);
569 	if (rs != NPI_SUCCESS) {
570 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
571 		    "failed LLCSNAP enable. "));
572 		return (NXGE_ERROR | rs);
573 	}
574 
575 	rs = npi_fflp_cfg_cam_errorcheck_disable(handle);
576 	if (rs != NPI_SUCCESS) {
577 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
578 		    "failed CAM Error Check disable. "));
579 		return (NXGE_ERROR | rs);
580 	}
581 
582 	/* init the hash generators */
583 	rs = npi_fflp_cfg_hash_h1poly(handle, 0);
584 	if (rs != NPI_SUCCESS) {
585 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
586 		    "failed H1 Poly Init. "));
587 		return (NXGE_ERROR | rs);
588 	}
589 
590 	rs = npi_fflp_cfg_hash_h2poly(handle, 0);
591 	if (rs != NPI_SUCCESS) {
592 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
593 		    "failed H2 Poly Init. "));
594 		return (NXGE_ERROR | rs);
595 	}
596 
597 	/* invalidate TCAM entries */
598 	status = nxge_fflp_tcam_invalidate_all(nxgep);
599 	if (status != NXGE_OK) {
600 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
601 		    "failed TCAM Entry Invalidate. "));
602 		return (status);
603 	}
604 
605 	/* invalidate FCRAM entries */
606 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
607 		status = nxge_fflp_fcram_invalidate_all(nxgep);
608 		if (status != NXGE_OK) {
609 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
610 			    "failed FCRAM Entry Invalidate."));
611 			return (status);
612 		}
613 	}
614 
615 	/* invalidate VLAN RDC tables */
616 	status = nxge_fflp_vlan_tbl_clear_all(nxgep);
617 	if (status != NXGE_OK) {
618 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
619 		    "failed VLAN Table Invalidate. "));
620 		return (status);
621 	}
622 	nxgep->classifier.state |= NXGE_FFLP_HW_RESET;
623 
624 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_hw_reset"));
625 	return (NXGE_OK);
626 }
627 
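/*
 * nxge_cfg_ip_cls_flow_key
 * Translates the NXGE_CLASS_FLOW_USE_* bits in class_config into a
 * flow_key_cfg_t and programs the flow key register for the given
 * L3 class. nxge_cfg_ip_cls_flow_key_get() performs the reverse
 * translation.
 */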
628 nxge_status_t
629 nxge_cfg_ip_cls_flow_key(p_nxge_t nxgep, tcam_class_t l3_class,
630     uint32_t class_config)
631 {
632 	flow_key_cfg_t fcfg;
633 	npi_handle_t handle;
634 	npi_status_t rs = NPI_SUCCESS;
635 
636 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key"));
637 	handle = nxgep->npi_reg_handle;
638 	bzero(&fcfg, sizeof (flow_key_cfg_t));
639 
640 	if (class_config & NXGE_CLASS_FLOW_USE_PROTO)
641 		fcfg.use_proto = 1;
642 	if (class_config & NXGE_CLASS_FLOW_USE_DST_PORT)
643 		fcfg.use_dport = 1;
644 	if (class_config & NXGE_CLASS_FLOW_USE_SRC_PORT)
645 		fcfg.use_sport = 1;
646 	if (class_config & NXGE_CLASS_FLOW_USE_IPDST)
647 		fcfg.use_daddr = 1;
648 	if (class_config & NXGE_CLASS_FLOW_USE_IPSRC)
649 		fcfg.use_saddr = 1;
650 	if (class_config & NXGE_CLASS_FLOW_USE_VLAN)
651 		fcfg.use_vlan = 1;
652 	if (class_config & NXGE_CLASS_FLOW_USE_L2DA)
653 		fcfg.use_l2da = 1;
654 	if (class_config & NXGE_CLASS_FLOW_USE_PORTNUM)
655 		fcfg.use_portnum = 1;
656 	fcfg.ip_opts_exist = 0;
657 
658 	rs = npi_fflp_cfg_ip_cls_flow_key(handle, l3_class, &fcfg);
659 	if (rs & NPI_FFLP_ERROR) {
660 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_ip_cls_flow_key"
661 		    " opt %x for class %d failed ", class_config, l3_class));
662 		return (NXGE_ERROR | rs);
663 	}
664 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_cfg_ip_cls_flow_key"));
665 	return (NXGE_OK);
666 }
667 
668 nxge_status_t
669 nxge_cfg_ip_cls_flow_key_get(p_nxge_t nxgep, tcam_class_t l3_class,
670     uint32_t *class_config)
671 {
672 	flow_key_cfg_t fcfg;
673 	npi_handle_t handle;
674 	npi_status_t rs = NPI_SUCCESS;
675 	uint32_t ccfg = 0;
676 
677 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_cfg_ip_cls_flow_key_get"));
678 	handle = nxgep->npi_reg_handle;
679 	bzero(&fcfg, sizeof (flow_key_cfg_t));
680 
681 	rs = npi_fflp_cfg_ip_cls_flow_key_get(handle, l3_class, &fcfg);
682 	if (rs & NPI_FFLP_ERROR) {
683 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
684 		    " nxge_cfg_ip_cls_flow_key_get for class %d failed ", l3_class));
685 		return (NXGE_ERROR | rs);
686 	}
687 
688 	if (fcfg.use_proto)
689 		ccfg |= NXGE_CLASS_FLOW_USE_PROTO;
690 	if (fcfg.use_dport)
691 		ccfg |= NXGE_CLASS_FLOW_USE_DST_PORT;
692 	if (fcfg.use_sport)
693 		ccfg |= NXGE_CLASS_FLOW_USE_SRC_PORT;
694 	if (fcfg.use_daddr)
695 		ccfg |= NXGE_CLASS_FLOW_USE_IPDST;
696 	if (fcfg.use_saddr)
697 		ccfg |= NXGE_CLASS_FLOW_USE_IPSRC;
698 	if (fcfg.use_vlan)
699 		ccfg |= NXGE_CLASS_FLOW_USE_VLAN;
700 	if (fcfg.use_l2da)
701 		ccfg |= NXGE_CLASS_FLOW_USE_L2DA;
702 	if (fcfg.use_portnum)
703 		ccfg |= NXGE_CLASS_FLOW_USE_PORTNUM;
704 
705 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
706 	    " nxge_cfg_ip_cls_flow_key_get %x", ccfg));
707 	*class_config = ccfg;
708 
709 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
710 	    " <== nxge_cfg_ip_cls_flow_key_get"));
711 	return (NXGE_OK);
712 }
713 
714 static nxge_status_t
715 nxge_cfg_tcam_ip_class_get(p_nxge_t nxgep, tcam_class_t class,
716     uint32_t *class_config)
717 {
718 	npi_status_t rs = NPI_SUCCESS;
719 	tcam_key_cfg_t cfg;
720 	npi_handle_t handle;
721 	uint32_t ccfg = 0;
722 
723 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
724 
725 	bzero(&cfg, sizeof (tcam_key_cfg_t));
726 	handle = nxgep->npi_reg_handle;
727 
728 	rs = npi_fflp_cfg_ip_cls_tcam_key_get(handle, class, &cfg);
729 	if (rs & NPI_FFLP_ERROR) {
730 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
731 		    " nxge_cfg_tcam_ip_class_get for class %d failed ", class));
732 		return (NXGE_ERROR | rs);
733 	}
734 	if (cfg.discard)
735 		ccfg |= NXGE_CLASS_DISCARD;
736 	if (cfg.lookup_enable)
737 		ccfg |= NXGE_CLASS_TCAM_LOOKUP;
738 	if (cfg.use_ip_daddr)
739 		ccfg |= NXGE_CLASS_TCAM_USE_SRC_ADDR;
740 	*class_config = ccfg;
741 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
742 	    " ==> nxge_cfg_tcam_ip_class %x", ccfg));
743 	return (NXGE_OK);
744 }
745 
746 static nxge_status_t
747 nxge_cfg_tcam_ip_class(p_nxge_t nxgep, tcam_class_t class,
748     uint32_t class_config)
749 {
750 	npi_status_t rs = NPI_SUCCESS;
751 	tcam_key_cfg_t cfg;
752 	npi_handle_t handle;
753 	p_nxge_class_pt_cfg_t p_class_cfgp;
754 
755 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_cfg_tcam_ip_class"));
756 
757 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
758 	p_class_cfgp->class_cfg[class] = class_config;
759 
760 	bzero(&cfg, sizeof (tcam_key_cfg_t));
761 	handle = nxgep->npi_reg_handle;
762 	cfg.discard = 0;
763 	cfg.lookup_enable = 0;
764 	cfg.use_ip_daddr = 0;
765 	if (class_config & NXGE_CLASS_DISCARD)
766 		cfg.discard = 1;
767 	if (class_config & NXGE_CLASS_TCAM_LOOKUP)
768 		cfg.lookup_enable = 1;
769 	if (class_config & NXGE_CLASS_TCAM_USE_SRC_ADDR)
770 		cfg.use_ip_daddr = 1;
771 
772 	rs = npi_fflp_cfg_ip_cls_tcam_key(handle, class, &cfg);
773 	if (rs & NPI_FFLP_ERROR) {
774 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, " nxge_cfg_tcam_ip_class"
775 		    " opt %x for class %d failed ", class_config, class));
776 		return (NXGE_ERROR | rs);
777 	}
778 	return (NXGE_OK);
779 }
780 
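/*
 * nxge_fflp_set_hash1 / nxge_fflp_set_hash2
 * Program the H1 and H2 flow hash polynomials and cache the values
 * in the class configuration. For example, passing 0 restores the
 * reset default, which is the value nxge_fflp_hw_reset() programs
 * directly through the NPI layer.
 */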
781 nxge_status_t
782 nxge_fflp_set_hash1(p_nxge_t nxgep, uint32_t h1)
783 {
784 	npi_status_t rs = NPI_SUCCESS;
785 	npi_handle_t handle;
786 	p_nxge_class_pt_cfg_t p_class_cfgp;
787 
788 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h1"));
789 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
790 	p_class_cfgp->init_h1 = h1;
791 	handle = nxgep->npi_reg_handle;
792 	rs = npi_fflp_cfg_hash_h1poly(handle, h1);
793 	if (rs & NPI_FFLP_ERROR) {
794 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
795 		    " nxge_fflp_init_h1 %x failed ", h1));
796 		return (NXGE_ERROR | rs);
797 	}
798 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h1"));
799 	return (NXGE_OK);
800 }
801 
802 nxge_status_t
803 nxge_fflp_set_hash2(p_nxge_t nxgep, uint16_t h2)
804 {
805 	npi_status_t rs = NPI_SUCCESS;
806 	npi_handle_t handle;
807 	p_nxge_class_pt_cfg_t p_class_cfgp;
808 
809 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_init_h2"));
810 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
811 	p_class_cfgp->init_h2 = h2;
812 
813 	handle = nxgep->npi_reg_handle;
814 	rs = npi_fflp_cfg_hash_h2poly(handle, h2);
815 	if (rs & NPI_FFLP_ERROR) {
816 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
817 		    " nxge_fflp_init_h2 %x failed ", h2));
818 		return (NXGE_ERROR | rs);
819 	}
820 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_init_h2"));
821 	return (NXGE_OK);
822 }
823 
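/*
 * nxge_classify_init_sw
 * Initializes the per-port software classifier state: the per-port
 * share of the TCAM (tcam_size / nports), the shared TCAM entry
 * array, the H1 hash table and CRC-CCITT tables, and the IP
 * fragment workaround flag.
 */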
824 nxge_status_t
825 nxge_classify_init_sw(p_nxge_t nxgep)
826 {
827 	nxge_classify_t *classify_ptr;
828 
829 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_sw"));
830 	classify_ptr = &nxgep->classifier;
831 
832 	if (classify_ptr->state & NXGE_FFLP_SW_INIT) {
833 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
834 		    "nxge_classify_init_sw already init"));
835 		return (NXGE_OK);
836 	}
837 
838 	classify_ptr->tcam_size = nxgep->nxge_hw_p->tcam_size / nxgep->nports;
839 	classify_ptr->tcam_entries = (tcam_flow_spec_t *)nxgep->nxge_hw_p->tcam;
840 	classify_ptr->tcam_top = nxgep->function_num;
841 
842 	/* Init defaults */
843 	/*
844 	 * Add workarounds required for HW shortcomings; for example,
845 	 * code to handle fragmented packets.
846 	 */
847 	nxge_init_h1_table();
848 	nxge_crc_ccitt_init();
849 	nxgep->classifier.tcam_location = nxgep->function_num;
850 	nxgep->classifier.fragment_bug = 1;
851 	classify_ptr->state |= NXGE_FFLP_SW_INIT;
852 
853 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_sw"));
854 	return (NXGE_OK);
855 }
856 
857 nxge_status_t
858 nxge_classify_exit_sw(p_nxge_t nxgep)
859 {
860 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_exit_sw"));
861 	nxgep->classifier.state = 0;
862 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_exit_sw"));
863 	return (NXGE_OK);
864 }
865 
866 /*
867  * Figures out the RDC Group for the entry
868  *
869  * The current implementation is just a placeholder: it always
870  * returns the port's default MAC RDC group.
871  * A real location-determining algorithm would consider the
872  * partition, etc., before deciding which group to use.
873  *
874  */
875 
876 /* ARGSUSED */
877 static uint8_t
878 nxge_get_rdc_group(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
879 {
880 	int use_port_rdc_grp = 0;
881 	uint8_t rdc_grp = 0;
882 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
883 	p_nxge_hw_pt_cfg_t p_cfgp;
884 	p_nxge_rdc_grp_t rdc_grp_p;
885 
886 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
887 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
888 	rdc_grp_p = &p_dma_cfgp->rdc_grps[use_port_rdc_grp];
889 	rdc_grp = p_cfgp->def_mac_rxdma_grpid;
890 
891 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
892 	    "nxge_get_rdc_group: grp 0x%x real_grp %x grpp $%p\n",
893 	    cookie, rdc_grp, rdc_grp_p));
894 	return (rdc_grp);
895 }
896 
897 /* ARGSUSED */
898 static uint8_t
899 nxge_get_rdc_offset(p_nxge_t nxgep, uint8_t class, uint64_t cookie)
900 {
901 	return ((uint8_t)cookie);
902 }
903 
904 /* ARGSUSED */
905 static void
906 nxge_fill_tcam_entry_udp(p_nxge_t nxgep, flow_spec_t *flow_spec,
907     tcam_entry_t *tcam_ptr)
908 {
909 #define	fspec_key (flow_spec->uh.udpip4spec)
910 #define	fspec_mask (flow_spec->um.udpip4spec)
911 
912 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key.ip4dst);
913 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask.ip4dst);
914 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key.ip4src);
915 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask.ip4src);
916 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
917 	    fspec_key.pdst, fspec_key.psrc);
918 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
919 	    fspec_mask.pdst, fspec_mask.psrc);
920 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
921 	    tcam_ptr->ip4_class_mask,
922 	    TCAM_CLASS_UDP_IPV4);
923 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
924 	    tcam_ptr->ip4_proto_mask,
925 	    IPPROTO_UDP);
926 	tcam_ptr->ip4_tos_key = fspec_key.tos;
927 	tcam_ptr->ip4_tos_mask = fspec_mask.tos;
928 #undef fspec_key
929 #undef fspec_mask
930 }
931 
932 static void
933 nxge_fill_tcam_entry_udp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
934     tcam_entry_t *tcam_ptr)
935 {
936 	p_nxge_class_pt_cfg_t p_class_cfgp;
937 #define	fspec_key (flow_spec->uh.udpip6spec)
938 #define	fspec_mask (flow_spec->um.udpip6spec)
939 
940 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
941 	if (p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV6] &
942 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
943 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6src);
944 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6src);
945 	} else {
946 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6dst);
947 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6dst);
948 	}
949 
950 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
951 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_UDP_IPV6);
952 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
953 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_UDP);
954 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
955 	    fspec_key.pdst, fspec_key.psrc);
956 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
957 	    fspec_mask.pdst, fspec_mask.psrc);
958 	tcam_ptr->ip6_tos_key = fspec_key.tos;
959 	tcam_ptr->ip6_tos_mask = fspec_mask.tos;
960 #undef fspec_key
961 #undef fspec_mask
962 }
963 
964 /* ARGSUSED */
965 static void
966 nxge_fill_tcam_entry_tcp(p_nxge_t nxgep, flow_spec_t *flow_spec,
967     tcam_entry_t *tcam_ptr)
968 {
969 #define	fspec_key (flow_spec->uh.tcpip4spec)
970 #define	fspec_mask (flow_spec->um.tcpip4spec)
971 
972 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key.ip4dst);
973 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask.ip4dst);
974 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key.ip4src);
975 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask.ip4src);
976 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
977 	    fspec_key.pdst, fspec_key.psrc);
978 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
979 	    fspec_mask.pdst, fspec_mask.psrc);
980 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
981 	    tcam_ptr->ip4_class_mask, TCAM_CLASS_TCP_IPV4);
982 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
983 	    tcam_ptr->ip4_proto_mask, IPPROTO_TCP);
984 	tcam_ptr->ip4_tos_key = fspec_key.tos;
985 	tcam_ptr->ip4_tos_mask = fspec_mask.tos;
986 #undef fspec_key
987 #undef fspec_mask
988 }
989 
990 /* ARGSUSED */
991 static void
992 nxge_fill_tcam_entry_sctp(p_nxge_t nxgep, flow_spec_t *flow_spec,
993     tcam_entry_t *tcam_ptr)
994 {
995 #define	fspec_key (flow_spec->uh.tcpip4spec)
996 #define	fspec_mask (flow_spec->um.tcpip4spec)
997 
998 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key.ip4dst);
999 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask.ip4dst);
1000 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key.ip4src);
1001 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask.ip4src);
1002 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1003 	    tcam_ptr->ip4_class_mask, TCAM_CLASS_SCTP_IPV4);
1004 	TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1005 	    tcam_ptr->ip4_proto_mask, IPPROTO_SCTP);
1006 	TCAM_IP_PORTS(tcam_ptr->ip4_port_key,
1007 	    fspec_key.pdst, fspec_key.psrc);
1008 	TCAM_IP_PORTS(tcam_ptr->ip4_port_mask,
1009 	    fspec_mask.pdst, fspec_mask.psrc);
1010 	tcam_ptr->ip4_tos_key = fspec_key.tos;
1011 	tcam_ptr->ip4_tos_mask = fspec_mask.tos;
1012 #undef fspec_key
1013 #undef fspec_mask
1014 }
1015 
1016 static void
1017 nxge_fill_tcam_entry_tcp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1018     tcam_entry_t *tcam_ptr)
1019 {
1020 	p_nxge_class_pt_cfg_t p_class_cfgp;
1021 #define	fspec_key (flow_spec->uh.tcpip6spec)
1022 #define	fspec_mask (flow_spec->um.tcpip6spec)
1023 
1024 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1025 	if (p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV6] &
1026 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1027 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6src);
1028 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6src);
1029 	} else {
1030 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6dst);
1031 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6dst);
1032 	}
1033 
1034 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1035 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_TCP_IPV6);
1036 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1037 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_TCP);
1038 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1039 	    fspec_key.pdst, fspec_key.psrc);
1040 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1041 	    fspec_mask.pdst, fspec_mask.psrc);
1042 	tcam_ptr->ip6_tos_key = fspec_key.tos;
1043 	tcam_ptr->ip6_tos_mask = fspec_mask.tos;
1044 #undef fspec_key
1045 #undef fspec_mask
1046 }
1047 
1048 static void
1049 nxge_fill_tcam_entry_sctp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1050     tcam_entry_t *tcam_ptr)
1051 {
1052 	p_nxge_class_pt_cfg_t p_class_cfgp;
1053 #define	fspec_key (flow_spec->uh.tcpip6spec)
1054 #define	fspec_mask (flow_spec->um.tcpip6spec)
1055 
1056 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1057 
1058 	if (p_class_cfgp->class_cfg[TCAM_CLASS_SCTP_IPV6] &
1059 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1060 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6src);
1061 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6src);
1062 	} else {
1063 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6dst);
1064 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6dst);
1065 	}
1066 
1067 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1068 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_SCTP_IPV6);
1069 	TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1070 	    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_SCTP);
1071 	TCAM_IP_PORTS(tcam_ptr->ip6_port_key,
1072 	    fspec_key.pdst, fspec_key.psrc);
1073 	TCAM_IP_PORTS(tcam_ptr->ip6_port_mask,
1074 	    fspec_mask.pdst, fspec_mask.psrc);
1075 	tcam_ptr->ip6_tos_key = fspec_key.tos;
1076 	tcam_ptr->ip6_tos_mask = fspec_mask.tos;
1077 #undef fspec_key
1078 #undef fspec_mask
1079 }
1080 
1081 /* ARGSUSED */
1082 static void
1083 nxge_fill_tcam_entry_ah_esp(p_nxge_t nxgep, flow_spec_t *flow_spec,
1084     tcam_entry_t *tcam_ptr)
1085 {
1086 #define	fspec_key (flow_spec->uh.ahip4spec)
1087 #define	fspec_mask (flow_spec->um.ahip4spec)
1088 
1089 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key.ip4dst);
1090 	TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask.ip4dst);
1091 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key.ip4src);
1092 	TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask.ip4src);
1093 
1094 	tcam_ptr->ip4_port_key = fspec_key.spi;
1095 	tcam_ptr->ip4_port_mask = fspec_mask.spi;
1096 
1097 	TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1098 	    tcam_ptr->ip4_class_mask,
1099 	    TCAM_CLASS_AH_ESP_IPV4);
1100 
1101 	if (flow_spec->flow_type == FSPEC_AHIP4) {
1102 		TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1103 		    tcam_ptr->ip4_proto_mask, IPPROTO_AH);
1104 	} else {
1105 		TCAM_IP_PROTO(tcam_ptr->ip4_proto_key,
1106 		    tcam_ptr->ip4_proto_mask, IPPROTO_ESP);
1107 	}
1108 	tcam_ptr->ip4_tos_key = fspec_key.tos;
1109 	tcam_ptr->ip4_tos_mask = fspec_mask.tos;
1110 #undef fspec_key
1111 #undef fspec_mask
1112 }
1113 
1114 static void
1115 nxge_fill_tcam_entry_ah_esp_ipv6(p_nxge_t nxgep, flow_spec_t *flow_spec,
1116     tcam_entry_t *tcam_ptr)
1117 {
1118 	p_nxge_class_pt_cfg_t p_class_cfgp;
1119 #define	fspec_key (flow_spec->uh.ahip6spec)
1120 #define	fspec_mask (flow_spec->um.ahip6spec)
1121 
1122 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1123 	if (p_class_cfgp->class_cfg[TCAM_CLASS_AH_ESP_IPV6] &
1124 	    NXGE_CLASS_TCAM_USE_SRC_ADDR) {
1125 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6src);
1126 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6src);
1127 	} else {
1128 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_key, fspec_key.ip6dst);
1129 		TCAM_IPV6_ADDR(tcam_ptr->ip6_ip_addr_mask, fspec_mask.ip6dst);
1130 	}
1131 
1132 	TCAM_IP_CLASS(tcam_ptr->ip6_class_key,
1133 	    tcam_ptr->ip6_class_mask, TCAM_CLASS_AH_ESP_IPV6);
1134 
1135 	if (flow_spec->flow_type == FSPEC_AHIP6) {
1136 		TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1137 		    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_AH);
1138 	} else {
1139 		TCAM_IP_PROTO(tcam_ptr->ip6_nxt_hdr_key,
1140 		    tcam_ptr->ip6_nxt_hdr_mask, IPPROTO_ESP);
1141 	}
1142 	tcam_ptr->ip6_port_key = fspec_key.spi;
1143 	tcam_ptr->ip6_port_mask = fspec_mask.spi;
1144 	tcam_ptr->ip6_tos_key = fspec_key.tos;
1145 	tcam_ptr->ip6_tos_mask = fspec_mask.tos;
1146 #undef fspec_key
1147 #undef fspec_mask
1148 }
1149 
1150 /* ARGSUSED */
1151 static void
1152 nxge_fill_tcam_entry_ip_usr(p_nxge_t nxgep, flow_spec_t *flow_spec,
1153     tcam_entry_t *tcam_ptr, tcam_class_t class)
1154 {
1155 #define	fspec_key (flow_spec->uh.ip_usr_spec)
1156 #define	fspec_mask (flow_spec->um.ip_usr_spec)
1157 
1158 	if (fspec_key.ip_ver == FSPEC_IP4) {
1159 		TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_key, fspec_key.ip4dst);
1160 		TCAM_IPV4_ADDR(tcam_ptr->ip4_dest_mask, fspec_mask.ip4dst);
1161 		TCAM_IPV4_ADDR(tcam_ptr->ip4_src_key, fspec_key.ip4src);
1162 		TCAM_IPV4_ADDR(tcam_ptr->ip4_src_mask, fspec_mask.ip4src);
1163 
1164 		tcam_ptr->ip4_port_key = fspec_key.l4_4_bytes;
1165 		tcam_ptr->ip4_port_mask = fspec_mask.l4_4_bytes;
1166 
1167 		TCAM_IP_CLASS(tcam_ptr->ip4_class_key,
1168 		    tcam_ptr->ip4_class_mask, class);
1169 
1170 		tcam_ptr->ip4_proto_key = fspec_key.proto;
1171 		tcam_ptr->ip4_proto_mask = fspec_mask.proto;
1172 
1173 		tcam_ptr->ip4_tos_key = fspec_key.tos;
1174 		tcam_ptr->ip4_tos_mask = fspec_mask.tos;
1175 	}
1176 #undef fspec_key
1177 #undef fspec_mask
1178 }
1179 
1180 
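/*
 * nxge_flow_get_hash
 * Builds a flow_template_t from the flow spec according to the
 * class's flow key configuration and computes the H1 (20-bit) and
 * H2 hashes. Only TCP/IPv4 and UDP/IPv4 flow specs are handled;
 * anything else returns NXGE_ERROR.
 */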
1181 nxge_status_t
1182 nxge_flow_get_hash(p_nxge_t nxgep, flow_resource_t *flow_res,
1183     uint32_t *H1, uint16_t *H2)
1184 {
1185 	flow_spec_t *flow_spec;
1186 	uint32_t class_cfg;
1187 	flow_template_t ft;
1188 	p_nxge_class_pt_cfg_t p_class_cfgp;
1189 
1190 	int ft_size = sizeof (flow_template_t);
1191 
1192 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_flow_get_hash"));
1193 
1194 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1195 	bzero((char *)&ft, ft_size);
1196 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1197 
1198 	switch (flow_spec->flow_type) {
1199 	case FSPEC_TCPIP4:
1200 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_TCP_IPV4];
1201 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1202 			ft.ip_proto = IPPROTO_TCP;
1203 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1204 			ft.ip4_saddr = flow_res->flow_spec.uh.tcpip4spec.ip4src;
1205 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1206 			ft.ip4_daddr = flow_res->flow_spec.uh.tcpip4spec.ip4dst;
1207 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1208 			ft.ip_src_port = flow_res->flow_spec.uh.tcpip4spec.psrc;
1209 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1210 			ft.ip_dst_port = flow_res->flow_spec.uh.tcpip4spec.pdst;
1211 		break;
1212 
1213 	case FSPEC_UDPIP4:
1214 		class_cfg = p_class_cfgp->class_cfg[TCAM_CLASS_UDP_IPV4];
1215 		if (class_cfg & NXGE_CLASS_FLOW_USE_PROTO)
1216 			ft.ip_proto = IPPROTO_UDP;
1217 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPSRC)
1218 			ft.ip4_saddr = flow_res->flow_spec.uh.udpip4spec.ip4src;
1219 		if (class_cfg & NXGE_CLASS_FLOW_USE_IPDST)
1220 			ft.ip4_daddr = flow_res->flow_spec.uh.udpip4spec.ip4dst;
1221 		if (class_cfg & NXGE_CLASS_FLOW_USE_SRC_PORT)
1222 			ft.ip_src_port = flow_res->flow_spec.uh.udpip4spec.psrc;
1223 		if (class_cfg & NXGE_CLASS_FLOW_USE_DST_PORT)
1224 			ft.ip_dst_port = flow_res->flow_spec.uh.udpip4spec.pdst;
1225 		break;
1226 
1227 	default:
1228 		return (NXGE_ERROR);
1229 	}
1230 
1231 	*H1 = nxge_compute_h1(p_class_cfgp->init_h1,
1232 	    (uint32_t *)&ft, ft_size) & 0xfffff;
1233 	*H2 = nxge_compute_h2(p_class_cfgp->init_h2,
1234 	    (uint8_t *)&ft, ft_size);
1235 
1236 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_flow_get_hash"));
1237 	return (NXGE_OK);
1238 }
1239 
1240 nxge_status_t
1241 nxge_add_fcram_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1242 {
1243 	uint32_t H1;
1244 	uint16_t H2;
1245 	nxge_status_t status = NXGE_OK;
1246 
1247 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_fcram_entry"));
1248 	status = nxge_flow_get_hash(nxgep, flow_res, &H1, &H2);
1249 	if (status != NXGE_OK) {
1250 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1251 		    " nxge_add_fcram_entry failed "));
1252 		return (status);
1253 	}
1254 
1255 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_fcram_entry"));
1256 	return (NXGE_OK);
1257 }
1258 
1259 /*
1260  * Already decided this flow goes into the tcam
1261  */
1262 
1263 nxge_status_t
1264 nxge_add_tcam_entry(p_nxge_t nxgep, flow_resource_t *flow_res)
1265 {
1266 	npi_handle_t handle;
1267 	uint64_t channel_cookie;
1268 	uint64_t flow_cookie;
1269 	flow_spec_t *flow_spec;
1270 	npi_status_t rs = NPI_SUCCESS;
1271 	tcam_entry_t tcam_ptr;
1272 	tcam_location_t location;
1273 	uint8_t offset, rdc_grp;
1274 	p_nxge_hw_list_t hw_p;
1275 	uint64_t class;
1276 
1277 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_add_tcam_entry"));
1278 	handle = nxgep->npi_reg_handle;
1279 
1280 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1281 	flow_spec = (flow_spec_t *)&flow_res->flow_spec;
1282 	flow_cookie = flow_res->flow_cookie;
1283 	channel_cookie = flow_res->channel_cookie;
1284 	location = (tcam_location_t)nxge_tcam_get_index(nxgep,
1285 	    (uint16_t)flow_res->location);
1286 
1287 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1288 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1289 		    " nxge_add_tcam_entry: common hardware not set (niu type 0x%x)",
1290 		    nxgep->niu_type));
1291 		return (NXGE_ERROR);
1292 	}
1293 
1294 	class = TCAM_CLASS_INVALID;
1295 	if (flow_spec->flow_type == FSPEC_IP_USR) {
1296 		int i;
1297 		int add_usr_cls = 0;
1298 		int ipv6 = 0;
1299 		nxge_usr_l3_cls_t *l3_ucls_p;
1300 #define	uspec (flow_spec->uh.ip_usr_spec)
1301 #define	umask (flow_spec->um.ip_usr_spec)
1302 
1303 		MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1304 
1305 		for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
1306 			l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
1307 			if (l3_ucls_p->valid && l3_ucls_p->tcam_ref_cnt) {
1308 				if (uspec.proto == l3_ucls_p->pid) {
1309 					class = l3_ucls_p->cls;
1310 					l3_ucls_p->tcam_ref_cnt++;
1311 					add_usr_cls = 1;
1312 					break;
1313 				}
1314 			} else if (l3_ucls_p->valid == 0) {
1315 				/* Program new user IP class */
1316 				switch (i) {
1317 				case 0:
1318 					class = TCAM_CLASS_IP_USER_4;
1319 					break;
1320 				case 1:
1321 					class = TCAM_CLASS_IP_USER_5;
1322 					break;
1323 				case 2:
1324 					class = TCAM_CLASS_IP_USER_6;
1325 					break;
1326 				case 3:
1327 					class = TCAM_CLASS_IP_USER_7;
1328 					break;
1329 				default:
1330 					break;
1331 				}
1332 				if (uspec.ip_ver == FSPEC_IP6)
1333 					ipv6 = 1;
1334 				rs = npi_fflp_cfg_ip_usr_cls_set(handle,
1335 				    (tcam_class_t)class, uspec.tos,
1336 				    umask.tos, uspec.proto, ipv6);
1337 				if (rs != NPI_SUCCESS)
1338 					goto fail;
1339 
1340 				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
1341 				    (tcam_class_t)class);
1342 				if (rs != NPI_SUCCESS)
1343 					goto fail;
1344 
1345 				l3_ucls_p->cls = class;
1346 				l3_ucls_p->pid = uspec.proto;
1347 				l3_ucls_p->tcam_ref_cnt++;
1348 				l3_ucls_p->valid = 1;
1349 				add_usr_cls = 1;
1350 				break;
1351 			} else if (l3_ucls_p->tcam_ref_cnt == 0 &&
1352 			    uspec.proto == l3_ucls_p->pid) {
1353 				/*
1354 				 * The class has already been programmed,
1355 				 * probably for flow hash
1356 				 */
1357 				class = l3_ucls_p->cls;
1358 				if (uspec.ip_ver == FSPEC_IP6)
1359 					ipv6 = 1;
1360 				rs = npi_fflp_cfg_ip_usr_cls_set(handle,
1361 				    (tcam_class_t)class, uspec.tos,
1362 				    umask.tos, uspec.proto, ipv6);
1363 				if (rs != NPI_SUCCESS)
1364 					goto fail;
1365 
1366 				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
1367 				    (tcam_class_t)class);
1368 				if (rs != NPI_SUCCESS)
1369 					goto fail;
1370 
1371 				l3_ucls_p->pid = uspec.proto;
1372 				l3_ucls_p->tcam_ref_cnt++;
1373 				add_usr_cls = 1;
1374 				break;
1375 			}
1376 		}
1377 		if (!add_usr_cls) {
1378 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1379 			    "nxge_add_tcam_entry: Could not find/insert class"
1380 			    " for pid %d", uspec.proto));
1381 			goto fail;
1382 		}
1383 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1384 #undef uspec
1385 #undef umask
1386 	}
1387 
1388 	switch (flow_spec->flow_type) {
1389 	case FSPEC_TCPIP4:
1390 		nxge_fill_tcam_entry_tcp(nxgep, flow_spec, &tcam_ptr);
1391 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV4,
1392 		    flow_cookie);
1393 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV4,
1394 		    channel_cookie);
1395 		break;
1396 
1397 	case FSPEC_UDPIP4:
1398 		nxge_fill_tcam_entry_udp(nxgep, flow_spec, &tcam_ptr);
1399 		rdc_grp = nxge_get_rdc_group(nxgep,
1400 		    TCAM_CLASS_UDP_IPV4,
1401 		    flow_cookie);
1402 		offset = nxge_get_rdc_offset(nxgep,
1403 		    TCAM_CLASS_UDP_IPV4,
1404 		    channel_cookie);
1405 		break;
1406 
1407 	case FSPEC_TCPIP6:
1408 		nxge_fill_tcam_entry_tcp_ipv6(nxgep,
1409 		    flow_spec, &tcam_ptr);
1410 		rdc_grp = nxge_get_rdc_group(nxgep, TCAM_CLASS_TCP_IPV6,
1411 		    flow_cookie);
1412 		offset = nxge_get_rdc_offset(nxgep, TCAM_CLASS_TCP_IPV6,
1413 		    channel_cookie);
1414 		break;
1415 
1416 	case FSPEC_UDPIP6:
1417 		nxge_fill_tcam_entry_udp_ipv6(nxgep,
1418 		    flow_spec, &tcam_ptr);
1419 		rdc_grp = nxge_get_rdc_group(nxgep,
1420 		    TCAM_CLASS_UDP_IPV6,
1421 		    flow_cookie);
1422 		offset = nxge_get_rdc_offset(nxgep,
1423 		    TCAM_CLASS_UDP_IPV6,
1424 		    channel_cookie);
1425 		break;
1426 
1427 	case FSPEC_SCTPIP4:
1428 		nxge_fill_tcam_entry_sctp(nxgep, flow_spec, &tcam_ptr);
1429 		rdc_grp = nxge_get_rdc_group(nxgep,
1430 		    TCAM_CLASS_SCTP_IPV4,
1431 		    flow_cookie);
1432 		offset = nxge_get_rdc_offset(nxgep,
1433 		    TCAM_CLASS_SCTP_IPV4,
1434 		    channel_cookie);
1435 		break;
1436 
1437 	case FSPEC_SCTPIP6:
1438 		nxge_fill_tcam_entry_sctp_ipv6(nxgep,
1439 		    flow_spec, &tcam_ptr);
1440 		rdc_grp = nxge_get_rdc_group(nxgep,
1441 		    TCAM_CLASS_SCTP_IPV6,
1442 		    flow_cookie);
1443 		offset = nxge_get_rdc_offset(nxgep,
1444 		    TCAM_CLASS_SCTP_IPV6,
1445 		    channel_cookie);
1446 		break;
1447 
1448 	case FSPEC_AHIP4:
1449 	case FSPEC_ESPIP4:
1450 		nxge_fill_tcam_entry_ah_esp(nxgep, flow_spec, &tcam_ptr);
1451 		rdc_grp = nxge_get_rdc_group(nxgep,
1452 		    TCAM_CLASS_AH_ESP_IPV4,
1453 		    flow_cookie);
1454 		offset = nxge_get_rdc_offset(nxgep,
1455 		    TCAM_CLASS_AH_ESP_IPV4,
1456 		    channel_cookie);
1457 		break;
1458 
1459 	case FSPEC_AHIP6:
1460 	case FSPEC_ESPIP6:
1461 		nxge_fill_tcam_entry_ah_esp_ipv6(nxgep,
1462 		    flow_spec, &tcam_ptr);
1463 		rdc_grp = nxge_get_rdc_group(nxgep,
1464 		    TCAM_CLASS_AH_ESP_IPV6,
1465 		    flow_cookie);
1466 		offset = nxge_get_rdc_offset(nxgep,
1467 		    TCAM_CLASS_AH_ESP_IPV6,
1468 		    channel_cookie);
1469 		break;
1470 
1471 	case FSPEC_IP_USR:
1472 		nxge_fill_tcam_entry_ip_usr(nxgep, flow_spec, &tcam_ptr,
1473 		    (tcam_class_t)class);
1474 		rdc_grp = nxge_get_rdc_group(nxgep,
1475 		    (tcam_class_t)class, flow_cookie);
1476 		offset = nxge_get_rdc_offset(nxgep,
1477 		    (tcam_class_t)class, channel_cookie);
1478 		break;
1479 	default:
1480 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1481 		    "nxge_add_tcam_entry: Unknown flow spec 0x%x",
1482 		    flow_spec->flow_type));
1483 		return (NXGE_ERROR);
1484 	}
1485 
1486 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1487 	    " nxge_add_tcam_entry write"
1488 	    " for location %d offset %d", location, offset));
1489 
1490 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1491 	rs = npi_fflp_tcam_entry_write(handle, location, &tcam_ptr);
1492 
1493 	if (rs & NPI_FFLP_ERROR) {
1494 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1495 		    " nxge_add_tcam_entry write"
1496 		    " failed for location %d", location));
1497 		goto fail;
1498 	}
1499 
1500 	tcam_ptr.match_action.value = 0;
1501 	tcam_ptr.match_action.bits.ldw.rdctbl = rdc_grp;
1502 	tcam_ptr.match_action.bits.ldw.offset = offset;
1503 	tcam_ptr.match_action.bits.ldw.tres =
1504 	    TRES_TERM_OVRD_L2RDC;
1505 	if (channel_cookie == NXGE_PKT_DISCARD)
1506 		tcam_ptr.match_action.bits.ldw.disc = 1;
1507 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1508 	    location, tcam_ptr.match_action.value);
1509 	if (rs & NPI_FFLP_ERROR) {
1510 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1511 		    " nxge_add_tcam_entry write"
1512 		    " failed for ASC RAM location %d", location));
1513 		goto fail;
1514 	}
1515 	bcopy((void *) &tcam_ptr,
1516 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
1517 	    sizeof (tcam_entry_t));
1518 	nxgep->classifier.tcam_entry_cnt++;
1519 	nxgep->classifier.tcam_entries[location].valid = 1;
1520 
1521 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1522 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_add_tcam_entry"));
1523 	return (NXGE_OK);
1524 fail:
1525 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1526 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_tcam_entry FAILED"));
1527 	return (NXGE_ERROR);
1528 }
1529 
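/*
 * nxge_tcam_handle_ip_fragment
 * Workaround for the IP fragment hardware bug: programs a TCAM
 * entry (at location function_num) that matches IPv4 packets with
 * no L4 port information and sends them to the default MAC RDC
 * group, enables TCAM lookup for all TCP/UDP/SCTP IPv4/IPv6
 * classes, and then enables the TCAM.
 */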
1530 static nxge_status_t
1531 nxge_tcam_handle_ip_fragment(p_nxge_t nxgep)
1532 {
1533 	tcam_entry_t tcam_ptr;
1534 	tcam_location_t location;
1535 	uint8_t class;
1536 	uint32_t class_config;
1537 	npi_handle_t handle;
1538 	npi_status_t rs = NPI_SUCCESS;
1539 	p_nxge_hw_list_t hw_p;
1540 	nxge_status_t status = NXGE_OK;
1541 
1542 	handle = nxgep->npi_reg_handle;
1543 	class = 0;
1544 	bzero((void *)&tcam_ptr, sizeof (tcam_entry_t));
1545 	tcam_ptr.ip4_noport_key = 1;
1546 	tcam_ptr.ip4_noport_mask = 1;
1547 	location = nxgep->function_num;
1548 	nxgep->classifier.fragment_bug_location = location;
1549 
1550 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1551 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1552 		    " nxge_tcam_handle_ip_fragment: common hardware not set (niu type 0x%x)",
1553 		    nxgep->niu_type));
1554 		return (NXGE_ERROR);
1555 	}
1556 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
1557 	rs = npi_fflp_tcam_entry_write(handle,
1558 	    location, &tcam_ptr);
1559 
1560 	if (rs & NPI_FFLP_ERROR) {
1561 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1562 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1563 		    " nxge_tcam_handle_ip_fragment "
1564 		    " tcam_entry write"
1565 		    " failed for location %d", location));
1566 		return (NXGE_ERROR);
1567 	}
1568 	tcam_ptr.match_action.bits.ldw.rdctbl = nxgep->class_config.mac_rdcgrp;
1569 	tcam_ptr.match_action.bits.ldw.offset = 0;	/* use the default */
1570 	tcam_ptr.match_action.bits.ldw.tres =
1571 	    TRES_TERM_USE_OFFSET;
1572 	rs = npi_fflp_tcam_asc_ram_entry_write(handle,
1573 	    location, tcam_ptr.match_action.value);
1574 
1575 	if (rs & NPI_FFLP_ERROR) {
1576 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1577 		NXGE_DEBUG_MSG((nxgep,
1578 		    FFLP_CTL,
1579 		    " nxge_tcam_handle_ip_fragment "
1580 		    " tcam_entry write"
1581 		    " failed for ASC RAM location %d", location));
1582 		return (NXGE_ERROR);
1583 	}
1584 	bcopy((void *) &tcam_ptr,
1585 	    (void *) &nxgep->classifier.tcam_entries[location].tce,
1586 	    sizeof (tcam_entry_t));
1587 	nxgep->classifier.tcam_entry_cnt++;
1588 	nxgep->classifier.tcam_entries[location].valid = 1;
1589 	for (class = TCAM_CLASS_TCP_IPV4;
1590 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
1591 		class_config = nxgep->class_config.class_cfg[class];
1592 		class_config |= NXGE_CLASS_TCAM_LOOKUP;
1593 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1594 
1595 		if (status != NXGE_OK) {
1596 			MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1597 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1598 			    "nxge_tcam_handle_ip_fragment "
1599 			    "nxge_fflp_ip_class_config failed "
1600 			    " class %d config %x ", class, class_config));
1601 			return (NXGE_ERROR);
1602 		}
1603 	}
1604 
1605 	rs = npi_fflp_cfg_tcam_enable(handle);
1606 	if (rs & NPI_FFLP_ERROR) {
1607 		MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1608 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1609 		    "nxge_tcam_handle_ip_fragment "
1610 		    " nxge_fflp_config_tcam_enable failed"));
1611 		return (NXGE_ERROR);
1612 	}
1613 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
1614 	return (NXGE_OK);
1615 }
1616 
1617 /* ARGSUSED */
1618 static int
1619 nxge_flow_need_hash_lookup(p_nxge_t nxgep, flow_resource_t *flow_res)
1620 {
1621 	return (0);
1622 }
1623 
1624 nxge_status_t
1625 nxge_add_flow(p_nxge_t nxgep, flow_resource_t *flow_res)
1626 {
1627 
1628 	int insert_hash = 0;
1629 	nxge_status_t status = NXGE_OK;
1630 
1631 	if (NXGE_IS_VALID_NEPTUNE_TYPE(nxgep)) {
1632 		/* determine whether to do TCAM or Hash flow */
1633 		insert_hash = nxge_flow_need_hash_lookup(nxgep, flow_res);
1634 	}
1635 	if (insert_hash) {
1636 		status = nxge_add_fcram_entry(nxgep, flow_res);
1637 	} else {
1638 		status = nxge_add_tcam_entry(nxgep, flow_res);
1639 	}
1640 	return (status);
1641 }
1642 
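As a usage illustration, a caller that wants a specific TCP/IPv4 connection steered to a particular RDC can fill in a flow_resource_t and hand it to nxge_add_flow(); since nxge_flow_need_hash_lookup() is stubbed to return 0, the rule always ends up in the TCAM via nxge_add_tcam_entry(). The sketch below is hypothetical (example_steer_tcp_flow is not a driver function), the literal addresses, ports and RDC offset are placeholders, and ip4src/ip4dst are assumed to be plain 32-bit fields in the byte order the TCAM fill routines expect (the driver's FSPEC_IPV4_ADDR macro hides the exact representation).

static nxge_status_t
example_steer_tcp_flow(p_nxge_t nxgep)
{
	flow_resource_t fr;

	bzero(&fr, sizeof (fr));
	fr.flow_spec.flow_type = FSPEC_TCPIP4;

	/* Match 192.168.1.10:80 -> 192.168.1.20:5001 exactly. */
	fr.flow_spec.uh.tcpip4spec.ip4src = 0xc0a8010a;
	fr.flow_spec.um.tcpip4spec.ip4src = 0xffffffff;
	fr.flow_spec.uh.tcpip4spec.ip4dst = 0xc0a80114;
	fr.flow_spec.um.tcpip4spec.ip4dst = 0xffffffff;
	fr.flow_spec.uh.tcpip4spec.psrc = 80;
	fr.flow_spec.um.tcpip4spec.psrc = 0xffff;
	fr.flow_spec.uh.tcpip4spec.pdst = 5001;
	fr.flow_spec.um.tcpip4spec.pdst = 0xffff;

	/* Hypothetical RDC offset; NXGE_PKT_DISCARD would drop instead. */
	fr.channel_cookie = 2;

	return (nxge_add_flow(nxgep, &fr));
}
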
1643 void
1644 nxge_put_tcam(p_nxge_t nxgep, p_mblk_t mp)
1645 {
1646 	flow_resource_t *fs;
1647 
1648 	fs = (flow_resource_t *)mp->b_rptr;
1649 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1650 	    "nxge_put_tcam addr fs $%p  type %x offset %x",
1651 	    fs, fs->flow_spec.flow_type, fs->channel_cookie));
1652 	(void) nxge_add_tcam_entry(nxgep, fs);
1653 }
1654 
1655 nxge_status_t
1656 nxge_fflp_config_tcam_enable(p_nxge_t nxgep)
1657 {
1658 	npi_handle_t handle = nxgep->npi_reg_handle;
1659 	npi_status_t rs = NPI_SUCCESS;
1660 
1661 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_config_tcam_enable"));
1662 	rs = npi_fflp_cfg_tcam_enable(handle);
1663 	if (rs & NPI_FFLP_ERROR) {
1664 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1665 		    " nxge_fflp_config_tcam_enable failed"));
1666 		return (NXGE_ERROR | rs);
1667 	}
1668 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " <== nxge_fflp_config_tcam_enable"));
1669 	return (NXGE_OK);
1670 }
1671 
1672 nxge_status_t
1673 nxge_fflp_config_tcam_disable(p_nxge_t nxgep)
1674 {
1675 	npi_handle_t handle = nxgep->npi_reg_handle;
1676 	npi_status_t rs = NPI_SUCCESS;
1677 
1678 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1679 	    " ==> nxge_fflp_config_tcam_disable"));
1680 	rs = npi_fflp_cfg_tcam_disable(handle);
1681 	if (rs & NPI_FFLP_ERROR) {
1682 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1683 		    " nxge_fflp_config_tcam_disable failed"));
1684 		return (NXGE_ERROR | rs);
1685 	}
1686 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1687 	    " <== nxge_fflp_config_tcam_disable"));
1688 	return (NXGE_OK);
1689 }
1690 
1691 nxge_status_t
1692 nxge_fflp_config_hash_lookup_enable(p_nxge_t nxgep)
1693 {
1694 	npi_handle_t handle = nxgep->npi_reg_handle;
1695 	npi_status_t rs = NPI_SUCCESS;
1696 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1697 	p_nxge_hw_pt_cfg_t p_cfgp;
1698 	uint8_t partition;
1699 
1700 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1701 	    " ==> nxge_fflp_config_hash_lookup_enable"));
1702 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1703 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1704 
1705 	for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1706 		if (p_cfgp->grpids[partition]) {
1707 			rs = npi_fflp_cfg_fcram_partition_enable(
1708 			    handle, partition);
1709 			if (rs != NPI_SUCCESS) {
1710 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1711 				    " nxge_fflp_config_hash_lookup_enable"
1712 				    "failed FCRAM partition"
1713 				    " enable for partition %d ", partition));
1714 				return (NXGE_ERROR | rs);
1715 			}
1716 		}
1717 	}
1718 
1719 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1720 	    " <== nxge_fflp_config_hash_lookup_enable"));
1721 	return (NXGE_OK);
1722 }
1723 
1724 nxge_status_t
1725 nxge_fflp_config_hash_lookup_disable(p_nxge_t nxgep)
1726 {
1727 	npi_handle_t handle = nxgep->npi_reg_handle;
1728 	npi_status_t rs = NPI_SUCCESS;
1729 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
1730 	p_nxge_hw_pt_cfg_t p_cfgp;
1731 	uint8_t partition;
1732 
1733 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1734 	    " ==> nxge_fflp_config_hash_lookup_disable"));
1735 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
1736 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
1737 
1738 	for (partition = 0; partition < NXGE_MAX_RDC_GROUPS; partition++) {
1739 		if (p_cfgp->grpids[partition]) {
1740 			rs = npi_fflp_cfg_fcram_partition_disable(handle,
1741 			    partition);
1742 			if (rs != NPI_SUCCESS) {
1743 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1744 				    " nxge_fflp_config_hash_lookup_disable"
1745 				    " failed FCRAM partition"
1746 				    " disable for partition %d ", partition));
1747 				return (NXGE_ERROR | rs);
1748 			}
1749 		}
1750 	}
1751 
1752 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1753 	    " <== nxge_fflp_config_hash_lookup_disable"));
1754 	return (NXGE_OK);
1755 }
1756 
1757 nxge_status_t
1758 nxge_fflp_config_llc_snap_enable(p_nxge_t nxgep)
1759 {
1760 	npi_handle_t handle = nxgep->npi_reg_handle;
1761 	npi_status_t rs = NPI_SUCCESS;
1762 
1763 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1764 	    " ==> nxge_fflp_config_llc_snap_enable"));
1765 	rs = npi_fflp_cfg_llcsnap_enable(handle);
1766 	if (rs & NPI_FFLP_ERROR) {
1767 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1768 		    " nxge_fflp_config_llc_snap_enable failed"));
1769 		return (NXGE_ERROR | rs);
1770 	}
1771 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1772 	    " <== nxge_fflp_config_llc_snap_enable"));
1773 	return (NXGE_OK);
1774 }
1775 
1776 nxge_status_t
1777 nxge_fflp_config_llc_snap_disable(p_nxge_t nxgep)
1778 {
1779 	npi_handle_t handle = nxgep->npi_reg_handle;
1780 	npi_status_t rs = NPI_SUCCESS;
1781 
1782 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1783 	    " ==> nxge_fflp_config_llc_snap_disable"));
1784 	rs = npi_fflp_cfg_llcsnap_disable(handle);
1785 	if (rs & NPI_FFLP_ERROR) {
1786 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1787 		    " nxge_fflp_config_llc_snap_disable failed"));
1788 		return (NXGE_ERROR | rs);
1789 	}
1790 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1791 	    " <== nxge_fflp_config_llc_snap_disable"));
1792 	return (NXGE_OK);
1793 }
1794 
1795 nxge_status_t
1796 nxge_fflp_ip_usr_class_config(p_nxge_t nxgep, tcam_class_t class,
1797     uint32_t config)
1798 {
1799 	npi_status_t rs = NPI_SUCCESS;
1800 	npi_handle_t handle = nxgep->npi_reg_handle;
1801 	uint8_t tos, tos_mask, proto, ver = 0;
1802 	uint8_t class_enable = 0;
1803 
1804 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_usr_class_config"));
1805 
1806 	tos = (config & NXGE_CLASS_CFG_IP_TOS_MASK) >>
1807 	    NXGE_CLASS_CFG_IP_TOS_SHIFT;
1808 	tos_mask = (config & NXGE_CLASS_CFG_IP_TOS_MASK_MASK) >>
1809 	    NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT;
1810 	proto = (config & NXGE_CLASS_CFG_IP_PROTO_MASK) >>
1811 	    NXGE_CLASS_CFG_IP_PROTO_SHIFT;
1812 	if (config & NXGE_CLASS_CFG_IP_IPV6_MASK)
1813 		ver = 1;
1814 	if (config & NXGE_CLASS_CFG_IP_ENABLE_MASK)
1815 		class_enable = 1;
1816 	rs = npi_fflp_cfg_ip_usr_cls_set(handle, class, tos, tos_mask,
1817 	    proto, ver);
1818 	if (rs & NPI_FFLP_ERROR) {
1819 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1820 		    " nxge_fflp_ip_usr_class_config"
1821 		    " for class %d failed ", class));
1822 		return (NXGE_ERROR | rs);
1823 	}
1824 	if (class_enable)
1825 		rs = npi_fflp_cfg_ip_usr_cls_enable(handle, class);
1826 	else
1827 		rs = npi_fflp_cfg_ip_usr_cls_disable(handle, class);
1828 
1829 	if (rs & NPI_FFLP_ERROR) {
1830 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1831 		    " nxge_fflp_ip_usr_class_config"
1832 		    " TCAM enable/disable for class %d failed ", class));
1833 		return (NXGE_ERROR | rs);
1834 	}
1835 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_usr_class_config"));
1836 	return (NXGE_OK);
1837 }
1838 
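For reference, the config word that nxge_fflp_ip_usr_class_config() decodes above can be built by inverting the same shift/mask extraction. This is a hedged sketch (the helper name is hypothetical) that assumes the NXGE_CLASS_CFG_IP_* fields do not overlap, as the decode implies.

static uint32_t
example_pack_usr_class_config(uint8_t tos, uint8_t tos_mask, uint8_t proto,
    boolean_t ipv6, boolean_t enable)
{
	uint32_t config = 0;

	config |= ((uint32_t)tos << NXGE_CLASS_CFG_IP_TOS_SHIFT) &
	    NXGE_CLASS_CFG_IP_TOS_MASK;
	config |= ((uint32_t)tos_mask << NXGE_CLASS_CFG_IP_TOS_MASK_SHIFT) &
	    NXGE_CLASS_CFG_IP_TOS_MASK_MASK;
	config |= ((uint32_t)proto << NXGE_CLASS_CFG_IP_PROTO_SHIFT) &
	    NXGE_CLASS_CFG_IP_PROTO_MASK;
	if (ipv6)
		config |= NXGE_CLASS_CFG_IP_IPV6_MASK;
	if (enable)
		config |= NXGE_CLASS_CFG_IP_ENABLE_MASK;
	return (config);
}
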
1839 nxge_status_t
1840 nxge_fflp_ip_class_config(p_nxge_t nxgep, tcam_class_t class, uint32_t config)
1841 {
1842 	uint32_t class_config;
1843 	nxge_status_t t_status = NXGE_OK;
1844 	nxge_status_t f_status = NXGE_OK;
1845 	p_nxge_class_pt_cfg_t p_class_cfgp;
1846 
1847 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
1848 
1849 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1850 	class_config = p_class_cfgp->class_cfg[class];
1851 
1852 	if (class_config != config) {
1853 		p_class_cfgp->class_cfg[class] = config;
1854 		class_config = config;
1855 	}
1856 
1857 	t_status = nxge_cfg_tcam_ip_class(nxgep, class, class_config);
1858 	f_status = nxge_cfg_ip_cls_flow_key(nxgep, class, class_config);
1859 
1860 	if (t_status & NPI_FFLP_ERROR) {
1861 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1862 		    " nxge_fflp_ip_class_config %x"
1863 		    " for class %d tcam failed", config, class));
1864 		return (t_status);
1865 	}
1866 	if (f_status & NPI_FFLP_ERROR) {
1867 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1868 		    " nxge_fflp_ip_class_config %x"
1869 		    " for class %d flow key failed", config, class));
1870 		return (f_status);
1871 	}
1872 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
1873 	return (NXGE_OK);
1874 }
1875 
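The usual calling pattern mirrors what nxge_tcam_handle_ip_fragment() does earlier in this file: read the cached class_cfg[] word, OR in NXGE_CLASS_TCAM_LOOKUP, and push it back through nxge_fflp_ip_class_config() so the TCAM lookup and the flow key stay consistent. A minimal sketch (the helper name is hypothetical):

static nxge_status_t
example_enable_tcam_lookup(p_nxge_t nxgep, tcam_class_t class)
{
	uint32_t cfg = nxgep->class_config.class_cfg[class];

	cfg |= NXGE_CLASS_TCAM_LOOKUP;
	return (nxge_fflp_ip_class_config(nxgep, class, cfg));
}
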
1876 nxge_status_t
1877 nxge_fflp_ip_class_config_get(p_nxge_t nxgep, tcam_class_t class,
1878     uint32_t *config)
1879 {
1880 	uint32_t t_class_config, f_class_config;
1881 	int t_status = NXGE_OK;
1882 	int f_status = NXGE_OK;
1883 
1884 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, " ==> nxge_fflp_ip_class_config"));
1885 
1886 	t_class_config = f_class_config = 0;
1887 	t_status = nxge_cfg_tcam_ip_class_get(nxgep, class, &t_class_config);
1888 	f_status = nxge_cfg_ip_cls_flow_key_get(nxgep, class, &f_class_config);
1889 
1890 	if (t_status & NPI_FFLP_ERROR) {
1891 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1892 		    " nxge_fflp_ip_class_config_get  "
1893 		    " for class %d tcam failed", class));
1894 		return (t_status);
1895 	}
1896 
1897 	if (f_status & NPI_FFLP_ERROR) {
1898 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1899 		    " nxge_fflp_ip_class_config_get  "
1900 		    " for class %d flow key failed", class));
1901 		return (f_status);
1902 	}
1903 
1904 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
1905 	    " nxge_fflp_ip_class_config tcam %x flow %x",
1906 	    t_class_config, f_class_config));
1907 
1908 	*config = t_class_config | f_class_config;
1909 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config_get"));
1910 	return (NXGE_OK);
1911 }
1912 
1913 nxge_status_t
1914 nxge_fflp_ip_class_config_all(p_nxge_t nxgep)
1915 {
1916 	uint32_t class_config;
1917 	tcam_class_t class;
1918 
1919 #ifdef	NXGE_DEBUG
1920 	int status = NXGE_OK;
1921 #endif
1922 
1923 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_ip_class_config"));
1924 	for (class = TCAM_CLASS_TCP_IPV4;
1925 	    class <= TCAM_CLASS_SCTP_IPV6; class++) {
1926 		class_config = nxgep->class_config.class_cfg[class];
1927 #ifndef	NXGE_DEBUG
1928 		(void) nxge_fflp_ip_class_config(nxgep, class, class_config);
1929 #else
1930 		status = nxge_fflp_ip_class_config(nxgep, class, class_config);
1931 		if (status & NPI_FFLP_ERROR) {
1932 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1933 			    "nxge_fflp_ip_class_config failed "
1934 			    " class %d config %x ",
1935 			    class, class_config));
1936 		}
1937 #endif
1938 	}
1939 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_ip_class_config"));
1940 	return (NXGE_OK);
1941 }
1942 
1943 nxge_status_t
1944 nxge_fflp_config_vlan_table(p_nxge_t nxgep, uint16_t vlan_id)
1945 {
1946 	uint8_t port, rdc_grp;
1947 	npi_handle_t handle;
1948 	npi_status_t rs = NPI_SUCCESS;
1949 	uint8_t priority = 1;
1950 	p_nxge_mv_cfg_t vlan_table;
1951 	p_nxge_class_pt_cfg_t p_class_cfgp;
1952 	p_nxge_hw_list_t hw_p;
1953 
1954 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_config_vlan_table"));
1955 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
1956 	handle = nxgep->npi_reg_handle;
1957 	vlan_table = p_class_cfgp->vlan_tbl;
1958 	port = nxgep->function_num;
1959 
1960 	if (vlan_table[vlan_id].flag == 0) {
1961 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1962 		    " nxge_fflp_config_vlan_table"
1963 		    " vlan id is not configured %d", vlan_id));
1964 		return (NXGE_ERROR);
1965 	}
1966 
1967 	if ((hw_p = nxgep->nxge_hw_p) == NULL) {
1968 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1969 		    " nxge_fflp_config_vlan_table:"
1970 		    " common hardware not set", nxgep->niu_type));
1971 		return (NXGE_ERROR);
1972 	}
1973 	MUTEX_ENTER(&hw_p->nxge_vlan_lock);
1974 	rdc_grp = vlan_table[vlan_id].rdctbl;
1975 	rs = npi_fflp_cfg_enet_vlan_table_assoc(handle,
1976 	    port, vlan_id,
1977 	    rdc_grp, priority);
1978 
1979 	MUTEX_EXIT(&hw_p->nxge_vlan_lock);
1980 	if (rs & NPI_FFLP_ERROR) {
1981 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
1982 		    "nxge_fflp_config_vlan_table failed "
1983 		    " Port %d vlan_id %d rdc_grp %d",
1984 		    port, vlan_id, rdc_grp));
1985 		return (NXGE_ERROR | rs);
1986 	}
1987 
1988 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_fflp_config_vlan_table"));
1989 	return (NXGE_OK);
1990 }
1991 
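nxge_fflp_config_vlan_table() refuses to program a VLAN whose software vlan_tbl[] entry has flag == 0, so the entry must be filled in first. A hedged sketch (hypothetical helper), assuming, as the function's own read-back of rdctbl suggests, that rdctbl holds the RDC group to associate with the VLAN:

static nxge_status_t
example_map_vlan_to_rdc_group(p_nxge_t nxgep, uint16_t vid, uint8_t rdc_grp)
{
	p_nxge_class_pt_cfg_t p_class_cfgp =
	    (p_nxge_class_pt_cfg_t)&nxgep->class_config;

	p_class_cfgp->vlan_tbl[vid].flag = 1;
	p_class_cfgp->vlan_tbl[vid].rdctbl = rdc_grp;
	return (nxge_fflp_config_vlan_table(nxgep, vid));
}
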
1992 nxge_status_t
1993 nxge_fflp_update_hw(p_nxge_t nxgep)
1994 {
1995 	nxge_status_t status = NXGE_OK;
1996 	p_nxge_param_t pa;
1997 	uint64_t cfgd_vlans;
1998 	uint64_t *val_ptr;
1999 	int i;
2000 	int num_macs;
2001 	uint8_t alt_mac;
2002 	nxge_param_map_t *p_map;
2003 	p_nxge_mv_cfg_t vlan_table;
2004 	p_nxge_class_pt_cfg_t p_class_cfgp;
2005 	p_nxge_dma_pt_cfg_t p_all_cfgp;
2006 	p_nxge_hw_pt_cfg_t p_cfgp;
2007 
2008 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_fflp_update_hw"));
2009 
2010 	p_class_cfgp = (p_nxge_class_pt_cfg_t)&nxgep->class_config;
2011 	p_all_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2012 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_all_cfgp->hw_config;
2013 
2014 	status = nxge_fflp_set_hash1(nxgep, p_class_cfgp->init_h1);
2015 	if (status != NXGE_OK) {
2016 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2017 		    "nxge_fflp_set_hash1 Failed"));
2018 		return (NXGE_ERROR);
2019 	}
2020 
2021 	status = nxge_fflp_set_hash2(nxgep, p_class_cfgp->init_h2);
2022 	if (status != NXGE_OK) {
2023 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2024 		    "nxge_fflp_set_hash2 Failed"));
2025 		return (NXGE_ERROR);
2026 	}
2027 	vlan_table = p_class_cfgp->vlan_tbl;
2028 
2029 	/* configure vlan tables */
2030 	pa = (p_nxge_param_t)&nxgep->param_arr[param_vlan_2rdc_grp];
2031 	val_ptr = (uint64_t *)pa->value;
2032 	cfgd_vlans = ((pa->type & NXGE_PARAM_ARRAY_CNT_MASK) >>
2033 	    NXGE_PARAM_ARRAY_CNT_SHIFT);
2034 
2035 	for (i = 0; i < cfgd_vlans; i++) {
2036 		p_map = (nxge_param_map_t *)&val_ptr[i];
2037 		if (vlan_table[p_map->param_id].flag) {
2038 			status = nxge_fflp_config_vlan_table(nxgep,
2039 			    p_map->param_id);
2040 			if (status != NXGE_OK) {
2041 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2042 				    "nxge_fflp_config_vlan_table Failed"));
2043 				return (NXGE_ERROR);
2044 			}
2045 		}
2046 	}
2047 
2048 	/* config MAC addresses */
2049 	num_macs = p_cfgp->max_macs;
2050 	pa = (p_nxge_param_t)&nxgep->param_arr[param_mac_2rdc_grp];
2051 	val_ptr = (uint64_t *)pa->value;
2052 
2053 	for (alt_mac = 0; alt_mac < num_macs; alt_mac++) {
2054 		if (p_class_cfgp->mac_host_info[alt_mac].flag) {
2055 			status = nxge_logical_mac_assign_rdc_table(nxgep,
2056 			    alt_mac);
2057 			if (status != NXGE_OK) {
2058 				NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2059 				    "nxge_logical_mac_assign_rdc_table"
2060 				    " Failed"));
2061 				return (NXGE_ERROR);
2062 			}
2063 		}
2064 	}
2065 
2066 	/* Config Hash values */
2067 	/* config classes */
2068 	status = nxge_fflp_ip_class_config_all(nxgep);
2069 	if (status != NXGE_OK) {
2070 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2071 		    "nxge_fflp_ip_class_config_all Failed"));
2072 		return (NXGE_ERROR);
2073 	}
2074 	return (NXGE_OK);
2075 }
2076 
2077 nxge_status_t
2078 nxge_classify_init_hw(p_nxge_t nxgep)
2079 {
2080 	nxge_status_t status = NXGE_OK;
2081 
2082 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "==> nxge_classify_init_hw"));
2083 
2084 	if (nxgep->classifier.state & NXGE_FFLP_HW_INIT) {
2085 		NXGE_DEBUG_MSG((nxgep, FFLP_CTL,
2086 		    "nxge_classify_init_hw already init"));
2087 		return (NXGE_OK);
2088 	}
2089 
2090 	/* Now do a real configuration */
2091 	status = nxge_fflp_update_hw(nxgep);
2092 	if (status != NXGE_OK) {
2093 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2094 		    "nxge_fflp_update_hw failed"));
2095 		return (NXGE_ERROR);
2096 	}
2097 
2098 	/* Init RDC tables? Who should do that, rxdma or fflp? */
2099 	/* attach rdc table to the MAC port. */
2100 	status = nxge_main_mac_assign_rdc_table(nxgep);
2101 	if (status != NXGE_OK) {
2102 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2103 		    "nxge_main_mac_assign_rdc_table failed"));
2104 		return (NXGE_ERROR);
2105 	}
2106 
2107 	status = nxge_alt_mcast_mac_assign_rdc_table(nxgep);
2108 	if (status != NXGE_OK) {
2109 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2110 		    "nxge_multicast_mac_assign_rdc_table failed"));
2111 		return (NXGE_ERROR);
2112 	}
2113 
2114 	if (nxgep->classifier.fragment_bug == 1) {
2115 		status = nxge_tcam_handle_ip_fragment(nxgep);
2116 		if (status != NXGE_OK) {
2117 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2118 			    "nxge_tcam_handle_ip_fragment failed"));
2119 			return (NXGE_ERROR);
2120 		}
2121 	}
2122 
2123 	nxgep->classifier.state |= NXGE_FFLP_HW_INIT;
2124 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_classify_init_hw"));
2125 	return (NXGE_OK);
2126 }
2127 
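nxge_classify_init_hw() is idempotent: once NXGE_FFLP_HW_INIT is set in classifier.state it returns immediately. A hedged sketch of a re-initialization path (hypothetical helper, for example after a fault reset) that forces a full reprogram by clearing the guard flag first:

static nxge_status_t
example_reinit_classifier(p_nxge_t nxgep)
{
	/* Clear the guard so the next call reprograms the hardware. */
	nxgep->classifier.state &= ~NXGE_FFLP_HW_INIT;
	return (nxge_classify_init_hw(nxgep));
}
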
2128 nxge_status_t
2129 nxge_fflp_handle_sys_errors(p_nxge_t nxgep)
2130 {
2131 	npi_handle_t handle;
2132 	p_nxge_fflp_stats_t statsp;
2133 	uint8_t portn, rdc_grp;
2134 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
2135 	p_nxge_hw_pt_cfg_t p_cfgp;
2136 	vlan_par_err_t vlan_err;
2137 	tcam_err_t tcam_err;
2138 	hash_lookup_err_log1_t fcram1_err;
2139 	hash_lookup_err_log2_t fcram2_err;
2140 	hash_tbl_data_log_t fcram_err;
2141 
2142 	handle = nxgep->npi_handle;
2143 	statsp = (p_nxge_fflp_stats_t)&nxgep->statsp->fflp_stats;
2144 	portn = nxgep->mac.portnum;
2145 
2146 	/*
2147 	 * need to read the fflp error registers to figure out what the error
2148 	 * is
2149 	 */
2150 	npi_fflp_vlan_error_get(handle, &vlan_err);
2151 	npi_fflp_tcam_error_get(handle, &tcam_err);
2152 
2153 	if (vlan_err.bits.ldw.m_err || vlan_err.bits.ldw.err) {
2154 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2155 		    " vlan table parity error on port %d"
2156 		    " addr: 0x%x data: 0x%x",
2157 		    portn, vlan_err.bits.ldw.addr,
2158 		    vlan_err.bits.ldw.data));
2159 		statsp->vlan_parity_err++;
2160 
2161 		if (vlan_err.bits.ldw.m_err) {
2162 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2163 			    " vlan table multiple errors on port %d",
2164 			    portn));
2165 		}
2166 		statsp->errlog.vlan = (uint32_t)vlan_err.value;
2167 		NXGE_FM_REPORT_ERROR(nxgep, 0, 0,
2168 		    NXGE_FM_EREPORT_FFLP_VLAN_PAR_ERR);
2169 		npi_fflp_vlan_error_clear(handle);
2170 	}
2171 
2172 	if (tcam_err.bits.ldw.err) {
2173 		if (tcam_err.bits.ldw.p_ecc != 0) {
2174 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2175 			    " TCAM ECC error on port %d"
2176 			    " TCAM entry: 0x%x syndrome: 0x%x",
2177 			    portn, tcam_err.bits.ldw.addr,
2178 			    tcam_err.bits.ldw.syndrome));
2179 			statsp->tcam_ecc_err++;
2180 		} else {
2181 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2182 			    " TCAM Parity error on port %d"
2183 			    " addr: 0x%x parity value: 0x%x",
2184 			    portn, tcam_err.bits.ldw.addr,
2185 			    tcam_err.bits.ldw.syndrome));
2186 			statsp->tcam_parity_err++;
2187 		}
2188 
2189 		if (tcam_err.bits.ldw.mult) {
2190 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2191 			    " TCAM Multiple errors on port %d", portn));
2192 		} else {
2193 			NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2194 			    " TCAM PIO error on port %d", portn));
2195 		}
2196 
2197 		statsp->errlog.tcam = (uint32_t)tcam_err.value;
2198 		NXGE_FM_REPORT_ERROR(nxgep, 0, 0,
2199 		    NXGE_FM_EREPORT_FFLP_TCAM_ERR);
2200 		npi_fflp_tcam_error_clear(handle);
2201 	}
2202 
2203 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2204 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2205 
2206 	for (rdc_grp = 0; rdc_grp < NXGE_MAX_RDC_GROUPS; rdc_grp++) {
2207 		if (p_cfgp->grpids[rdc_grp]) {
2208 			npi_fflp_fcram_error_get(handle, &fcram_err, rdc_grp);
2209 			if (fcram_err.bits.ldw.pio_err) {
2210 				NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2211 				    " FCRAM PIO ECC error on port %d"
2212 				    " rdc group: %d Hash Table addr: 0x%x"
2213 				    " syndrome: 0x%x",
2214 				    portn, rdc_grp,
2215 				    fcram_err.bits.ldw.fcram_addr,
2216 				    fcram_err.bits.ldw.syndrome));
2217 				statsp->hash_pio_err[rdc_grp]++;
2218 				statsp->errlog.hash_pio[rdc_grp] =
2219 				    (uint32_t)fcram_err.value;
2220 				NXGE_FM_REPORT_ERROR(nxgep, 0, 0,
2221 				    NXGE_FM_EREPORT_FFLP_HASHT_DATA_ERR);
2222 				npi_fflp_fcram_error_clear(handle, rdc_grp);
2223 			}
2224 		}
2225 	}
2226 
2227 	npi_fflp_fcram_error_log1_get(handle, &fcram1_err);
2228 	if (fcram1_err.bits.ldw.ecc_err) {
2229 		char *multi_str = "";
2230 		char *multi_bit_str = "";
2231 
2232 		npi_fflp_fcram_error_log2_get(handle, &fcram2_err);
2233 		if (fcram1_err.bits.ldw.mult_lk) {
2234 			multi_str = "multiple";
2235 		}
2236 		if (fcram1_err.bits.ldw.mult_bit) {
2237 			multi_bit_str = "multiple bits";
2238 		}
2239 		statsp->hash_lookup_err++;
2240 		NXGE_ERROR_MSG((nxgep, FFLP_CTL,
2241 		    " FCRAM %s lookup %s ECC error on port %d"
2242 		    " H1: 0x%x Subarea: 0x%x Syndrome: 0x%x",
2243 		    multi_str, multi_bit_str, portn,
2244 		    fcram2_err.bits.ldw.h1,
2245 		    fcram2_err.bits.ldw.subarea,
2246 		    fcram2_err.bits.ldw.syndrome));
2247 		NXGE_FM_REPORT_ERROR(nxgep, 0, 0,
2248 		    NXGE_FM_EREPORT_FFLP_HASHT_LOOKUP_ERR);
2249 	}
2250 	statsp->errlog.hash_lookup1 = (uint32_t)fcram1_err.value;
2251 	statsp->errlog.hash_lookup2 = (uint32_t)fcram2_err.value;
2252 	return (NXGE_OK);
2253 }
2254 
2255 int
2256 nxge_get_valid_tcam_cnt(p_nxge_t nxgep)
2257 {
2258 	return ((nxgep->classifier.fragment_bug == 1) ?
2259 	    nxgep->classifier.tcam_entry_cnt - 1 :
2260 	    nxgep->classifier.tcam_entry_cnt);
2261 }
2262 
2263 int
2264 nxge_rxdma_channel_cnt(p_nxge_t nxgep)
2265 {
2266 	p_nxge_dma_pt_cfg_t p_dma_cfgp;
2267 	p_nxge_hw_pt_cfg_t p_cfgp;
2268 
2269 	p_dma_cfgp = (p_nxge_dma_pt_cfg_t)&nxgep->pt_config;
2270 	p_cfgp = (p_nxge_hw_pt_cfg_t)&p_dma_cfgp->hw_config;
2271 	return (p_cfgp->max_rdcs);
2272 }
2273 
2274 /* ARGSUSED */
2275 int
2276 nxge_rxclass_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
2277 {
2278 	uint32_t cmd;
2279 	rx_class_cfg_t *cfg_info = (rx_class_cfg_t *)mp->b_rptr;
2280 
2281 	if (nxgep == NULL) {
2282 		return (-1);
2283 	}
2284 	cmd = cfg_info->cmd;
2285 	switch (cmd) {
2286 	default:
2287 		return (-1);
2288 
2289 	case NXGE_RX_CLASS_GCHAN:
2290 		cfg_info->data = nxge_rxdma_channel_cnt(nxgep);
2291 		break;
2292 	case NXGE_RX_CLASS_GRULE_CNT:
2293 		MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2294 		cfg_info->rule_cnt = nxge_get_valid_tcam_cnt(nxgep);
2295 		MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2296 		break;
2297 	case NXGE_RX_CLASS_GRULE:
2298 		nxge_get_tcam_entry(nxgep, &cfg_info->fs);
2299 		break;
2300 	case NXGE_RX_CLASS_GRULE_ALL:
2301 		nxge_get_tcam_entry_all(nxgep, cfg_info);
2302 		break;
2303 	case NXGE_RX_CLASS_RULE_DEL:
2304 		nxge_del_tcam_entry(nxgep, cfg_info->fs.location);
2305 		break;
2306 	case NXGE_RX_CLASS_RULE_INS:
2307 		(void) nxge_add_tcam_entry(nxgep, &cfg_info->fs);
2308 		break;
2309 	}
2310 	return (0);
2311 }
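
The M_IOCTL payload both handlers expect is simply the rx_class_cfg_t (or cfg_cmd_t) sitting at b_rptr. A hedged kernel-side sketch that queries the RX channel count; allocb(9F) and freemsg(9F) are the standard STREAMS routines, the helper itself is hypothetical and error handling is kept minimal.

static int
example_query_channel_cnt(p_nxge_t nxgep)
{
	mblk_t *mp;
	rx_class_cfg_t *cfgp;
	int cnt;

	if ((mp = allocb(sizeof (rx_class_cfg_t), BPRI_MED)) == NULL)
		return (-1);
	mp->b_wptr += sizeof (rx_class_cfg_t);

	cfgp = (rx_class_cfg_t *)mp->b_rptr;
	bzero(cfgp, sizeof (*cfgp));
	cfgp->cmd = NXGE_RX_CLASS_GCHAN;

	(void) nxge_rxclass_ioctl(nxgep, NULL, mp);
	cnt = (int)cfgp->data;
	freemsg(mp);
	return (cnt);
}
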
2312 /* ARGSUSED */
2313 int
2314 nxge_rxhash_ioctl(p_nxge_t nxgep, queue_t *wq, mblk_t *mp)
2315 {
2316 	uint32_t cmd;
2317 	cfg_cmd_t	*cfg_info = (cfg_cmd_t *)mp->b_rptr;
2318 
2319 	if (nxgep == NULL) {
2320 		return (-1);
2321 	}
2322 	cmd = cfg_info->cmd;
2323 
2324 	switch (cmd) {
2325 	default:
2326 		return (-1);
2327 	case NXGE_IPTUN_CFG_ADD_CLS:
2328 		nxge_add_iptun_class(nxgep, &cfg_info->iptun_cfg,
2329 		    &cfg_info->class_id);
2330 		break;
2331 	case NXGE_IPTUN_CFG_SET_HASH:
2332 		nxge_cfg_iptun_hash(nxgep, &cfg_info->iptun_cfg,
2333 		    cfg_info->class_id);
2334 		break;
2335 	case NXGE_IPTUN_CFG_DEL_CLS:
2336 		nxge_del_iptun_class(nxgep, cfg_info->class_id);
2337 		break;
2338 	case NXGE_IPTUN_CFG_GET_CLS:
2339 		nxge_get_iptun_class(nxgep, &cfg_info->iptun_cfg,
2340 		    cfg_info->class_id);
2341 		break;
2342 	case NXGE_CLS_CFG_SET_SYM:
2343 		nxge_set_ip_cls_sym(nxgep, cfg_info->class_id, cfg_info->sym);
2344 		break;
2345 	case NXGE_CLS_CFG_GET_SYM:
2346 		nxge_get_ip_cls_sym(nxgep, cfg_info->class_id, &cfg_info->sym);
2347 		break;
2348 	}
2349 	return (0);
2350 }
2351 
2352 void
2353 nxge_get_tcam_entry_all(p_nxge_t nxgep, rx_class_cfg_t *cfgp)
2354 {
2355 	nxge_classify_t *clasp = &nxgep->classifier;
2356 	uint16_t	n_entries;
2357 	int		i, j, k;
2358 	tcam_flow_spec_t	*tcam_entryp;
2359 
2360 	cfgp->data = clasp->tcam_size;
2361 	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2362 	n_entries = cfgp->rule_cnt;
2363 
2364 	for (i = 0, j = 0; j < cfgp->data; j++) {
2365 		k = nxge_tcam_get_index(nxgep, j);
2366 		tcam_entryp = &clasp->tcam_entries[k];
2367 		if (tcam_entryp->valid != 1)
2368 			continue;
2369 		cfgp->rule_locs[i] = j;
2370 		i++;
2371 	}
2372 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2373 
2374 	if (n_entries != i) {
2375 		/* print warning, this should not happen */
2376 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry_all"
2377 		    "n_entries[%d] != i[%d]!!!", n_entries, i));
2378 	}
2379 }
2380 
2381 
2382 /* Entries for the ports are interleaved in the TCAM */
2383 static uint16_t
2384 nxge_tcam_get_index(p_nxge_t nxgep, uint16_t index)
2385 {
2386 	/* One entry reserved for IP fragment rule */
2387 	if (index >= (nxgep->classifier.tcam_size - 1))
2388 		index = 0;
2389 	if (nxgep->classifier.fragment_bug == 1)
2390 		index++;
2391 	return (nxgep->classifier.tcam_top + (index * nxgep->nports));
2392 }
2393 
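A brief worked example of the interleaving above, using hypothetical values (nports = 4, tcam_top = 0, fragment workaround active): caller-visible rule i is pushed past the reserved fragment slot and lands at hardware location (i + 1) * nports, so rules 0, 1 and 2 map to locations 4, 8 and 12. The sketch below repeats the same arithmetic outside the driver and omits the wrap-around check on tcam_size.

static uint16_t
example_interleaved_location(uint16_t rule, uint16_t tcam_top,
    uint16_t nports, boolean_t fragment_bug)
{
	/* Same arithmetic as nxge_tcam_get_index() above. */
	if (fragment_bug)
		rule++;
	return (tcam_top + (rule * nports));
}
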
2394 static uint32_t
2395 nxge_tcam_cls_to_flow(uint32_t class_code)
2396 {
2397 	switch (class_code) {
2398 	case TCAM_CLASS_TCP_IPV4:
2399 		return (FSPEC_TCPIP4);
2400 	case TCAM_CLASS_UDP_IPV4:
2401 		return (FSPEC_UDPIP4);
2402 	case TCAM_CLASS_AH_ESP_IPV4:
2403 		return (FSPEC_AHIP4);
2404 	case TCAM_CLASS_SCTP_IPV4:
2405 		return (FSPEC_SCTPIP4);
2406 	case  TCAM_CLASS_TCP_IPV6:
2407 		return (FSPEC_TCPIP6);
2408 	case TCAM_CLASS_UDP_IPV6:
2409 		return (FSPEC_UDPIP6);
2410 	case TCAM_CLASS_AH_ESP_IPV6:
2411 		return (FSPEC_AHIP6);
2412 	case TCAM_CLASS_SCTP_IPV6:
2413 		return (FSPEC_SCTPIP6);
2414 	case TCAM_CLASS_IP_USER_4:
2415 	case TCAM_CLASS_IP_USER_5:
2416 	case TCAM_CLASS_IP_USER_6:
2417 	case TCAM_CLASS_IP_USER_7:
2418 		return (FSPEC_IP_USR);
2419 	default:
2420 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL, "nxge_tcam_cls_to_flow"
2421 		    ": Unknown class code [0x%x]", class_code));
2422 		break;
2423 	}
2424 	return (0);
2425 }
2426 
2427 void
2428 nxge_get_tcam_entry(p_nxge_t nxgep, flow_resource_t *fs)
2429 {
2430 	uint16_t	index;
2431 	tcam_flow_spec_t *tcam_ep;
2432 	tcam_entry_t	*tp;
2433 	flow_spec_t	*fspec;
2434 #define	fspec_key (fspec->uh.tcpip4spec)
2435 #define	fspec_mask (fspec->um.tcpip4spec)
2436 
2437 	index = nxge_tcam_get_index(nxgep, (uint16_t)fs->location);
2438 	tcam_ep = &nxgep->classifier.tcam_entries[index];
2439 	if (tcam_ep->valid != 1) {
2440 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_tcam_entry: :"
2441 		    "Entry [%d] invalid for index [%d]", fs->location, index));
2442 		return;
2443 	}
2444 
2445 	/* Fill the flow spec entry */
2446 	tp = &tcam_ep->tce;
2447 	fspec = &fs->flow_spec;
2448 	fspec->flow_type = nxge_tcam_cls_to_flow(tp->ip4_class_key);
2449 
2450 	/* TODO - look at proto field to differentiate between AH and ESP */
2451 	if (fspec->flow_type == FSPEC_AHIP4) {
2452 		if (tp->ip4_proto_key == IPPROTO_ESP)
2453 			fspec->flow_type = FSPEC_ESPIP4;
2454 	}
2455 
2456 	switch (tp->ip4_class_key) {
2457 	case TCAM_CLASS_TCP_IPV4:
2458 	case TCAM_CLASS_UDP_IPV4:
2459 	case TCAM_CLASS_AH_ESP_IPV4:
2460 	case TCAM_CLASS_SCTP_IPV4:
2461 		FSPEC_IPV4_ADDR(fspec_key.ip4dst, tp->ip4_dest_key);
2462 		FSPEC_IPV4_ADDR(fspec_mask.ip4dst, tp->ip4_dest_mask);
2463 		FSPEC_IPV4_ADDR(fspec_key.ip4src, tp->ip4_src_key);
2464 		FSPEC_IPV4_ADDR(fspec_mask.ip4src, tp->ip4_src_mask);
2465 		fspec_key.tos = tp->ip4_tos_key;
2466 		fspec_mask.tos = tp->ip4_tos_mask;
2467 		break;
2468 	default:
2469 		break;
2470 	}
2471 
2472 	switch (tp->ip4_class_key) {
2473 	case TCAM_CLASS_TCP_IPV4:
2474 	case TCAM_CLASS_UDP_IPV4:
2475 	case TCAM_CLASS_SCTP_IPV4:
2476 		FSPEC_IP_PORTS(fspec_key.pdst, fspec_key.psrc,
2477 		    tp->ip4_port_key);
2478 		FSPEC_IP_PORTS(fspec_mask.pdst, fspec_mask.psrc,
2479 		    tp->ip4_port_mask);
2480 		break;
2481 	case TCAM_CLASS_AH_ESP_IPV4:
2482 		fspec->uh.ahip4spec.spi = tp->ip4_port_key;
2483 		fspec->um.ahip4spec.spi = tp->ip4_port_mask;
2484 		break;
2485 	case TCAM_CLASS_IP_USER_4:
2486 	case TCAM_CLASS_IP_USER_5:
2487 	case TCAM_CLASS_IP_USER_6:
2488 	case TCAM_CLASS_IP_USER_7:
2489 		fspec->uh.ip_usr_spec.l4_4_bytes = tp->ip4_port_key;
2490 		fspec->um.ip_usr_spec.l4_4_bytes = tp->ip4_port_mask;
2491 		fspec->uh.ip_usr_spec.ip_ver = FSPEC_IP4;
2492 		fspec->uh.ip_usr_spec.proto = tp->ip4_proto_key;
2493 		fspec->um.ip_usr_spec.proto = tp->ip4_proto_mask;
2494 		break;
2495 	default:
2496 		break;
2497 	}
2498 
2499 	if (tp->match_action.bits.ldw.disc == 1) {
2500 		fs->channel_cookie = NXGE_PKT_DISCARD;
2501 	} else {
2502 		fs->channel_cookie = tp->match_action.bits.ldw.offset;
2503 	}
2504 #undef fspec_key
2505 #undef fspec_mask
2506 }
2507 
2508 void
2509 nxge_del_tcam_entry(p_nxge_t nxgep, uint32_t location)
2510 {
2511 	npi_status_t rs = NPI_SUCCESS;
2512 	uint16_t	index;
2513 	tcam_flow_spec_t *tcam_ep;
2514 	tcam_entry_t	*tp;
2515 	tcam_class_t	class;
2516 
2517 	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2518 	index = nxge_tcam_get_index(nxgep, (uint16_t)location);
2519 	tcam_ep = &nxgep->classifier.tcam_entries[index];
2520 	if (tcam_ep->valid != 1) {
2521 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_tcam_entry: :"
2522 		    "Entry [%d] invalid for index [%d]", location, index));
2523 		goto fail;
2524 	}
2525 
2526 	/* Fill the flow spec entry */
2527 	tp = &tcam_ep->tce;
2528 	class = tp->ip4_class_key;
2529 	if (class >= TCAM_CLASS_IP_USER_4 && class <= TCAM_CLASS_IP_USER_7) {
2530 		int i;
2531 		nxge_usr_l3_cls_t *l3_ucls_p;
2532 		p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2533 
2534 		for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2535 			l3_ucls_p = &hw_p->tcam_l3_prog_cls[i];
2536 			if (l3_ucls_p->valid) {
2537 				if (l3_ucls_p->cls == class &&
2538 				    l3_ucls_p->tcam_ref_cnt) {
2539 					l3_ucls_p->tcam_ref_cnt--;
2540 					if (l3_ucls_p->tcam_ref_cnt > 0)
2541 						continue;
2542 					/* disable class */
2543 					rs = npi_fflp_cfg_ip_usr_cls_disable(
2544 					    nxgep->npi_reg_handle,
2545 					    (tcam_class_t)class);
2546 					if (rs != NPI_SUCCESS)
2547 						goto fail;
2548 					l3_ucls_p->cls = 0;
2549 					l3_ucls_p->pid = 0;
2550 					l3_ucls_p->valid = 0;
2551 					break;
2552 				}
2553 			}
2554 		}
2555 		if (i == NXGE_L3_PROG_CLS) {
2556 			NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2557 			    "nxge_del_tcam_entry: Usr class "
2558 			    "0x%llx not found", (unsigned long long) class));
2559 			goto fail;
2560 		}
2561 	}
2562 
2563 	rs = npi_fflp_tcam_entry_invalidate(nxgep->npi_reg_handle, index);
2564 	if (rs != NPI_SUCCESS) {
2565 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2566 		    "nxge_del_tcam_entry: TCAM invalidate failed "
2567 		    "at loc %d ", location));
2568 		goto fail;
2569 	}
2570 
2571 	nxgep->classifier.tcam_entries[index].valid = 0;
2572 	nxgep->classifier.tcam_entry_cnt--;
2573 
2574 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2575 	NXGE_DEBUG_MSG((nxgep, FFLP_CTL, "<== nxge_del_tcam_entry"));
2576 	return;
2577 fail:
2578 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2579 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2580 	    "<== nxge_del_tcam_entry FAILED"));
2581 }
2582 
2583 static uint8_t
2584 nxge_iptun_pkt_type_to_pid(uint8_t pkt_type)
2585 {
2586 	uint8_t pid = 0;
2587 
2588 	switch (pkt_type) {
2589 	case IPTUN_PKT_IPV4:
2590 		pid = 4;
2591 		break;
2592 	case IPTUN_PKT_IPV6:
2593 		pid = 41;
2594 		break;
2595 	case IPTUN_PKT_GRE:
2596 		pid = 47;
2597 		break;
2598 	case IPTUN_PKT_GTP:
2599 		pid = 17;
2600 		break;
2601 	default:
2602 		NXGE_ERROR_MSG((NULL, NXGE_ERR_CTL,
2603 		    "nxge_iptun_pkt_type_to_pid: Unknown pkt type 0x%x",
2604 		    pkt_type));
2605 		break;
2606 	}
2607 
2608 	return (pid);
2609 }
2610 
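The IDs returned above are the standard IANA IP protocol numbers of the encapsulating header: 4 for IPv4-in-IP, 41 for IPv6, 47 for GRE, and 17 (UDP) for GTP, which is carried over UDP. A trivial illustration of the mapping:

	ASSERT(nxge_iptun_pkt_type_to_pid(IPTUN_PKT_GRE) == 47);
	ASSERT(nxge_iptun_pkt_type_to_pid(IPTUN_PKT_GTP) == 17);	/* GTP over UDP */
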
2611 static npi_status_t
2612 nxge_set_iptun_usr_cls_reg(p_nxge_t nxgep, uint64_t class,
2613     iptun_cfg_t *iptunp)
2614 {
2615 	npi_handle_t handle = nxgep->npi_reg_handle;
2616 	npi_status_t rs = NPI_SUCCESS;
2617 
2618 	switch (iptunp->in_pkt_type) {
2619 	case IPTUN_PKT_IPV4:
2620 	case IPTUN_PKT_IPV6:
2621 		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2622 		    (tcam_class_t)class, 0, 0, 0, 0);
2623 		break;
2624 	case IPTUN_PKT_GRE:
2625 		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2626 		    (tcam_class_t)class, iptunp->l4b0_val,
2627 		    iptunp->l4b0_mask, 0, 0);
2628 		break;
2629 	case IPTUN_PKT_GTP:
2630 		rs = npi_fflp_cfg_ip_usr_cls_set_iptun(handle,
2631 		    (tcam_class_t)class, 0, 0, iptunp->l4b23_val,
2632 		    (iptunp->l4b23_sel & 0x01));
2633 		break;
2634 	default:
2635 		rs = NPI_FFLP_TCAM_CLASS_INVALID;
2636 		break;
2637 	}
2638 	return (rs);
2639 }
2640 
2641 void
2642 nxge_add_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp,
2643     uint8_t *cls_idp)
2644 {
2645 	int i, add_cls;
2646 	uint8_t pid;
2647 	uint64_t class;
2648 	p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2649 	npi_handle_t handle = nxgep->npi_reg_handle;
2650 	npi_status_t rs = NPI_SUCCESS;
2651 
2652 	pid = nxge_iptun_pkt_type_to_pid(iptunp->in_pkt_type);
2653 	if (pid == 0)
2654 		return;
2655 
2656 	add_cls = 0;
2657 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
2658 
2659 	/* Get a user-programmable class ID */
2660 	class = TCAM_CLASS_INVALID;
2661 	for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2662 		if (hw_p->tcam_l3_prog_cls[i].valid == 0) {
2663 			/* todo add new usr class reg */
2664 			switch (i) {
2665 			case 0:
2666 				class = TCAM_CLASS_IP_USER_4;
2667 				break;
2668 			case 1:
2669 				class = TCAM_CLASS_IP_USER_5;
2670 				break;
2671 			case 2:
2672 				class = TCAM_CLASS_IP_USER_6;
2673 				break;
2674 			case 3:
2675 				class = TCAM_CLASS_IP_USER_7;
2676 				break;
2677 			default:
2678 				break;
2679 			}
2680 			rs = npi_fflp_cfg_ip_usr_cls_set(handle,
2681 			    (tcam_class_t)class, 0, 0, pid, 0);
2682 			if (rs != NPI_SUCCESS)
2683 				goto fail;
2684 
2685 			rs = nxge_set_iptun_usr_cls_reg(nxgep, class, iptunp);
2686 
2687 			if (rs != NPI_SUCCESS)
2688 				goto fail;
2689 
2690 			rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
2691 			    (tcam_class_t)class);
2692 			if (rs != NPI_SUCCESS)
2693 				goto fail;
2694 
2695 			hw_p->tcam_l3_prog_cls[i].cls = class;
2696 			hw_p->tcam_l3_prog_cls[i].pid = pid;
2697 			hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
2698 			    iptunp->in_pkt_type;
2699 			hw_p->tcam_l3_prog_cls[i].valid = 1;
2700 			*cls_idp = (uint8_t)class;
2701 			add_cls = 1;
2702 			break;
2703 		} else if (hw_p->tcam_l3_prog_cls[i].pid == pid) {
2704 			if (hw_p->tcam_l3_prog_cls[i].flow_pkt_type == 0) {
2705 				/* there is no flow key */
2706 				/* todo program the existing usr class reg */
2707 
				/* reuse the class registered for this pid */
				class = hw_p->tcam_l3_prog_cls[i].cls;
2708 				rs = nxge_set_iptun_usr_cls_reg(nxgep, class,
2709 				    iptunp);
2710 				if (rs != NPI_SUCCESS)
2711 					goto fail;
2712 
2713 				rs = npi_fflp_cfg_ip_usr_cls_enable(handle,
2714 				    (tcam_class_t)class);
2715 				if (rs != NPI_SUCCESS)
2716 					goto fail;
2717 
2718 				hw_p->tcam_l3_prog_cls[i].flow_pkt_type =
2719 				    iptunp->in_pkt_type;
2720 				*cls_idp = (uint8_t)class;
2721 				add_cls = 1;
2722 			} else {
2723 				NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2724 				    "nxge_add_iptun_class: L3 usr "
2725 				    "programmable class with pid %d "
2726 				    "already exists", pid));
2727 			}
2728 			break;
2729 		}
2730 	}
2731 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2732 
2733 	if (add_cls != 1) {
2734 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2735 		    "nxge_add_iptun_class: Could not add IP tunneling class"));
2736 	}
2737 	return;
2738 fail:
2739 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2740 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_add_iptun_class: FAILED"));
2741 }
2742 
2743 static boolean_t
2744 nxge_is_iptun_cls_present(p_nxge_t nxgep, uint8_t cls_id, int *idx)
2745 {
2746 	int i;
2747 	p_nxge_hw_list_t hw_p = nxgep->nxge_hw_p;
2748 
2749 	MUTEX_ENTER(&hw_p->nxge_tcam_lock);
2750 	for (i = 0; i < NXGE_L3_PROG_CLS; i++) {
2751 		if (hw_p->tcam_l3_prog_cls[i].valid &&
2752 		    hw_p->tcam_l3_prog_cls[i].flow_pkt_type != 0) {
2753 			if (hw_p->tcam_l3_prog_cls[i].cls == cls_id)
2754 				break;
2755 		}
2756 	}
2757 	MUTEX_EXIT(&hw_p->nxge_tcam_lock);
2758 
2759 	if (i == NXGE_L3_PROG_CLS) {
2760 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2761 		    "nxge_is_iptun_cls_present: Invalid class %d", cls_id));
2762 		return (B_FALSE);
2763 	} else {
2764 		*idx = i;
2765 		return (B_TRUE);
2766 	}
2767 }
2768 
2769 void
2770 nxge_cfg_iptun_hash(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
2771 {
2772 	int idx;
2773 	npi_handle_t handle = nxgep->npi_reg_handle;
2774 	flow_key_cfg_t cfg;
2775 
2776 	/* check to see that this is a valid class ID */
2777 	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &idx)) {
2778 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2779 		    "nxge_cfg_iptun_hash: nxge_is_iptun_cls_present "
2780 		    "failed for cls_id %d", cls_id));
2781 		return;
2782 	}
2783 
2784 	bzero((void *)&cfg, sizeof (flow_key_cfg_t));
2785 
2786 	/*
2787 	 * This ensures that all 4 bytes of the XOR value are loaded to the
2788 	 * hash key.
2789 	 */
2790 	cfg.use_dport = cfg.use_sport = cfg.ip_opts_exist = 1;
2791 
2792 	cfg.l4_xor_sel = (iptunp->l4xor_sel & FL_KEY_USR_L4XOR_MSK);
2793 	cfg.use_l4_md = 1;
2794 
2795 	if (iptunp->hash_flags & HASH_L3PROTO)
2796 		cfg.use_proto = 1;
2797 	else if (iptunp->hash_flags & HASH_IPDA)
2798 		cfg.use_daddr = 1;
2799 	else if (iptunp->hash_flags & HASH_IPSA)
2800 		cfg.use_saddr = 1;
2801 	else if (iptunp->hash_flags & HASH_VLAN)
2802 		cfg.use_vlan = 1;
2803 	else if (iptunp->hash_flags & HASH_L2DA)
2804 		cfg.use_l2da = 1;
2805 	else if (iptunp->hash_flags & HASH_IFPORT)
2806 		cfg.use_portnum = 1;
2807 
2808 	(void) npi_fflp_cfg_ip_cls_flow_key_rfnl(handle, (tcam_class_t)cls_id,
2809 	    &cfg);
2810 }
2811 
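A usage sketch tying the two entry points together: register a GRE user class, then hash its flows on the IP destination address. The helper and the literal l4b0 match values are hypothetical placeholders; only the call structure is the point, and a real caller would need to verify the class ID returned through cls_idp.

static void
example_add_gre_class(p_nxge_t nxgep)
{
	iptun_cfg_t cfg;
	uint8_t cls_id = 0;

	bzero(&cfg, sizeof (cfg));
	cfg.in_pkt_type = IPTUN_PKT_GRE;
	cfg.l4b0_val = 0x00;		/* hypothetical GRE byte-0 match */
	cfg.l4b0_mask = 0xff;
	cfg.l4xor_sel = 0;
	cfg.hash_flags = HASH_IPDA;	/* hash on the IP destination address */

	nxge_add_iptun_class(nxgep, &cfg, &cls_id);
	nxge_cfg_iptun_hash(nxgep, &cfg, cls_id);
}
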
2812 void
2813 nxge_del_iptun_class(p_nxge_t nxgep, uint8_t cls_id)
2814 {
2815 	int i;
2816 	npi_handle_t handle = nxgep->npi_reg_handle;
2817 	npi_status_t rs = NPI_SUCCESS;
2818 
2819 
2820 	/* check to see that this is a valid class ID */
2821 	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i)) {
2822 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2823 		    "nxge_del_iptun_class: Invalid class ID 0x%x", cls_id));
2824 		return;
2825 	}
2826 
2827 	MUTEX_ENTER(&nxgep->nxge_hw_p->nxge_tcam_lock);
2828 	rs = npi_fflp_cfg_ip_usr_cls_disable(handle, (tcam_class_t)cls_id);
2829 	if (rs != NPI_SUCCESS)
2830 		goto fail;
2831 	nxgep->nxge_hw_p->tcam_l3_prog_cls[i].flow_pkt_type = 0;
2832 	if (nxgep->nxge_hw_p->tcam_l3_prog_cls[i].tcam_ref_cnt == 0)
2833 		nxgep->nxge_hw_p->tcam_l3_prog_cls[i].valid = 0;
2834 
2835 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2836 	return;
2837 fail:
2838 	MUTEX_EXIT(&nxgep->nxge_hw_p->nxge_tcam_lock);
2839 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_del_iptun_class: FAILED"));
2840 }
2841 
2842 void
2843 nxge_get_iptun_class(p_nxge_t nxgep, iptun_cfg_t *iptunp, uint8_t cls_id)
2844 {
2845 	int i;
2846 	uint8_t pid;
2847 	npi_handle_t handle = nxgep->npi_reg_handle;
2848 	npi_status_t rs = NPI_SUCCESS;
2849 	flow_key_cfg_t cfg;
2850 	uint8_t l4b0_val;
2851 	uint8_t l4b0_mask;
2852 	uint8_t l4b23_sel;
2853 	uint16_t l4b23_val;
2854 
2855 	/* check to see that this is a valid class ID */
2856 	if (!nxge_is_iptun_cls_present(nxgep, cls_id, &i))
2857 		return;
2858 
2859 	bzero((void *)iptunp, sizeof (iptun_cfg_t));
2860 
2861 	pid = nxgep->nxge_hw_p->tcam_l3_prog_cls[i].pid;
2862 
2863 	rs = npi_fflp_cfg_ip_usr_cls_get_iptun(handle, (tcam_class_t)cls_id,
2864 	    &l4b0_val, &l4b0_mask, &l4b23_val, &l4b23_sel);
2865 	if (rs != NPI_SUCCESS)
2866 		goto fail;
2867 
2868 	iptunp->l4b0_val = l4b0_val;
2869 	iptunp->l4b0_mask = l4b0_mask;
2870 	iptunp->l4b23_val = l4b23_val;
2871 	iptunp->l4b23_sel = l4b23_sel;
2872 
2873 	if (rs != NPI_SUCCESS)
2874 		goto fail;
2875 
2876 	rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
2877 	    (tcam_class_t)cls_id, &cfg);
2878 	if (rs != NPI_SUCCESS)
2879 		goto fail;
2880 
2881 	iptunp->l4xor_sel = cfg.l4_xor_sel;
2882 	if (cfg.use_proto)
2883 		iptunp->hash_flags |= HASH_L3PROTO;
2884 	else if (cfg.use_daddr)
2885 		iptunp->hash_flags |= HASH_IPDA;
2886 	else if (cfg.use_saddr)
2887 		iptunp->hash_flags |= HASH_IPSA;
2888 	else if (cfg.use_vlan)
2889 		iptunp->hash_flags |= HASH_VLAN;
2890 	else if (cfg.use_l2da)
2891 		iptunp->hash_flags |= HASH_L2DA;
2892 	else if (cfg.use_portnum)
2893 		iptunp->hash_flags |= HASH_IFPORT;
2894 
2895 	switch (pid) {
2896 	case 4:
2897 		iptunp->in_pkt_type = IPTUN_PKT_IPV4;
2898 		break;
2899 	case 41:
2900 		iptunp->in_pkt_type = IPTUN_PKT_IPV6;
2901 		break;
2902 	case 47:
2903 		iptunp->in_pkt_type = IPTUN_PKT_GRE;
2904 		break;
2905 	case 17:
2906 		iptunp->in_pkt_type = IPTUN_PKT_GTP;
2907 		break;
2908 	default:
2909 		iptunp->in_pkt_type = 0;
2910 		break;
2911 	}
2912 
2913 	return;
2914 fail:
2915 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_iptun_class: FAILED"));
2916 }
2917 
2918 void
2919 nxge_set_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t sym)
2920 {
2921 	npi_handle_t handle = nxgep->npi_reg_handle;
2922 	npi_status_t rs = NPI_SUCCESS;
2923 	boolean_t sym_en = (sym == 1) ? B_TRUE : B_FALSE;
2924 
2925 	rs = npi_fflp_cfg_sym_ip_cls_flow_key(handle, (tcam_class_t)cls_id,
2926 	    sym_en);
2927 	if (rs != NPI_SUCCESS)
2928 		NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL,
2929 		    "nxge_set_ip_cls_sym: FAILED"));
2930 }
2931 
2932 void
2933 nxge_get_ip_cls_sym(p_nxge_t nxgep, uint8_t cls_id, uint8_t *sym)
2934 {
2935 	npi_handle_t handle = nxgep->npi_reg_handle;
2936 	npi_status_t rs = NPI_SUCCESS;
2937 	flow_key_cfg_t cfg;
2938 
2939 	rs = npi_fflp_cfg_ip_cls_flow_key_get_rfnl(handle,
2940 	    (tcam_class_t)cls_id, &cfg);
2941 	if (rs != NPI_SUCCESS)
2942 		goto fail;
2943 
2944 	if (cfg.use_sym)
2945 		*sym = 1;
2946 	else
2947 		*sym = 0;
2948 	return;
2949 fail:
2950 	NXGE_ERROR_MSG((nxgep, NXGE_ERR_CTL, "nxge_get_ip_cls_sym: FAILED"));
2951 }
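
Finally, a small sketch of the symmetric-hash toggle. Setting sym to 1 for a class makes the flow key hash source and destination symmetrically, so both directions of a connection tend to land on the same RDC; the read-back goes through nxge_get_ip_cls_sym(). The helper below is hypothetical; only the two driver calls are taken from this file.

static void
example_symmetric_tcp_hash(p_nxge_t nxgep)
{
	uint8_t sym = 0;

	nxge_set_ip_cls_sym(nxgep, TCAM_CLASS_TCP_IPV4, 1);
	nxge_get_ip_cls_sym(nxgep, TCAM_CLASS_TCP_IPV4, &sym);
	/* sym should read back as 1 if the hardware accepted the setting */
}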
2952