xref: /illumos-gate/usr/src/uts/common/io/qede/579xx/drivers/ecore/ecore_l2.c (revision 04443fde3a2688e592571a7bcca114860cf07bb3)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1,  (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1,  (the "License").
26 
27 * You may not use this file except in compliance with the License.
28 
29 * You can obtain a copy of the License available
30 * at http://opensource.org/licenses/CDDL-1.0
31 
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35 
36 #include "bcm_osal.h"
37 
38 #include "ecore.h"
39 #include "ecore_status.h"
40 #include "ecore_hsi_eth.h"
41 #include "ecore_chain.h"
42 #include "ecore_spq.h"
43 #include "ecore_init_fw_funcs.h"
44 #include "ecore_cxt.h"
45 #include "ecore_l2.h"
46 #include "ecore_sp_commands.h"
47 #include "ecore_gtt_reg_addr.h"
48 #include "ecore_iro.h"
49 #include "reg_addr.h"
50 #include "ecore_int.h"
51 #include "ecore_hw.h"
52 #include "ecore_vf.h"
53 #include "ecore_sriov.h"
54 #include "ecore_mcp.h"
55 
56 #define ECORE_MAX_SGES_NUM 16
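/* CRC-32C (Castagnoli) polynomial; ecore_calc_crc32c() below uses it to hash
 * MAC addresses into approximate-multicast bins.
 */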
57 #define CRC32_POLY 0x1edc6f41
58 
59 struct ecore_l2_info {
60 	u32 queues;
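	/* Per queue-zone bitmaps; each set bit marks a qid in use in that zone */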
61 	unsigned long **pp_qid_usage;
62 
63 	/* The lock is meant to synchronize access to the qid usage */
64 	osal_mutex_t lock;
65 };
66 
67 enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
68 {
69 	struct ecore_l2_info *p_l2_info;
70 	unsigned long **pp_qids;
71 	u32 i;
72 
73 	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
74 		return ECORE_SUCCESS;
75 
76 	p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
77 	if (!p_l2_info)
78 		return ECORE_NOMEM;
79 	p_hwfn->p_l2_info = p_l2_info;
80 
81 	if (IS_PF(p_hwfn->p_dev)) {
82 		p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
83 	} else {
84 		u8 rx = 0, tx = 0;
85 
86 		ecore_vf_get_num_rxqs(p_hwfn, &rx);
87 		ecore_vf_get_num_txqs(p_hwfn, &tx);
88 
89 		p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
90 	}
91 
92 	pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
93 			       sizeof(unsigned long *) *
94 			       p_l2_info->queues);
95 	if (pp_qids == OSAL_NULL)
96 		return ECORE_NOMEM;
97 	p_l2_info->pp_qid_usage = pp_qids;
98 
99 	for (i = 0; i < p_l2_info->queues; i++) {
100 		pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
101 					  MAX_QUEUES_PER_QZONE / 8);
102 		if (pp_qids[i] == OSAL_NULL)
103 			return ECORE_NOMEM;
104 	}
105 
106 #ifdef CONFIG_ECORE_LOCK_ALLOC
107 	OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
108 #endif
109 
110 	return ECORE_SUCCESS;
111 }
112 
113 void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
114 {
115 	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
116 		return;
117 
118 	OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
119 }
120 
121 void ecore_l2_free(struct ecore_hwfn *p_hwfn)
122 {
123 	u32 i;
124 
125 	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
126 		return;
127 
128 	if (p_hwfn->p_l2_info == OSAL_NULL)
129 		return;
130 
131 	if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
132 		goto out_l2_info;
133 
134 	/* Free until hit first uninitialized entry */
135 	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
136 		if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
137 			break;
138 		OSAL_VFREE(p_hwfn->p_dev,
139 			   p_hwfn->p_l2_info->pp_qid_usage[i]);
140 	}
141 
142 #ifdef CONFIG_ECORE_LOCK_ALLOC
143 	/* The lock is allocated last, and only if everything else succeeded */
144 	if (i == p_hwfn->p_l2_info->queues)
145 		OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
146 #endif
147 
148 	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
149 
150 out_l2_info:
151 	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
152 	p_hwfn->p_l2_info = OSAL_NULL;
153 }
154 
155 /* TODO - we'll need locking around these... */
156 static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
157 					  struct ecore_queue_cid *p_cid)
158 {
159 	struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
160 	u16 queue_id = p_cid->rel.queue_id;
161 	bool b_rc = true;
162 	u8 first;
163 
164 	OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);
165 
166 	if (queue_id > p_l2_info->queues) {
167 		DP_NOTICE(p_hwfn, true,
168 			  "Requested to increase usage for qzone %04x out of %08x\n",
169 			  queue_id, p_l2_info->queues);
170 		b_rc = false;
171 		goto out;
172 	}
173 
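	/* Claim the first free qid inside this queue-zone's usage bitmap */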
174 	first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
175 					     MAX_QUEUES_PER_QZONE);
176 	if (first >= MAX_QUEUES_PER_QZONE) {
177 		b_rc = false;
178 		goto out;
179 	}
180 
181 	OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
182 	p_cid->qid_usage_idx = first;
183 
184 out:
185 	OSAL_MUTEX_RELEASE(&p_l2_info->lock);
186 	return b_rc;
187 }
188 
189 static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
190 					  struct ecore_queue_cid *p_cid)
191 {
192 	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);
193 
194 	OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
195 		       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
196 
197 	OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
198 }
199 
200 void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
201 				 struct ecore_queue_cid *p_cid)
202 {
203 	bool b_legacy_vf = !!(p_cid->vf_legacy &
204 			      ECORE_QCID_LEGACY_VF_CID);
205 
206 	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
207 	 * For legacy vf-queues, the CID doesn't go through here.
208 	 */
209 	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
210 		_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
211 
212 	/* VFs maintain the index inside queue-zone on their own */
213 	if (p_cid->vfid == ECORE_QUEUE_CID_PF)
214 		ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
215 
216 	OSAL_VFREE(p_hwfn->p_dev, p_cid);
217 }
218 
219 /* The internal variant is only meant to be called directly by PFs initializing CIDs
220  * for their VFs.
221  */
222 static struct ecore_queue_cid *
223 _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
224 			u16 opaque_fid, u32 cid,
225 			struct ecore_queue_start_common_params *p_params,
226 			struct ecore_queue_cid_vf_params *p_vf_params)
227 {
228 	struct ecore_queue_cid *p_cid;
229 	enum _ecore_status_t rc;
230 
231 	p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
232 	if (p_cid == OSAL_NULL)
233 		return OSAL_NULL;
234 
235 	p_cid->opaque_fid = opaque_fid;
236 	p_cid->cid = cid;
237 	p_cid->p_owner = p_hwfn;
238 
239 	/* Fill in parameters */
240 	p_cid->rel.vport_id = p_params->vport_id;
241 	p_cid->rel.queue_id = p_params->queue_id;
242 	p_cid->rel.stats_id = p_params->stats_id;
243 	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
244 	p_cid->sb_idx = p_params->sb_idx;
245 
246 	/* Fill-in bits related to VFs' queues if information was provided */
247 	if (p_vf_params != OSAL_NULL) {
248 		p_cid->vfid = p_vf_params->vfid;
249 		p_cid->vf_qid = p_vf_params->vf_qid;
250 		p_cid->vf_legacy = p_vf_params->vf_legacy;
251 	} else {
252 		p_cid->vfid = ECORE_QUEUE_CID_PF;
253 	}
254 
255 	/* Don't try calculating the absolute indices for VFs */
256 	if (IS_VF(p_hwfn->p_dev)) {
257 		p_cid->abs = p_cid->rel;
258 
259 		goto out;
260 	}
261 
262 	/* Calculate the engine-absolute indices of the resources.
263 	 * This guarantees they're valid later on.
264 	 * In some cases [SBs] we already have the right values.
265 	 */
266 	rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
267 	if (rc != ECORE_SUCCESS)
268 		goto fail;
269 
270 	rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
271 			       &p_cid->abs.queue_id);
272 	if (rc != ECORE_SUCCESS)
273 		goto fail;
274 
275 	/* In case of a PF configuring its VF's queues, the stats-id is already
276 	 * absolute [since there's a single index that's suitable per-VF].
277 	 */
278 	if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
279 		rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
280 				    &p_cid->abs.stats_id);
281 		if (rc != ECORE_SUCCESS)
282 			goto fail;
283 	} else {
284 		p_cid->abs.stats_id = p_cid->rel.stats_id;
285 	}
286 
287 out:
288 	/* VF-images have provided the qid_usage_idx on their own.
289 	 * Otherwise, we need to allocate a unique one.
290 	 */
291 	if (!p_vf_params) {
292 		if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
293 			goto fail;
294 	} else {
295 		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
296 	}
297 
298 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
299 		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
300 		   p_cid->opaque_fid, p_cid->cid,
301 		   p_cid->rel.vport_id, p_cid->abs.vport_id,
302 		   p_cid->rel.queue_id,	p_cid->qid_usage_idx,
303 		   p_cid->abs.queue_id,
304 		   p_cid->rel.stats_id, p_cid->abs.stats_id,
305 		   p_cid->sb_igu_id, p_cid->sb_idx);
306 
307 	return p_cid;
308 
309 fail:
310 	OSAL_VFREE(p_hwfn->p_dev, p_cid);
311 	return OSAL_NULL;
312 }
313 
314 struct ecore_queue_cid *
315 ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
316 		       struct ecore_queue_start_common_params *p_params,
317 		       struct ecore_queue_cid_vf_params *p_vf_params)
318 {
319 	struct ecore_queue_cid *p_cid;
320 	u8 vfid = ECORE_CXT_PF_CID;
321 	bool b_legacy_vf = false;
322 	u32 cid = 0;
323 
324 	/* In case of legacy VFs, the CID can be derived from the additional
325 	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
326 	 * use the vf_qid for this purpose as well.
327 	 */
328 	if (p_vf_params) {
329 		vfid = p_vf_params->vfid;
330 
331 		if (p_vf_params->vf_legacy &
332 		    ECORE_QCID_LEGACY_VF_CID) {
333 			b_legacy_vf = true;
334 			cid = p_vf_params->vf_qid;
335 		}
336 	}
337 
338 	/* Get a unique firmware CID for this queue, in case it's a PF.
339 	 * VFs don't need a CID as the queue configuration will be done
340 	 * by PF.
341 	 */
342 	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
343 		if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
344 					   &cid, vfid) != ECORE_SUCCESS) {
345 			DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
346 			return OSAL_NULL;
347 		}
348 	}
349 
350 	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
351 					p_params, p_vf_params);
352 	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
353 		_ecore_cxt_release_cid(p_hwfn, cid, vfid);
354 
355 	return p_cid;
356 }
357 
358 static struct ecore_queue_cid *
359 ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
360 			  struct ecore_queue_start_common_params *p_params)
361 {
362 	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
363 }
364 
365 enum _ecore_status_t ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
366 					      struct ecore_sp_vport_start_params *p_params)
367 {
368 	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
369 	struct ecore_spq_entry *p_ent = OSAL_NULL;
370 	struct ecore_sp_init_data init_data;
371 	u16 rx_mode = 0, tx_err = 0;
372 	u8 abs_vport_id = 0;
373 	enum _ecore_status_t rc = ECORE_NOTIMPL;
374 
375 	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
376 	if (rc != ECORE_SUCCESS)
377 		return rc;
378 
379 	/* Get SPQ entry */
380 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
381 	init_data.cid = ecore_spq_get_cid(p_hwfn);
382 	init_data.opaque_fid = p_params->opaque_fid;
383 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
384 
385 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
386 				   ETH_RAMROD_VPORT_START,
387 				   PROTOCOLID_ETH, &init_data);
388 	if (rc != ECORE_SUCCESS)
389 		return rc;
390 
391 	p_ramrod = &p_ent->ramrod.vport_start;
392 	p_ramrod->vport_id = abs_vport_id;
393 
394 	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
395 	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
396 	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
397 	p_ramrod->drop_ttl0_en	= p_params->drop_ttl0;
398 	p_ramrod->untagged = p_params->only_untagged;
399 	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
400 
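	/* A newly started vport drops all Rx traffic; the operational rx-mode is
	 * applied later through a vport-update ramrod (see
	 * ecore_sp_update_accept_mode()).
	 */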
401 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
402 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
403 
404 	p_ramrod->rx_mode.state	= OSAL_CPU_TO_LE16(rx_mode);
405 
406 	/* Handle requests for strict behavior on transmission errors */
407 	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
408 		  p_params->b_err_illegal_vlan_mode ?
409 		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
410 	SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
411 		  p_params->b_err_small_pkt ?
412 		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
413 	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
414 		  p_params->b_err_anti_spoof ?
415 		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
416 	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
417 		  p_params->b_err_illegal_inband_mode ?
418 		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
419 	SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
420 		  p_params->b_err_vlan_insert_with_inband ?
421 		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
422 	SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
423 		  p_params->b_err_big_pkt ?
424 		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
425 	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
426 		  p_params->b_err_ctrl_frame ?
427 		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
428 	p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
429 
430 	/* TPA related fields */
431 	OSAL_MEMSET(&p_ramrod->tpa_param, 0,
432 		    sizeof(struct eth_vport_tpa_param));
433 	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
434 
435 	switch (p_params->tpa_mode) {
436 	case ECORE_TPA_MODE_GRO:
437 		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
438 		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
439 		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu/2;
440 		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu/2;
441 		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
442 		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
443 		p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
444 		p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
445 		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
446 		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
447 		break;
448 	default:
449 		break;
450 	}
451 
452 	p_ramrod->tx_switching_en = p_params->tx_switching;
453 #ifndef ASIC_ONLY
454 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
455 		p_ramrod->tx_switching_en = 0;
456 #endif
457 
458 	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
459 	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
460 
461 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
462 	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
463 						    p_params->concrete_fid);
464 
465 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
466 }
467 
468 enum _ecore_status_t ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
469 					  struct ecore_sp_vport_start_params *p_params)
470 {
471 	if (IS_VF(p_hwfn->p_dev))
472 		return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
473 					       p_params->mtu,
474 					       p_params->remove_inner_vlan,
475 					       p_params->tpa_mode,
476 					       p_params->max_buffers_per_cqe,
477 					       p_params->only_untagged);
478 
479 	return ecore_sp_eth_vport_start(p_hwfn, p_params);
480 }
481 
482 static enum _ecore_status_t
483 ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
484 			  struct vport_update_ramrod_data *p_ramrod,
485 			  struct ecore_rss_params *p_rss)
486 {
487 	struct eth_vport_rss_config *p_config;
488 	int i, table_size;
489 	enum _ecore_status_t rc = ECORE_SUCCESS;
490 
491 	if (!p_rss) {
492 		p_ramrod->common.update_rss_flg = 0;
493 		return rc;
494 	}
495 	p_config = &p_ramrod->rss_config;
496 
497 	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
498 			   ETH_RSS_IND_TABLE_ENTRIES_NUM);
499 
500 	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id,
501 			      &p_config->rss_id);
502 	if (rc != ECORE_SUCCESS)
503 		return rc;
504 
505 	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
506 	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
507 	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
508 	p_config->update_rss_key = p_rss->update_rss_key;
509 
510 	p_config->rss_mode = p_rss->rss_enable ?
511 			     ETH_VPORT_RSS_MODE_REGULAR :
512 			     ETH_VPORT_RSS_MODE_DISABLED;
513 
514 	p_config->capabilities = 0;
515 
516 	SET_FIELD(p_config->capabilities,
517 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
518 		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
519 	SET_FIELD(p_config->capabilities,
520 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
521 		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
522 	SET_FIELD(p_config->capabilities,
523 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
524 		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
525 	SET_FIELD(p_config->capabilities,
526 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
527 		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
528 	SET_FIELD(p_config->capabilities,
529 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
530 		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
531 	SET_FIELD(p_config->capabilities,
532 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
533 		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
534 	p_config->tbl_size = p_rss->rss_table_size_log;
535 	p_config->capabilities =
536 		OSAL_CPU_TO_LE16(p_config->capabilities);
537 
538 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
539 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
540 		   p_ramrod->common.update_rss_flg,
541 		   p_config->rss_mode,
542 		   p_config->update_rss_capabilities,
543 		   p_config->capabilities,
544 		   p_config->update_rss_ind_table,
545 		   p_config->update_rss_key);
546 
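	/* tbl_size holds log2 of the indirection table size; cap it at the
	 * firmware's maximum number of entries.
	 */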
547 	table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
548 				1 << p_config->tbl_size);
549 	for (i = 0; i < table_size; i++) {
550 		struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];
551 
552 		if (!p_queue)
553 			return ECORE_INVAL;
554 
555 		p_config->indirection_table[i] =
556 				OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
557 	}
558 
559 	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
560 		   "Configured RSS indirection table [%d entries]:\n",
561 		   table_size);
562 	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
563 		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
564 			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
565 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
566 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
567 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
568 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
569 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
570 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
571 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
572 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
573 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
574 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
575 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
576 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
577 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
578 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
579 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
580 			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
581 	}
582 
583 	for (i = 0; i <  10; i++)
584 		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);
585 
586 	return rc;
587 }
588 
589 static void
590 ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
591 			    struct vport_update_ramrod_data *p_ramrod,
592 			    struct ecore_filter_accept_flags accept_flags)
593 {
594 	p_ramrod->common.update_rx_mode_flg =
595 					accept_flags.update_rx_mode_config;
596 	p_ramrod->common.update_tx_mode_flg =
597 					accept_flags.update_tx_mode_config;
598 
599 #ifndef ASIC_ONLY
600 	/* On B0 emulation we cannot enable Tx, since this would cause writes
601 	 * to PVFC HW block which isn't implemented in emulation.
602 	 */
603 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
604 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
605 			   "Non-Asic - prevent Tx mode in vport update\n");
606 		p_ramrod->common.update_tx_mode_flg = 0;
607 	}
608 #endif
609 
610 	/* Set Rx mode accept flags */
611 	if (p_ramrod->common.update_rx_mode_flg) {
612 		u8 accept_filter = accept_flags.rx_accept_filter;
613 		u16 state = 0;
614 
615 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
616 			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
617 			   !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
618 
619 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
620 			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
621 
622 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
623 			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
624 			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
625 
626 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
627 			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
628 			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
629 
630 		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
631 			  !!(accept_filter & ECORE_ACCEPT_BCAST));
632 
633 		p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
634 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
635 			   "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
636 			   p_ramrod->common.vport_id, state);
637 	}
638 
639 	/* Set Tx mode accept flags */
640 	if (p_ramrod->common.update_tx_mode_flg) {
641 		u8 accept_filter = accept_flags.tx_accept_filter;
642 		u16 state = 0;
643 
644 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
645 			  !!(accept_filter & ECORE_ACCEPT_NONE));
646 
647 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
648 			  !!(accept_filter & ECORE_ACCEPT_NONE));
649 
650 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
651 			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
652 			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
653 
654 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
655 			  !!(accept_filter & ECORE_ACCEPT_BCAST));
656 
657 		p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
658 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
659 			   "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
660 			   p_ramrod->common.vport_id, state);
661 	}
662 }
663 
664 static void
665 ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
666 			      struct vport_update_ramrod_data *p_ramrod,
667 			      struct ecore_sge_tpa_params *p_params)
668 {
669 	struct eth_vport_tpa_param *p_tpa;
670 
671 	if (!p_params) {
672 		p_ramrod->common.update_tpa_param_flg = 0;
673 		p_ramrod->common.update_tpa_en_flg = 0;
674 		p_ramrod->common.update_tpa_param_flg = 0;
675 		return;
676 	}
677 
678 	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
679 	p_tpa = &p_ramrod->tpa_param;
680 	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
681 	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
682 	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
683 	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
684 
685 	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
686 	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
687 	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
688 	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
689 	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
690 	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
691 	p_tpa->tpa_max_size = p_params->tpa_max_size;
692 	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
693 	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
694 }
695 
696 static void
697 ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
698 			  struct vport_update_ramrod_data *p_ramrod,
699 			  struct ecore_sp_vport_update_params *p_params)
700 {
701 	int i;
702 
703 	OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
704 		    sizeof(p_ramrod->approx_mcast.bins));
705 
706 	if (!p_params->update_approx_mcast_flg)
707 		return;
708 
709 	p_ramrod->common.update_approx_mcast_flg = 1;
710 	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
711 		u32 *p_bins = (u32 *)p_params->bins;
712 
713 		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
714 	}
715 }
716 
717 enum _ecore_status_t ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
718 					   struct ecore_sp_vport_update_params *p_params,
719 					   enum spq_mode comp_mode,
720 					   struct ecore_spq_comp_cb *p_comp_data)
721 {
722 	struct ecore_rss_params *p_rss_params = p_params->rss_params;
723 	struct vport_update_ramrod_data_cmn *p_cmn;
724 	struct ecore_sp_init_data init_data;
725 	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
726 	struct ecore_spq_entry *p_ent = OSAL_NULL;
727 	u8 abs_vport_id = 0, val;
728 	enum _ecore_status_t rc = ECORE_NOTIMPL;
729 
730 	if (IS_VF(p_hwfn->p_dev)) {
731 		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
732 		return rc;
733 	}
734 
735 	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
736 	if (rc != ECORE_SUCCESS)
737 		return rc;
738 
739 	/* Get SPQ entry */
740 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
741 	init_data.cid = ecore_spq_get_cid(p_hwfn);
742 	init_data.opaque_fid = p_params->opaque_fid;
743 	init_data.comp_mode = comp_mode;
744 	init_data.p_comp_data = p_comp_data;
745 
746 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
747 				   ETH_RAMROD_VPORT_UPDATE,
748 				   PROTOCOLID_ETH, &init_data);
749 	if (rc != ECORE_SUCCESS)
750 		return rc;
751 
752 	/* Copy input params to ramrod according to FW struct */
753 	p_ramrod = &p_ent->ramrod.vport_update;
754 	p_cmn = &p_ramrod->common;
755 
756 	p_cmn->vport_id = abs_vport_id;
757 
758 	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
759 	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
760 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
761 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
762 
763 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
764 	val = p_params->update_accept_any_vlan_flg;
765 	p_cmn->update_accept_any_vlan_flg = val;
766 
767 	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
768 	val = p_params->update_inner_vlan_removal_flg;
769 	p_cmn->update_inner_vlan_removal_en_flg = val;
770 
771 	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
772 	val = p_params->update_default_vlan_enable_flg;
773 	p_cmn->update_default_vlan_en_flg = val;
774 
775 	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
776 	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
777 
778 	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
779 
780 	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
781 #ifndef ASIC_ONLY
782 	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
783 		if (p_ramrod->common.tx_switching_en ||
784 		    p_ramrod->common.update_tx_switching_en_flg) {
785 			DP_NOTICE(p_hwfn, false, "FPGA - why are we seeing tx-switching? Overriding it\n");
786 			p_ramrod->common.tx_switching_en = 0;
787 			p_ramrod->common.update_tx_switching_en_flg = 1;
788 		}
789 #endif
790 	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
791 
792 	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
793 	val = p_params->update_anti_spoofing_en_flg;
794 	p_ramrod->common.update_anti_spoofing_en_flg = val;
795 
796 	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
797 	if (rc != ECORE_SUCCESS) {
798 		/* Return spq entry which is taken in ecore_sp_init_request()*/
799 		ecore_spq_return_entry(p_hwfn, p_ent);
800 		return rc;
801 	}
802 
803 	/* Update mcast bins for VFs, PF doesn't use this functionality */
804 	ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
805 
806 	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
807 	ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
808 				      p_params->sge_tpa_params);
809 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
810 }
811 
812 enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
813 					  u16 opaque_fid,
814 					  u8 vport_id)
815 {
816 	struct vport_stop_ramrod_data *p_ramrod;
817 	struct ecore_sp_init_data init_data;
818 	struct ecore_spq_entry *p_ent;
819 	u8 abs_vport_id = 0;
820 	enum _ecore_status_t rc;
821 
822 	if (IS_VF(p_hwfn->p_dev))
823 		return ecore_vf_pf_vport_stop(p_hwfn);
824 
825 	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
826 	if (rc != ECORE_SUCCESS)
827 		return rc;
828 
829 	/* Get SPQ entry */
830 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
831 	init_data.cid = ecore_spq_get_cid(p_hwfn);
832 	init_data.opaque_fid = opaque_fid;
833 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
834 
835 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
836 				   ETH_RAMROD_VPORT_STOP,
837 				   PROTOCOLID_ETH, &init_data);
838 	if (rc != ECORE_SUCCESS)
839 		return rc;
840 
841 	p_ramrod = &p_ent->ramrod.vport_stop;
842 	p_ramrod->vport_id = abs_vport_id;
843 
844 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
845 }
846 
847 static enum _ecore_status_t
848 ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
849 			 struct ecore_filter_accept_flags *p_accept_flags)
850 {
851 	struct ecore_sp_vport_update_params s_params;
852 
853 	OSAL_MEMSET(&s_params, 0, sizeof(s_params));
854 	OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
855 		    sizeof(struct ecore_filter_accept_flags));
856 
857 	return ecore_vf_pf_vport_update(p_hwfn, &s_params);
858 }
859 
860 enum _ecore_status_t ecore_filter_accept_cmd(struct ecore_dev *p_dev,
861 					     u8 vport,
862 					     struct ecore_filter_accept_flags accept_flags,
863 					     u8 update_accept_any_vlan,
864 					     u8 accept_any_vlan,
865 					     enum spq_mode comp_mode,
866 					     struct ecore_spq_comp_cb *p_comp_data)
867 {
868 	struct ecore_sp_vport_update_params vport_update_params;
869 	int i, rc;
870 
871 	/* Prepare and send the vport rx_mode change */
872 	OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
873 	vport_update_params.vport_id = vport;
874 	vport_update_params.accept_flags = accept_flags;
875 	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
876 	vport_update_params.accept_any_vlan = accept_any_vlan;
877 
878 	for_each_hwfn(p_dev, i) {
879 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
880 
881 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
882 
883 		if (IS_VF(p_dev)) {
884 			rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
885 			if (rc != ECORE_SUCCESS)
886 				return rc;
887 			continue;
888 		}
889 
890 		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
891 					   comp_mode, p_comp_data);
892 		if (rc != ECORE_SUCCESS) {
893 			DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
894 			return rc;
895 		}
896 
897 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
898 			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
899 			   accept_flags.rx_accept_filter,
900 			   accept_flags.tx_accept_filter);
901 
902 		if (update_accept_any_vlan)
903 			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
904 				   "accept_any_vlan=%d configured\n",
905 				   accept_any_vlan);
906 	}
907 
908 	return 0;
909 }
910 
911 enum _ecore_status_t
912 ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
913 			   struct ecore_queue_cid *p_cid,
914 			   u16 bd_max_bytes,
915 			   dma_addr_t bd_chain_phys_addr,
916 			   dma_addr_t cqe_pbl_addr,
917 			   u16 cqe_pbl_size)
918 {
919 	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
920 	struct ecore_spq_entry *p_ent = OSAL_NULL;
921 	struct ecore_sp_init_data init_data;
922 	enum _ecore_status_t rc = ECORE_NOTIMPL;
923 
924 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
925 		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
926 		   p_cid->abs.vport_id, p_cid->sb_igu_id);
927 
928 	/* Get SPQ entry */
929 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
930 	init_data.cid = p_cid->cid;
931 	init_data.opaque_fid = p_cid->opaque_fid;
932 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
933 
934 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
935 				   ETH_RAMROD_RX_QUEUE_START,
936 				   PROTOCOLID_ETH, &init_data);
937 	if (rc != ECORE_SUCCESS)
938 		return rc;
939 
940 	p_ramrod = &p_ent->ramrod.rx_queue_start;
941 
942 	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
943 	p_ramrod->sb_index = p_cid->sb_idx;
944 	p_ramrod->vport_id = p_cid->abs.vport_id;
945 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
946 	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
947 	p_ramrod->complete_cqe_flg = 0;
948 	p_ramrod->complete_event_flg = 1;
949 
950 	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
951 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
952 
953 	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
954 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
955 
956 	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
957 		bool b_legacy_vf = !!(p_cid->vf_legacy &
958 				      ECORE_QCID_LEGACY_VF_RX_PROD);
959 
960 		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
961 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Queue%s is meant for VF rxq[%02x]\n",
962 			   b_legacy_vf ? " [legacy]" : "",
963 			   p_cid->vf_qid);
964 		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
965 	}
966 
967 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
968 }
969 
970 static enum _ecore_status_t
971 ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
972 			    struct ecore_queue_cid *p_cid,
973 			    u16 bd_max_bytes,
974 			    dma_addr_t bd_chain_phys_addr,
975 			    dma_addr_t cqe_pbl_addr,
976 			    u16 cqe_pbl_size,
977 			    void OSAL_IOMEM **pp_prod)
978 {
979 	u32 init_prod_val = 0;
980 
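	/* The PF's Rx producers sit in MSTORM RAM, reached through the BAR0 GTT
	 * window and indexed by the absolute queue id.
	 */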
981 	*pp_prod = (u8 OSAL_IOMEM*)
982 		    p_hwfn->regview +
983 		    GTT_BAR0_MAP_REG_MSDM_RAM +
984 		    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
985 
986 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
987 	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
988 			  (u32 *)(&init_prod_val));
989 
990 	return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
991 					  bd_max_bytes,
992 					  bd_chain_phys_addr,
993 					  cqe_pbl_addr, cqe_pbl_size);
994 }
995 
996 enum _ecore_status_t
997 ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
998 			 u16 opaque_fid,
999 			 struct ecore_queue_start_common_params *p_params,
1000 			 u16 bd_max_bytes,
1001 			 dma_addr_t bd_chain_phys_addr,
1002 			 dma_addr_t cqe_pbl_addr,
1003 			 u16 cqe_pbl_size,
1004 			 struct ecore_rxq_start_ret_params *p_ret_params)
1005 {
1006 	struct ecore_queue_cid *p_cid;
1007 	enum _ecore_status_t rc;
1008 
1009 	/* Allocate a CID for the queue */
1010 	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
1011 	if (p_cid == OSAL_NULL)
1012 		return ECORE_NOMEM;
1013 
1014 	if (IS_PF(p_hwfn->p_dev))
1015 		rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
1016 						 bd_max_bytes,
1017 						 bd_chain_phys_addr,
1018 						 cqe_pbl_addr, cqe_pbl_size,
1019 						 &p_ret_params->p_prod);
1020 	else
1021 		rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
1022 					   bd_max_bytes,
1023 					   bd_chain_phys_addr,
1024 					   cqe_pbl_addr,
1025 					   cqe_pbl_size,
1026 					   &p_ret_params->p_prod);
1027 
1028 	/* Provide the caller with a reference to use as the queue handle */
1029 	if (rc != ECORE_SUCCESS)
1030 		ecore_eth_queue_cid_release(p_hwfn, p_cid);
1031 	else
1032 		p_ret_params->p_handle = (void *)p_cid;
1033 
1034 	return rc;
1035 }
1036 
1037 enum _ecore_status_t ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
1038 						   void **pp_rxq_handles,
1039 						   u8 num_rxqs,
1040 						   u8 complete_cqe_flg,
1041 						   u8 complete_event_flg,
1042 						   enum spq_mode comp_mode,
1043 						   struct ecore_spq_comp_cb *p_comp_data)
1044 {
1045 	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
1046 	struct ecore_spq_entry *p_ent = OSAL_NULL;
1047 	struct ecore_sp_init_data init_data;
1048 	struct ecore_queue_cid *p_cid;
1049 	enum _ecore_status_t rc = ECORE_NOTIMPL;
1050 	u8 i;
1051 
1052 #ifndef LINUX_REMOVE
1053 	if (IS_VF(p_hwfn->p_dev))
1054 		return ecore_vf_pf_rxqs_update(p_hwfn,
1055 					       (struct ecore_queue_cid **)
1056 					       pp_rxq_handles,
1057 					       num_rxqs,
1058 					       complete_cqe_flg,
1059 					       complete_event_flg);
1060 #endif
1061 
1062 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1063 	init_data.comp_mode = comp_mode;
1064 	init_data.p_comp_data = p_comp_data;
1065 
1066 	for (i = 0; i < num_rxqs; i++) {
1067 		p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
1068 
1069 		/* Get SPQ entry */
1070 		init_data.cid = p_cid->cid;
1071 		init_data.opaque_fid = p_cid->opaque_fid;
1072 
1073 		rc = ecore_sp_init_request(p_hwfn, &p_ent,
1074 					   ETH_RAMROD_RX_QUEUE_UPDATE,
1075 					   PROTOCOLID_ETH, &init_data);
1076 		if (rc != ECORE_SUCCESS)
1077 			return rc;
1078 
1079 		p_ramrod = &p_ent->ramrod.rx_queue_update;
1080 		p_ramrod->vport_id = p_cid->abs.vport_id;
1081 
1082 		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
1083 		p_ramrod->complete_cqe_flg = complete_cqe_flg;
1084 		p_ramrod->complete_event_flg = complete_event_flg;
1085 
1086 		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1087 		if (rc != ECORE_SUCCESS)
1088 			return rc;
1089 	}
1090 
1091 	return rc;
1092 }
1093 
1094 static enum _ecore_status_t
1095 ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
1096 			   struct ecore_queue_cid *p_cid,
1097 			   bool b_eq_completion_only,
1098 			   bool b_cqe_completion)
1099 {
1100 	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
1101 	struct ecore_spq_entry *p_ent = OSAL_NULL;
1102 	struct ecore_sp_init_data init_data;
1103 	enum _ecore_status_t rc;
1104 
1105 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1106 	init_data.cid = p_cid->cid;
1107 	init_data.opaque_fid = p_cid->opaque_fid;
1108 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1109 
1110 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
1111 				   ETH_RAMROD_RX_QUEUE_STOP,
1112 				   PROTOCOLID_ETH, &init_data);
1113 	if (rc != ECORE_SUCCESS)
1114 		return rc;
1115 
1116 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
1117 	p_ramrod->vport_id = p_cid->abs.vport_id;
1118 	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
1119 
1120 	/* Cleaning the queue requires the completion to arrive there.
1121 	 * In addition, VFs require the answer to come as eqe to PF.
1122 	 */
1123 	p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
1124 				      !b_eq_completion_only) ||
1125 				     b_cqe_completion;
1126 	p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
1127 				       b_eq_completion_only;
1128 
1129 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1130 }
1131 
1132 enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
1133 					     void *p_rxq,
1134 					     bool eq_completion_only,
1135 					     bool cqe_completion)
1136 {
1137 	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
1138 	enum _ecore_status_t rc = ECORE_NOTIMPL;
1139 
1140 	if (IS_PF(p_hwfn->p_dev))
1141 		rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
1142 						eq_completion_only,
1143 						cqe_completion);
1144 	else
1145 		rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
1146 
1147 	if (rc == ECORE_SUCCESS)
1148 		ecore_eth_queue_cid_release(p_hwfn, p_cid);
1149 	return rc;
1150 }
1151 
1152 enum _ecore_status_t
1153 ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
1154 			   struct ecore_queue_cid *p_cid,
1155 			   dma_addr_t pbl_addr, u16 pbl_size,
1156 			   u16 pq_id)
1157 {
1158 	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
1159 	struct ecore_spq_entry *p_ent = OSAL_NULL;
1160 	struct ecore_sp_init_data init_data;
1161 	enum _ecore_status_t rc = ECORE_NOTIMPL;
1162 
1163 	/* Get SPQ entry */
1164 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1165 	init_data.cid = p_cid->cid;
1166 	init_data.opaque_fid = p_cid->opaque_fid;
1167 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1168 
1169 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
1170 				   ETH_RAMROD_TX_QUEUE_START,
1171 				   PROTOCOLID_ETH, &init_data);
1172 	if (rc != ECORE_SUCCESS)
1173 		return rc;
1174 
1175 	p_ramrod = &p_ent->ramrod.tx_queue_start;
1176 	p_ramrod->vport_id = p_cid->abs.vport_id;
1177 
1178 	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
1179 	p_ramrod->sb_index = p_cid->sb_idx;
1180 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
1181 
1182 	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
1183 	p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
1184 
1185 	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
1186 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
1187 
1188 	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
1189 
1190 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1191 }
1192 
1193 static enum _ecore_status_t
1194 ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
1195 			    struct ecore_queue_cid *p_cid,
1196 			    u8 tc,
1197 			    dma_addr_t pbl_addr, u16 pbl_size,
1198 			    void OSAL_IOMEM **pp_doorbell)
1199 {
1200 	enum _ecore_status_t rc;
1201 
1202 	/* TODO - set tc in the pq_params for multi-cos */
1203 	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
1204 					pbl_addr, pbl_size,
1205 					ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
1206 	if (rc != ECORE_SUCCESS)
1207 		return rc;
1208 
1209 	/* Provide the caller with the necessary return values */
1210 	*pp_doorbell = (u8 OSAL_IOMEM *)
1211 		       p_hwfn->doorbells +
1212 		       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
1213 
1214 	return ECORE_SUCCESS;
1215 }
1216 
1217 enum _ecore_status_t
1218 ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
1219 			 struct ecore_queue_start_common_params *p_params,
1220 			 u8 tc,
1221 			 dma_addr_t pbl_addr, u16 pbl_size,
1222 			 struct ecore_txq_start_ret_params *p_ret_params)
1223 {
1224 	struct ecore_queue_cid *p_cid;
1225 	enum _ecore_status_t rc;
1226 
1227 	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
1228 	if (p_cid == OSAL_NULL)
1229 		return ECORE_INVAL;
1230 
1231 	if (IS_PF(p_hwfn->p_dev))
1232 		rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
1233 						 pbl_addr, pbl_size,
1234 						 &p_ret_params->p_doorbell);
1235 	else
1236 		rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
1237 					   pbl_addr, pbl_size,
1238 					   &p_ret_params->p_doorbell);
1239 
1240 	if (rc != ECORE_SUCCESS)
1241 		ecore_eth_queue_cid_release(p_hwfn, p_cid);
1242 	else
1243 		p_ret_params->p_handle = (void *)p_cid;
1244 
1245 	return rc;
1246 }
1247 
1248 static enum _ecore_status_t
1249 ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
1250 			   struct ecore_queue_cid *p_cid)
1251 {
1252 	struct ecore_spq_entry *p_ent = OSAL_NULL;
1253 	struct ecore_sp_init_data init_data;
1254 	enum _ecore_status_t rc;
1255 
1256 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1257 	init_data.cid = p_cid->cid;
1258 	init_data.opaque_fid = p_cid->opaque_fid;
1259 	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
1260 
1261 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
1262 				   ETH_RAMROD_TX_QUEUE_STOP,
1263 				   PROTOCOLID_ETH, &init_data);
1264 	if (rc != ECORE_SUCCESS)
1265 		return rc;
1266 
1267 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1268 }
1269 
1270 enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
1271 					     void *p_handle)
1272 {
1273 	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
1274 	enum _ecore_status_t rc;
1275 
1276 	if (IS_PF(p_hwfn->p_dev))
1277 		rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
1278 	else
1279 		rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
1280 
1281 	if (rc == ECORE_SUCCESS)
1282 		ecore_eth_queue_cid_release(p_hwfn, p_cid);
1283 	return rc;
1284 }
1285 
1286 static enum eth_filter_action ecore_filter_action(enum ecore_filter_opcode opcode)
1287 {
1288 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
1289 
1290 	switch (opcode) {
1291 	case ECORE_FILTER_ADD:
1292 		action = ETH_FILTER_ACTION_ADD;
1293 		break;
1294 	case ECORE_FILTER_REMOVE:
1295 		action = ETH_FILTER_ACTION_REMOVE;
1296 		break;
1297 	case ECORE_FILTER_FLUSH:
1298 		action = ETH_FILTER_ACTION_REMOVE_ALL;
1299 		break;
1300 	default:
1301 		action = MAX_ETH_FILTER_ACTION;
1302 	}
1303 
1304 	return action;
1305 }
1306 
1307 static enum _ecore_status_t
1308 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
1309 			  u16 opaque_fid,
1310 			  struct ecore_filter_ucast *p_filter_cmd,
1311 			  struct vport_filter_update_ramrod_data **pp_ramrod,
1312 			  struct ecore_spq_entry **pp_ent,
1313 			  enum spq_mode comp_mode,
1314 			  struct ecore_spq_comp_cb *p_comp_data)
1315 {
1316 	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1317 	struct vport_filter_update_ramrod_data *p_ramrod;
1318 	struct eth_filter_cmd *p_first_filter;
1319 	struct eth_filter_cmd *p_second_filter;
1320 	struct ecore_sp_init_data init_data;
1321 	enum eth_filter_action action;
1322 	enum _ecore_status_t rc;
1323 
1324 	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1325 			    &vport_to_remove_from);
1326 	if (rc != ECORE_SUCCESS)
1327 		return rc;
1328 
1329 	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1330 			    &vport_to_add_to);
1331 	if (rc != ECORE_SUCCESS)
1332 		return rc;
1333 
1334 	/* Get SPQ entry */
1335 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1336 	init_data.cid = ecore_spq_get_cid(p_hwfn);
1337 	init_data.opaque_fid = opaque_fid;
1338 	init_data.comp_mode = comp_mode;
1339 	init_data.p_comp_data = p_comp_data;
1340 
1341 	rc = ecore_sp_init_request(p_hwfn, pp_ent,
1342 				   ETH_RAMROD_FILTERS_UPDATE,
1343 				   PROTOCOLID_ETH, &init_data);
1344 	if (rc != ECORE_SUCCESS)
1345 		return rc;
1346 
1347 	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1348 	p_ramrod = *pp_ramrod;
1349 	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1350 	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1351 
1352 #ifndef ASIC_ONLY
1353 	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1354 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1355 			   "Non-Asic - prevent Tx filters\n");
1356 		p_ramrod->filter_cmd_hdr.tx = 0;
1357 	}
1358 
1359 #endif
1360 
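	/* MOVE and REPLACE are built from two filter commands, a remove followed
	 * by an add; every other opcode needs a single command.
	 */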
1361 	switch (p_filter_cmd->opcode) {
1362 	case ECORE_FILTER_REPLACE:
1363 	case ECORE_FILTER_MOVE:
1364 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
1365 	default:
1366 		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
1367 	}
1368 
1369 	p_first_filter = &p_ramrod->filter_cmds[0];
1370 	p_second_filter = &p_ramrod->filter_cmds[1];
1371 
1372 	switch (p_filter_cmd->type) {
1373 	case ECORE_FILTER_MAC:
1374 		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
1375 	case ECORE_FILTER_VLAN:
1376 		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
1377 	case ECORE_FILTER_MAC_VLAN:
1378 		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
1379 	case ECORE_FILTER_INNER_MAC:
1380 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
1381 	case ECORE_FILTER_INNER_VLAN:
1382 		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
1383 	case ECORE_FILTER_INNER_PAIR:
1384 		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
1385 	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1386 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1387 		break;
1388 	case ECORE_FILTER_MAC_VNI_PAIR:
1389 		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1390 	case ECORE_FILTER_VNI:
1391 		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1392 	}
1393 
1394 	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1395 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1396 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1397 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1398 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1399 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
1400 		ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
1401 				      &p_first_filter->mac_mid,
1402 				      &p_first_filter->mac_lsb,
1403 				      (u8 *)p_filter_cmd->mac);
1404 
1405 	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1406 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1407 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1408 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1409 		p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1410 
1411 	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1412 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1413 	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1414 		p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1415 
1416 	if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1417 		p_second_filter->type = p_first_filter->type;
1418 		p_second_filter->mac_msb = p_first_filter->mac_msb;
1419 		p_second_filter->mac_mid = p_first_filter->mac_mid;
1420 		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1421 		p_second_filter->vlan_id = p_first_filter->vlan_id;
1422 		p_second_filter->vni = p_first_filter->vni;
1423 
1424 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1425 
1426 		p_first_filter->vport_id = vport_to_remove_from;
1427 
1428 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1429 		p_second_filter->vport_id = vport_to_add_to;
1430 	} else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1431 		p_first_filter->vport_id = vport_to_add_to;
1432 		OSAL_MEMCPY(p_second_filter, p_first_filter,
1433 			    sizeof(*p_second_filter));
1434 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1435 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1436 	} else {
1437 		action = ecore_filter_action(p_filter_cmd->opcode);
1438 
1439 		if (action == MAX_ETH_FILTER_ACTION) {
1440 			DP_NOTICE(p_hwfn, true,
1441 				  "%d is not supported yet\n",
1442 				  p_filter_cmd->opcode);
1443 			return ECORE_NOTIMPL;
1444 		}
1445 
1446 		p_first_filter->action = action;
1447 		p_first_filter->vport_id =
1448 			(p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1449 			vport_to_remove_from : vport_to_add_to;
1450 	}
1451 
1452 	return ECORE_SUCCESS;
1453 }
1454 
1455 enum _ecore_status_t ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1456 					       u16 opaque_fid,
1457 					       struct ecore_filter_ucast *p_filter_cmd,
1458 					       enum spq_mode comp_mode,
1459 					       struct ecore_spq_comp_cb *p_comp_data)
1460 {
1461 	struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1462 	struct ecore_spq_entry *p_ent = OSAL_NULL;
1463 	struct eth_filter_cmd_header *p_header;
1464 	enum _ecore_status_t rc;
1465 
1466 	rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1467 				       &p_ramrod, &p_ent,
1468 				       comp_mode, p_comp_data);
1469 	if (rc != ECORE_SUCCESS) {
1470 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1471 		return rc;
1472 	}
1473 	p_header = &p_ramrod->filter_cmd_hdr;
1474 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
1475 
1476 	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1477 	if (rc != ECORE_SUCCESS) {
1478 		DP_ERR(p_hwfn,
1479 		       "Unicast filter ADD command failed %d\n",
1480 		       rc);
1481 		return rc;
1482 	}
1483 
1484 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1485 		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1486 		   (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1487 		    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1488 		     "REMOVE" :
1489 		     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1490 		      "MOVE" : "REPLACE")),
1491 		   (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1492 		    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1493 		     "VLAN" : "MAC & VLAN"),
1494 		   p_ramrod->filter_cmd_hdr.cmd_cnt,
1495 		   p_filter_cmd->is_rx_filter,
1496 		   p_filter_cmd->is_tx_filter);
1497 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1498 		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1499 		   p_filter_cmd->vport_to_add_to,
1500 		   p_filter_cmd->vport_to_remove_from,
1501 		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1502 		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1503 		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1504 		   p_filter_cmd->vlan);
1505 
1506 	return ECORE_SUCCESS;
1507 }
1508 
1509 /*******************************************************************************
1510  * Description:
1511  *         Calculates a CRC32c over a buffer (bit-serial, using CRC32_POLY).
1512  *         Note: crc32_length must be a non-zero multiple of 8 (bytes).
1513  * Return:  The CRC32c value, or the seed unchanged if the input is invalid.
1514  ******************************************************************************/
1515 static u32 ecore_calc_crc32c(u8 *crc32_packet,
1516 			     u32 crc32_length,
1517 			     u32 crc32_seed,
1518 			     u8 complement)
1519 {
1520 	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1521 	u8  msb = 0, current_byte = 0;
1522 
1523 	if ((crc32_packet == OSAL_NULL) ||
1524 	    (crc32_length == 0) ||
1525 	    ((crc32_length % 8) != 0)) {
1526 		return crc32_result;
1527 	}
1528 
1529 	for (byte = 0; byte < crc32_length; byte++) {
1530 		current_byte = crc32_packet[byte];
1531 		for (bit = 0; bit < 8; bit++) {
1532 			msb = (u8)(crc32_result >> 31);
1533 			crc32_result = crc32_result << 1;
1534 			if (msb != (0x1 & (current_byte >> bit))) {
1535 				crc32_result = crc32_result ^ CRC32_POLY;
1536 				crc32_result |= 1; /*crc32_result[0] = 1;*/
1537 			}
1538 		}
1539 	}
1540 
1541 	return crc32_result;
1542 }
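
/* The loop above is a straightforward bit-serial CRC: for every input bit
 * (taken LSB first from each byte) the running CRC is shifted left and, when
 * its former MSB differs from the input bit, XORed with CRC32_POLY (the
 * Castagnoli polynomial, hence "crc32c").  ecore_crc32c_le() below feeds it
 * the 6-byte MAC address zero-padded to the required multiple of 8 bytes.
 */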
1543 
1544 static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
1545 {
1546 	u32 packet_buf[2] = {0};
1547 
1548 	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
1549 	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1550 }
1551 
1552 u8 ecore_mcast_bin_from_mac(u8 *mac)
1553 {
1554 	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1555 				  mac, ETH_ALEN);
1556 
1557 	return crc & 0xff;
1558 }
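
/* The value returned above is the low byte of a CRC32c over the MAC address,
 * i.e. one of 256 approximate-match bins.  ecore_sp_eth_filter_mcast() below
 * sets the corresponding bit in the approx_mcast vector of the vport-update
 * ramrod, so multicast filtering in the device is approximate: several
 * addresses may map to the same bin.
 */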
1559 
1560 static enum _ecore_status_t
1561 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1562 			  u16 opaque_fid,
1563 			  struct ecore_filter_mcast *p_filter_cmd,
1564 			  enum spq_mode comp_mode,
1565 			  struct ecore_spq_comp_cb *p_comp_data)
1566 {
1567 	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1568 	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1569 	struct ecore_spq_entry *p_ent = OSAL_NULL;
1570 	struct ecore_sp_init_data init_data;
1571 	u8 abs_vport_id = 0;
1572 	enum _ecore_status_t rc;
1573 	int i;
1574 
1575 	if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1576 		rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1577 				    &abs_vport_id);
1578 	else
1579 		rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1580 				    &abs_vport_id);
1581 	if (rc != ECORE_SUCCESS)
1582 		return rc;
1583 
1584 	/* Get SPQ entry */
1585 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1586 	init_data.cid = ecore_spq_get_cid(p_hwfn);
1587 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1588 	init_data.comp_mode = comp_mode;
1589 	init_data.p_comp_data = p_comp_data;
1590 
1591 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
1592 				   ETH_RAMROD_VPORT_UPDATE,
1593 				   PROTOCOLID_ETH, &init_data);
1594 	if (rc != ECORE_SUCCESS) {
1595 		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1596 		return rc;
1597 	}
1598 
1599 	p_ramrod = &p_ent->ramrod.vport_update;
1600 	p_ramrod->common.update_approx_mcast_flg = 1;
1601 
1602 	/* explicitly clear out the entire vector */
1603 	OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1604 		    0, sizeof(p_ramrod->approx_mcast.bins));
1605 	OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
1606 		    ETH_MULTICAST_MAC_BINS_IN_REGS);
1607 	/* The ADD operation is an explicit "set": it replaces any multicast
1608 	 * filters previously configured for the vport.
1609 	 */
1610 	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1611 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1612 			u32 bit;
1613 
1614 			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1615 			OSAL_SET_BIT(bit, bins);
1616 		}
1617 
1618 		/* Convert to the correct endianness */
1619 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1620 			struct vport_update_ramrod_mcast *p_ramrod_bins;
1621 			u32 *p_bins = (u32 *)bins;
1622 
1623 			p_ramrod_bins = &p_ramrod->approx_mcast;
1624 			p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
1625 		}
1626 	}
1627 
1628 	p_ramrod->common.vport_id = abs_vport_id;
1629 
1630 	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1631 	if (rc != ECORE_SUCCESS)
1632 		DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1633 
1634 	return rc;
1635 }
1636 
1637 enum _ecore_status_t ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1638 					    struct ecore_filter_mcast *p_filter_cmd,
1639 					    enum spq_mode comp_mode,
1640 					    struct ecore_spq_comp_cb *p_comp_data)
1641 {
1642 	enum _ecore_status_t rc = ECORE_SUCCESS;
1643 	int i;
1644 
1645 	/* only ADD and REMOVE operations are supported for multi-cast */
1646 	if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1647 	     p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
1648 	    (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1649 		return ECORE_INVAL;
1650 	}
1651 
1652 	for_each_hwfn(p_dev, i) {
1653 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1654 		u16 opaque_fid;
1655 
1656 		if (IS_VF(p_dev)) {
1657 			ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1658 			continue;
1659 		}
1660 
1661 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1662 		rc = ecore_sp_eth_filter_mcast(p_hwfn,
1663 					       opaque_fid,
1664 					       p_filter_cmd,
1665 					       comp_mode,
1666 					       p_comp_data);
1667 		if (rc != ECORE_SUCCESS)
1668 			break;
1669 	}
1670 
1671 	return rc;
1672 }
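
/* Illustrative sketch only (not part of the driver): one way a client could
 * use ecore_filter_mcast_cmd() to program a multicast list on a vport.  The
 * vport index, completion mode and exact field layout of ecore_filter_mcast
 * are assumptions based on how the structure is used earlier in this file.
 */
#if 0
static enum _ecore_status_t example_set_mcast_list(struct ecore_dev *p_dev,
						   u8 macs[][ETH_ALEN],
						   u8 num)
{
	struct ecore_filter_mcast mcast;
	u8 i;

	if (num > ECORE_MAX_MC_ADDRS)
		return ECORE_INVAL;

	OSAL_MEMSET(&mcast, 0, sizeof(mcast));
	mcast.opcode = ECORE_FILTER_ADD;	/* ADD replaces the whole list */
	mcast.vport_to_add_to = 0;		/* assumed: main vport */
	mcast.num_mc_addrs = num;
	for (i = 0; i < num; i++)
		OSAL_MEMCPY(mcast.mac[i], macs[i], ETH_ALEN);

	return ecore_filter_mcast_cmd(p_dev, &mcast,
				      ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
}
#endif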
1673 
1674 enum _ecore_status_t ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1675 					    struct ecore_filter_ucast *p_filter_cmd,
1676 					    enum spq_mode comp_mode,
1677 					    struct ecore_spq_comp_cb *p_comp_data)
1678 {
1679 	enum _ecore_status_t rc = ECORE_SUCCESS;
1680 	int i;
1681 
1682 	for_each_hwfn(p_dev, i) {
1683 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1684 		u16 opaque_fid;
1685 
1686 		if (IS_VF(p_dev)) {
1687 			rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1688 			continue;
1689 		}
1690 
1691 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1692 		rc = ecore_sp_eth_filter_ucast(p_hwfn,
1693 					       opaque_fid,
1694 					       p_filter_cmd,
1695 					       comp_mode,
1696 					       p_comp_data);
1697 		if (rc != ECORE_SUCCESS)
1698 			break;
1699 	}
1700 
1701 	return rc;
1702 }
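
/* Illustrative sketch only (not part of the driver): adding a single unicast
 * MAC filter through ecore_filter_ucast_cmd().  The vport index and the
 * completion mode are assumptions; field names follow their usage earlier in
 * this file.
 */
#if 0
static enum _ecore_status_t example_add_ucast_mac(struct ecore_dev *p_dev,
						  const u8 *mac)
{
	struct ecore_filter_ucast ucast;

	OSAL_MEMSET(&ucast, 0, sizeof(ucast));
	ucast.opcode = ECORE_FILTER_ADD;
	ucast.type = ECORE_FILTER_MAC;
	ucast.is_rx_filter = 1;
	ucast.is_tx_filter = 1;
	ucast.vport_to_add_to = 0;		/* assumed: main vport */
	OSAL_MEMCPY(ucast.mac, mac, ETH_ALEN);

	return ecore_filter_ucast_cmd(p_dev, &ucast,
				      ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
}
#endif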
1703 
1704 /* Statistics related code */
1705 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1706 					     u32 *p_addr, u32 *p_len,
1707 					     u16 statistics_bin)
1708 {
1709 	if (IS_PF(p_hwfn->p_dev)) {
1710 		*p_addr = BAR0_MAP_REG_PSDM_RAM +
1711 			  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1712 		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1713 	} else {
1714 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1715 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1716 
1717 		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1718 		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
1719 	}
1720 }
1721 
1722 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1723 				     struct ecore_ptt *p_ptt,
1724 				     struct ecore_eth_stats *p_stats,
1725 				     u16 statistics_bin)
1726 {
1727 	struct eth_pstorm_per_queue_stat pstats;
1728 	u32 pstats_addr = 0, pstats_len = 0;
1729 
1730 	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1731 					 statistics_bin);
1732 
1733 	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1734 	ecore_memcpy_from(p_hwfn, p_ptt, &pstats,
1735 			  pstats_addr, pstats_len);
1736 
1737 	p_stats->common.tx_ucast_bytes +=
1738 		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1739 	p_stats->common.tx_mcast_bytes +=
1740 		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1741 	p_stats->common.tx_bcast_bytes +=
1742 		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1743 	p_stats->common.tx_ucast_pkts +=
1744 		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1745 	p_stats->common.tx_mcast_pkts +=
1746 		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1747 	p_stats->common.tx_bcast_pkts +=
1748 		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1749 	p_stats->common.tx_err_drop_pkts +=
1750 		HILO_64_REGPAIR(pstats.error_drop_pkts);
1751 }
1752 
1753 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1754 				     struct ecore_ptt *p_ptt,
1755 				     struct ecore_eth_stats *p_stats,
1756 				     u16 statistics_bin)
1757 {
1758 	struct tstorm_per_port_stat tstats;
1759 	u32 tstats_addr, tstats_len;
1760 
1761 	if (IS_PF(p_hwfn->p_dev)) {
1762 		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1763 			      TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1764 		tstats_len = sizeof(struct tstorm_per_port_stat);
1765 	} else {
1766 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1767 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1768 
1769 		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1770 		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1771 	}
1772 
1773 	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1774 	ecore_memcpy_from(p_hwfn, p_ptt, &tstats,
1775 			  tstats_addr, tstats_len);
1776 
1777 	p_stats->common.mftag_filter_discards +=
1778 		HILO_64_REGPAIR(tstats.mftag_filter_discard);
1779 	p_stats->common.mac_filter_discards +=
1780 		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1781 }
1782 
1783 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1784 					     u32 *p_addr, u32 *p_len,
1785 					     u16 statistics_bin)
1786 {
1787 	if (IS_PF(p_hwfn->p_dev)) {
1788 		*p_addr = BAR0_MAP_REG_USDM_RAM +
1789 			  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1790 		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1791 	} else {
1792 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1793 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1794 
1795 		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1796 		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
1797 	}
1798 }
1799 
1800 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1801 				     struct ecore_ptt *p_ptt,
1802 				     struct ecore_eth_stats *p_stats,
1803 				     u16 statistics_bin)
1804 {
1805 	struct eth_ustorm_per_queue_stat ustats;
1806 	u32 ustats_addr = 0, ustats_len = 0;
1807 
1808 	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1809 					 statistics_bin);
1810 
1811 	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1812 	ecore_memcpy_from(p_hwfn, p_ptt, &ustats,
1813 			  ustats_addr, ustats_len);
1814 
1815 	p_stats->common.rx_ucast_bytes +=
1816 		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1817 	p_stats->common.rx_mcast_bytes +=
1818 		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1819 	p_stats->common.rx_bcast_bytes +=
1820 		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1821 	p_stats->common.rx_ucast_pkts +=
1822 		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1823 	p_stats->common.rx_mcast_pkts +=
1824 		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1825 	p_stats->common.rx_bcast_pkts +=
1826 		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1827 }
1828 
1829 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1830 					     u32 *p_addr, u32 *p_len,
1831 					     u16 statistics_bin)
1832 {
1833 	if (IS_PF(p_hwfn->p_dev)) {
1834 		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1835 			  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1836 		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1837 	} else {
1838 		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1839 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1840 
1841 		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1842 		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1843 	}
1844 }
1845 
1846 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1847 				     struct ecore_ptt *p_ptt,
1848 				     struct ecore_eth_stats *p_stats,
1849 				     u16 statistics_bin)
1850 {
1851 	struct eth_mstorm_per_queue_stat mstats;
1852 	u32 mstats_addr = 0, mstats_len = 0;
1853 
1854 	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1855 					 statistics_bin);
1856 
1857 	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1858 	ecore_memcpy_from(p_hwfn, p_ptt, &mstats,
1859 			  mstats_addr, mstats_len);
1860 
1861 	p_stats->common.no_buff_discards +=
1862 		HILO_64_REGPAIR(mstats.no_buff_discard);
1863 	p_stats->common.packet_too_big_discard +=
1864 		HILO_64_REGPAIR(mstats.packet_too_big_discard);
1865 	p_stats->common.ttl0_discard +=
1866 		HILO_64_REGPAIR(mstats.ttl0_discard);
1867 	p_stats->common.tpa_coalesced_pkts +=
1868 		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1869 	p_stats->common.tpa_coalesced_events +=
1870 		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1871 	p_stats->common.tpa_aborts_num +=
1872 		HILO_64_REGPAIR(mstats.tpa_aborts_num);
1873 	p_stats->common.tpa_coalesced_bytes +=
1874 		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1875 }
1876 
1877 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1878 					 struct ecore_ptt *p_ptt,
1879 					 struct ecore_eth_stats *p_stats)
1880 {
1881 	struct ecore_eth_stats_common *p_common = &p_stats->common;
1882 	struct port_stats port_stats;
1883 	int j;
1884 
1885 	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1886 
1887 	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1888 			  p_hwfn->mcp_info->port_addr +
1889 			  offsetof(struct public_port, stats),
1890 			  sizeof(port_stats));
1891 
1892 	p_common->rx_64_byte_packets += port_stats.eth.r64;
1893 	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1894 	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1895 	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1896 	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1897 	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1898 	p_common->rx_crc_errors += port_stats.eth.rfcs;
1899 	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1900 	p_common->rx_pause_frames += port_stats.eth.rxpf;
1901 	p_common->rx_pfc_frames += port_stats.eth.rxpp;
1902 	p_common->rx_align_errors += port_stats.eth.raln;
1903 	p_common->rx_carrier_errors += port_stats.eth.rfcr;
1904 	p_common->rx_oversize_packets += port_stats.eth.rovr;
1905 	p_common->rx_jabbers += port_stats.eth.rjbr;
1906 	p_common->rx_undersize_packets += port_stats.eth.rund;
1907 	p_common->rx_fragments += port_stats.eth.rfrg;
1908 	p_common->tx_64_byte_packets += port_stats.eth.t64;
1909 	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1910 	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1911 	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1912 	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1913 	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1914 	p_common->tx_pause_frames += port_stats.eth.txpf;
1915 	p_common->tx_pfc_frames += port_stats.eth.txpp;
1916 	p_common->rx_mac_bytes += port_stats.eth.rbyte;
1917 	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1918 	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1919 	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1920 	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1921 	p_common->tx_mac_bytes += port_stats.eth.tbyte;
1922 	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1923 	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1924 	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1925 	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1926 	for (j = 0; j < 8; j++) {
1927 		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1928 		p_common->brb_discards += port_stats.brb.brb_discard[j];
1929 	}
1930 
1931 	if (ECORE_IS_BB(p_hwfn->p_dev)) {
1932 		struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
1933 
1934 		p_bb->rx_1519_to_1522_byte_packets +=
1935 			port_stats.eth.u0.bb0.r1522;
1936 		p_bb->rx_1519_to_2047_byte_packets +=
1937 			port_stats.eth.u0.bb0.r2047;
1938 		p_bb->rx_2048_to_4095_byte_packets +=
1939 			port_stats.eth.u0.bb0.r4095;
1940 		p_bb->rx_4096_to_9216_byte_packets +=
1941 			port_stats.eth.u0.bb0.r9216;
1942 		p_bb->rx_9217_to_16383_byte_packets +=
1943 			port_stats.eth.u0.bb0.r16383;
1944 		p_bb->tx_1519_to_2047_byte_packets +=
1945 			port_stats.eth.u1.bb1.t2047;
1946 		p_bb->tx_2048_to_4095_byte_packets +=
1947 			port_stats.eth.u1.bb1.t4095;
1948 		p_bb->tx_4096_to_9216_byte_packets +=
1949 			port_stats.eth.u1.bb1.t9216;
1950 		p_bb->tx_9217_to_16383_byte_packets +=
1951 			port_stats.eth.u1.bb1.t16383;
1952 		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1953 		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1954 	} else {
1955 		struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
1956 
1957 		p_ah->rx_1519_to_max_byte_packets +=
1958 			port_stats.eth.u0.ah0.r1519_to_max;
1959 		p_ah->tx_1519_to_max_byte_packets +=
1960 			port_stats.eth.u1.ah1.t1519_to_max;
1961 	}
1962 }
1963 
1964 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1965 			     struct ecore_ptt *p_ptt,
1966 			     struct ecore_eth_stats *stats,
1967 			     u16 statistics_bin, bool b_get_port_stats)
1968 {
1969 	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1970 	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1971 	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1972 	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1973 
1974 #ifndef ASIC_ONLY
1975 	/* Avoid getting PORT stats for emulation. */
1976 	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1977 		return;
1978 #endif
1979 
1980 	if (b_get_port_stats && p_hwfn->mcp_info)
1981 		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1982 }
1983 
1984 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1985 				   struct ecore_eth_stats *stats)
1986 {
1987 	u8 fw_vport = 0;
1988 	int i;
1989 
1990 	OSAL_MEMSET(stats, 0, sizeof(*stats));
1991 
1992 	for_each_hwfn(p_dev, i) {
1993 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1994 		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1995 					  ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1996 
1997 		if (IS_PF(p_dev)) {
1998 			/* The main vport is at relative index 0 */
1999 			if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
2000 				DP_ERR(p_hwfn, "No vport available!\n");
2001 				goto out;
2002 			}
2003 		}
2004 
2005 		if (IS_PF(p_dev) && !p_ptt) {
2006 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2007 			continue;
2008 		}
2009 
2010 		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
2011 					IS_PF(p_dev) ? true : false);
2012 
2013 out:
2014 		if (IS_PF(p_dev) && p_ptt)
2015 			ecore_ptt_release(p_hwfn, p_ptt);
2016 	}
2017 }
2018 
2019 void ecore_get_vport_stats(struct ecore_dev *p_dev,
2020 			   struct ecore_eth_stats *stats)
2021 {
2022 	u32 i;
2023 
2024 	if (!p_dev) {
2025 		OSAL_MEMSET(stats, 0, sizeof(*stats));
2026 		return;
2027 	}
2028 
2029 	_ecore_get_vport_stats(p_dev, stats);
2030 
2031 	if (!p_dev->reset_stats)
2032 		return;
2033 
2034 	/* Reduce the statistics baseline */
2035 	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
2036 		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
2037 }
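
/* Together with ecore_reset_vport_stats() below, the subtraction above turns
 * the returned values into deltas: ecore_reset_vport_stats() zeroes the
 * per-queue storm counters and snapshots the current totals (including the
 * port statistics, which hardware does not reset) into p_dev->reset_stats,
 * and every later ecore_get_vport_stats() call reports counters relative to
 * that baseline.
 */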
2038 
2039 /* Zeroes the vport-specific portion of the stats (port stats remain untouched) */
2040 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
2041 {
2042 	int i;
2043 
2044 	for_each_hwfn(p_dev, i) {
2045 		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2046 		struct eth_mstorm_per_queue_stat mstats;
2047 		struct eth_ustorm_per_queue_stat ustats;
2048 		struct eth_pstorm_per_queue_stat pstats;
2049 		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2050 					  ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2051 		u32 addr = 0, len = 0;
2052 
2053 		if (IS_PF(p_dev) && !p_ptt) {
2054 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2055 			continue;
2056 		}
2057 
2058 		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
2059 		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
2060 		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2061 
2062 		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2063 		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2064 		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2065 
2066 		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2067 		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2068 		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2069 
2070 		if (IS_PF(p_dev))
2071 			ecore_ptt_release(p_hwfn, p_ptt);
2072 	}
2073 
2074 	/* PORT statistics are not necessarily reset, so we need to
2075 	 * read and create a baseline for future statistics.
2076 	 */
2077 	if (!p_dev->reset_stats)
2078 		DP_INFO(p_dev, "Reset stats not allocated\n");
2079 	else
2080 		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2081 }
2082 
2083 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2084 			       struct ecore_ptt *p_ptt,
2085 			       struct ecore_arfs_config_params *p_cfg_params)
2086 {
2087 	if (p_cfg_params->arfs_enable) {
2088 		ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2089 					  p_cfg_params->tcp,
2090 					  p_cfg_params->udp,
2091 					  p_cfg_params->ipv4,
2092 					  p_cfg_params->ipv6);
2093 		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2094 			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
2095 			   p_cfg_params->tcp ? "Enable" : "Disable",
2096 			   p_cfg_params->udp ? "Enable" : "Disable",
2097 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
2098 			   p_cfg_params->ipv6 ? "Enable" : "Disable");
2099 	} else {
2100 		ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2101 	}
2102 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
2103 		   p_cfg_params->arfs_enable ? "Enable" : "Disable");
2104 }
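
/* Illustrative sketch only (not part of the driver): enabling aRFS
 * classification for TCP and UDP over both IPv4 and IPv6 on a hw-function.
 * The caller is assumed to already hold a PTT window (e.g. from
 * ecore_ptt_acquire()).
 */
#if 0
static void example_enable_arfs(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt)
{
	struct ecore_arfs_config_params params;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.arfs_enable = true;
	params.tcp = true;
	params.udp = true;
	params.ipv4 = true;
	params.ipv6 = true;

	ecore_arfs_mode_configure(p_hwfn, p_ptt, &params);
}
#endif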
2105 
2106 enum _ecore_status_t
2107 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2108 				  struct ecore_ptt *p_ptt,
2109 				  struct ecore_spq_comp_cb *p_cb,
2110 				  dma_addr_t p_addr, u16 length,
2111 				  u16 qid, u8 vport_id,
2112 				  bool b_is_add)
2113 {
2114 	struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2115 	struct ecore_spq_entry *p_ent = OSAL_NULL;
2116 	struct ecore_sp_init_data init_data;
2117 	u16 abs_rx_q_id = 0;
2118 	u8 abs_vport_id = 0;
2119 	enum _ecore_status_t rc = ECORE_NOTIMPL;
2120 
2121 	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
2122 	if (rc != ECORE_SUCCESS)
2123 		return rc;
2124 
2125 	rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
2126 	if (rc != ECORE_SUCCESS)
2127 		return rc;
2128 
2129 	/* Get SPQ entry */
2130 	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2131 	init_data.cid = ecore_spq_get_cid(p_hwfn);
2132 
2133 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2134 
2135 	if (p_cb) {
2136 		init_data.comp_mode = ECORE_SPQ_MODE_CB;
2137 		init_data.p_comp_data = p_cb;
2138 	} else {
2139 		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2140 	}
2141 
2142 	rc = ecore_sp_init_request(p_hwfn, &p_ent,
2143 				   ETH_RAMROD_GFT_UPDATE_FILTER,
2144 				   PROTOCOLID_ETH, &init_data);
2145 	if (rc != ECORE_SUCCESS)
2146 		return rc;
2147 
2148 	p_ramrod = &p_ent->ramrod.rx_update_gft;
2149 
2150 	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
2151 	p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
2152 	p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2153 	p_ramrod->vport_id = abs_vport_id;
2154 	p_ramrod->filter_type = RFS_FILTER_TYPE;
2155 	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
2156 					   : GFT_DELETE_FILTER;
2157 
2158 	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2159 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2160 		   abs_vport_id, abs_rx_q_id,
2161 		   b_is_add ? "Adding" : "Removing",
2162 		   (u64)p_addr, length);
2163 
2164 	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2165 }
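
/* Usage note: callers of ecore_configure_rfs_ntuple_filter() pass the DMA
 * address and length of a buffer holding the packet header that describes
 * the flow, plus queue and vport ids that are relative (they are translated
 * to absolute ids above).  With p_cb == OSAL_NULL the request is posted in
 * EBLOCK completion mode, i.e. ecore_spq_post() waits for the firmware
 * completion before returning.
 */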
2166