xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c (revision fcc8487d477a3452a1d0ccbdd4c5e0e1e3cb8bed)
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include "en.h"
34 
35 static void mlx5e_get_drvinfo(struct net_device *dev,
36 			      struct ethtool_drvinfo *drvinfo)
37 {
38 	struct mlx5e_priv *priv = netdev_priv(dev);
39 	struct mlx5_core_dev *mdev = priv->mdev;
40 
41 	strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
42 	strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
43 		sizeof(drvinfo->version));
44 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
45 		 "%d.%d.%04d (%.16s)",
46 		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
47 		 mdev->board_id);
48 	strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
49 		sizeof(drvinfo->bus_info));
50 }
51 
52 struct ptys2ethtool_config {
53 	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
54 	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
55 	u32 speed;
56 };
57 
58 static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
59 
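/*
 * Fill one ptys2ethtool_table[] entry: record the link speed for PTYS
 * protocol bit reg_ and set the listed ethtool link-mode bits in both the
 * supported and advertised masks.
 */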
60 #define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)               \
61 	({                                                              \
62 		struct ptys2ethtool_config *cfg;                        \
63 		const unsigned int modes[] = { __VA_ARGS__ };           \
64 		unsigned int i;                                         \
65 		cfg = &ptys2ethtool_table[reg_];                        \
66 		cfg->speed = speed_;                                    \
67 		bitmap_zero(cfg->supported,                             \
68 			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
69 		bitmap_zero(cfg->advertised,                            \
70 			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
71 		for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) {             \
72 			__set_bit(modes[i], cfg->supported);            \
73 			__set_bit(modes[i], cfg->advertised);           \
74 		}                                                       \
75 	})
76 
77 void mlx5e_build_ptys2ethtool_map(void)
78 {
79 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
80 				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
81 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
82 				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
83 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
84 				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
85 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
86 				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
87 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
88 				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
89 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
90 				       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
91 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
92 				       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
93 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
94 				       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
95 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
96 				       ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
97 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
98 				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
99 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
100 				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
101 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
102 				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
103 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
104 				       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
105 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
106 				       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
107 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
108 				       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
109 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
110 				       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
111 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
112 				       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
113 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
114 				       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
115 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
116 				       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
117 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
118 				       ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
119 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
120 				       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
121 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
122 				       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
123 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
124 				       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
125 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
126 				       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
127 	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
128 				       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
129 }
130 
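/*
 * Returns a per-priority bitmap of priorities that have PFC enabled in the
 * RX or TX direction, or 0 if the PFC query fails.
 */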
131 static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
132 {
133 	struct mlx5_core_dev *mdev = priv->mdev;
134 	u8 pfc_en_tx;
135 	u8 pfc_en_rx;
136 	int err;
137 
138 	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
139 
140 	return err ? 0 : pfc_en_tx | pfc_en_rx;
141 }
142 
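/* True if global pause frames are enabled in either direction; false if the query fails. */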
143 static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
144 {
145 	struct mlx5_core_dev *mdev = priv->mdev;
146 	u32 rx_pause;
147 	u32 tx_pause;
148 	int err;
149 
150 	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
151 
152 	return err ? false : rx_pause | tx_pause;
153 }
154 
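/* Per-group ethtool counter counts; the RQ/SQ/PFC counts follow the current channel and PFC configuration. */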
155 #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!(priv)->q_counter))
156 #define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
157 #define MLX5E_NUM_SQ_STATS(priv) \
158 	(NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
159 #define MLX5E_NUM_PFC_COUNTERS(priv) \
160 	((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
161 	  NUM_PPORT_PER_PRIO_PFC_COUNTERS)
162 
163 static int mlx5e_get_sset_count(struct net_device *dev, int sset)
164 {
165 	struct mlx5e_priv *priv = netdev_priv(dev);
166 
167 	switch (sset) {
168 	case ETH_SS_STATS:
169 		return NUM_SW_COUNTERS +
170 		       MLX5E_NUM_Q_CNTRS(priv) +
171 		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
172 		       NUM_PCIE_COUNTERS(priv) +
173 		       MLX5E_NUM_RQ_STATS(priv) +
174 		       MLX5E_NUM_SQ_STATS(priv) +
175 		       MLX5E_NUM_PFC_COUNTERS(priv) +
176 		       ARRAY_SIZE(mlx5e_pme_status_desc) +
177 		       ARRAY_SIZE(mlx5e_pme_error_desc);
178 
179 	case ETH_SS_PRIV_FLAGS:
180 		return ARRAY_SIZE(mlx5e_priv_flags);
181 	case ETH_SS_TEST:
182 		return mlx5e_self_test_num(priv);
184 	default:
185 		return -EOPNOTSUPP;
186 	}
187 }
188 
189 static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
190 {
191 	int i, j, tc, prio, idx = 0;
192 	unsigned long pfc_combined;
193 
194 	/* SW counters */
195 	for (i = 0; i < NUM_SW_COUNTERS; i++)
196 		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
197 
198 	/* Q counters */
199 	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
200 		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
201 
202 	/* VPORT counters */
203 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
204 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
205 		       vport_stats_desc[i].format);
206 
207 	/* PPORT counters */
208 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
209 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
210 		       pport_802_3_stats_desc[i].format);
211 
212 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
213 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
214 		       pport_2863_stats_desc[i].format);
215 
216 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
217 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
218 		       pport_2819_stats_desc[i].format);
219 
220 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
221 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
222 		       pport_phy_statistical_stats_desc[i].format);
223 
224 	for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
225 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
226 		       pcie_perf_stats_desc[i].format);
227 
228 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
229 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
230 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
231 				pport_per_prio_traffic_stats_desc[i].format, prio);
232 	}
233 
234 	pfc_combined = mlx5e_query_pfc_combined(priv);
235 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
236 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
237 			char pfc_string[ETH_GSTRING_LEN];
238 
239 			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
240 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
241 				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
242 		}
243 	}
244 
245 	if (mlx5e_query_global_pause_combined(priv)) {
246 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
247 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
248 				pport_per_prio_pfc_stats_desc[i].format, "global");
249 		}
250 	}
251 
252 	/* port module event counters */
253 	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
254 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
255 
256 	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
257 		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
258 
259 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
260 		return;
261 
262 	/* per channel counters */
263 	for (i = 0; i < priv->channels.num; i++)
264 		for (j = 0; j < NUM_RQ_STATS; j++)
265 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
266 				rq_stats_desc[j].format, i);
267 
268 	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
269 		for (i = 0; i < priv->channels.num; i++)
270 			for (j = 0; j < NUM_SQ_STATS; j++)
271 				sprintf(data + (idx++) * ETH_GSTRING_LEN,
272 					sq_stats_desc[j].format,
273 					priv->channel_tc2txq[i][tc]);
274 }
275 
276 static void mlx5e_get_strings(struct net_device *dev,
277 			      uint32_t stringset, uint8_t *data)
278 {
279 	struct mlx5e_priv *priv = netdev_priv(dev);
280 	int i;
281 
282 	switch (stringset) {
283 	case ETH_SS_PRIV_FLAGS:
284 		for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
285 			strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
286 		break;
287 
288 	case ETH_SS_TEST:
289 		for (i = 0; i < mlx5e_self_test_num(priv); i++)
290 			strcpy(data + i * ETH_GSTRING_LEN,
291 			       mlx5e_self_tests[i]);
292 		break;
293 
294 	case ETH_SS_STATS:
295 		mlx5e_fill_stats_strings(priv, data);
296 		break;
297 	}
298 }
299 
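/*
 * Counter values below must be written in exactly the same order as the
 * corresponding strings produced by mlx5e_fill_stats_strings().
 */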
300 static void mlx5e_get_ethtool_stats(struct net_device *dev,
301 				    struct ethtool_stats *stats, u64 *data)
302 {
303 	struct mlx5e_priv *priv = netdev_priv(dev);
304 	struct mlx5e_channels *channels;
305 	struct mlx5_priv *mlx5_priv;
306 	int i, j, tc, prio, idx = 0;
307 	unsigned long pfc_combined;
308 
309 	if (!data)
310 		return;
311 
312 	mutex_lock(&priv->state_lock);
313 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
314 		mlx5e_update_stats(priv);
315 	channels = &priv->channels;
316 	mutex_unlock(&priv->state_lock);
317 
318 	for (i = 0; i < NUM_SW_COUNTERS; i++)
319 		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
320 						   sw_stats_desc, i);
321 
322 	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
323 		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
324 						   q_stats_desc, i);
325 
326 	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
327 		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
328 						  vport_stats_desc, i);
329 
330 	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
331 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
332 						  pport_802_3_stats_desc, i);
333 
334 	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
335 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
336 						  pport_2863_stats_desc, i);
337 
338 	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
339 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
340 						  pport_2819_stats_desc, i);
341 
342 	for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
343 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
344 						  pport_phy_statistical_stats_desc, i);
345 
346 	for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
347 		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
348 						  pcie_perf_stats_desc, i);
349 
350 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
351 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
352 			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
353 						 pport_per_prio_traffic_stats_desc, i);
354 	}
355 
356 	pfc_combined = mlx5e_query_pfc_combined(priv);
357 	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
358 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
359 			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
360 							  pport_per_prio_pfc_stats_desc, i);
361 		}
362 	}
363 
364 	if (mlx5e_query_global_pause_combined(priv)) {
365 		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
366 			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
367 							  pport_per_prio_pfc_stats_desc, i);
368 		}
369 	}
370 
371 	/* port module event counters */
372 	mlx5_priv = &priv->mdev->priv;
373 	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
374 		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
375 						   mlx5e_pme_status_desc, i);
376 
377 	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
378 		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
379 						   mlx5e_pme_error_desc, i);
380 
381 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
382 		return;
383 
384 	/* per channel counters */
385 	for (i = 0; i < channels->num; i++)
386 		for (j = 0; j < NUM_RQ_STATS; j++)
387 			data[idx++] =
388 			       MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
389 						    rq_stats_desc, j);
390 
391 	for (tc = 0; tc < priv->channels.params.num_tc; tc++)
392 		for (i = 0; i < channels->num; i++)
393 			for (j = 0; j < NUM_SQ_STATS; j++)
394 				data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
395 								   sq_stats_desc, j);
396 }
397 
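/*
 * For a striding (multi-packet WQE) RQ, translate a number of WQEs into an
 * approximate packet capacity: each WQE holds wqe_size bytes and each packet
 * is assumed to take ETH_DATA_LEN rounded up to the stride size; the result
 * is reported as a power of two. Non-striding RQs map 1:1.
 */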
398 static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
399 				    int num_wqe)
400 {
401 	int packets_per_wqe;
402 	int stride_size;
403 	int num_strides;
404 	int wqe_size;
405 
406 	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
407 		return num_wqe;
408 
409 	stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
410 	num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
411 	wqe_size = stride_size * num_strides;
412 
413 	packets_per_wqe = wqe_size /
414 			  ALIGN(ETH_DATA_LEN, stride_size);
415 	return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
416 }
417 
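/*
 * Inverse of mlx5e_rx_wqes_to_packets(): round the requested packet count up
 * to a power of two and convert it into the number of multi-packet WQEs
 * (also a power of two) needed to hold it. Non-striding RQs map 1:1.
 */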
418 static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
419 				    int num_packets)
420 {
421 	int packets_per_wqe;
422 	int stride_size;
423 	int num_strides;
424 	int wqe_size;
425 	int num_wqes;
426 
427 	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
428 		return num_packets;
429 
430 	stride_size = 1 << priv->channels.params.mpwqe_log_stride_sz;
431 	num_strides = 1 << priv->channels.params.mpwqe_log_num_strides;
432 	wqe_size = stride_size * num_strides;
433 
434 	num_packets = (1 << order_base_2(num_packets));
435 
436 	packets_per_wqe = wqe_size /
437 			  ALIGN(ETH_DATA_LEN, stride_size);
438 	num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
439 	return 1 << (order_base_2(num_wqes));
440 }
441 
442 static void mlx5e_get_ringparam(struct net_device *dev,
443 				struct ethtool_ringparam *param)
444 {
445 	struct mlx5e_priv *priv = netdev_priv(dev);
446 	int rq_wq_type = priv->channels.params.rq_wq_type;
447 
448 	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
449 							 1 << mlx5_max_log_rq_size(rq_wq_type));
450 	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
451 	param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
452 						     1 << priv->channels.params.log_rq_size);
453 	param->tx_pending     = 1 << priv->channels.params.log_sq_size;
454 }
455 
456 static int mlx5e_set_ringparam(struct net_device *dev,
457 			       struct ethtool_ringparam *param)
458 {
459 	struct mlx5e_priv *priv = netdev_priv(dev);
460 	int rq_wq_type = priv->channels.params.rq_wq_type;
461 	struct mlx5e_channels new_channels = {};
462 	u32 rx_pending_wqes;
463 	u32 min_rq_size;
464 	u32 max_rq_size;
465 	u8 log_rq_size;
466 	u8 log_sq_size;
467 	u32 num_mtts;
468 	int err = 0;
469 
470 	if (param->rx_jumbo_pending) {
471 		netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
472 			    __func__);
473 		return -EINVAL;
474 	}
475 	if (param->rx_mini_pending) {
476 		netdev_info(dev, "%s: rx_mini_pending not supported\n",
477 			    __func__);
478 		return -EINVAL;
479 	}
480 
481 	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
482 					       1 << mlx5_min_log_rq_size(rq_wq_type));
483 	max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
484 					       1 << mlx5_max_log_rq_size(rq_wq_type));
485 	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
486 						   param->rx_pending);
487 
488 	if (param->rx_pending < min_rq_size) {
489 		netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
490 			    __func__, param->rx_pending,
491 			    min_rq_size);
492 		return -EINVAL;
493 	}
494 	if (param->rx_pending > max_rq_size) {
495 		netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
496 			    __func__, param->rx_pending,
497 			    max_rq_size);
498 		return -EINVAL;
499 	}
500 
501 	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
502 	if (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
503 	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
504 		netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
505 			    __func__, param->rx_pending);
506 		return -EINVAL;
507 	}
508 
509 	if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
510 		netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
511 			    __func__, param->tx_pending,
512 			    1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
513 		return -EINVAL;
514 	}
515 	if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
516 		netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
517 			    __func__, param->tx_pending,
518 			    1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
519 		return -EINVAL;
520 	}
521 
522 	log_rq_size = order_base_2(rx_pending_wqes);
523 	log_sq_size = order_base_2(param->tx_pending);
524 
525 	if (log_rq_size == priv->channels.params.log_rq_size &&
526 	    log_sq_size == priv->channels.params.log_sq_size)
527 		return 0;
528 
529 	mutex_lock(&priv->state_lock);
530 
531 	new_channels.params = priv->channels.params;
532 	new_channels.params.log_rq_size = log_rq_size;
533 	new_channels.params.log_sq_size = log_sq_size;
534 
535 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
536 		priv->channels.params = new_channels.params;
537 		goto unlock;
538 	}
539 
540 	err = mlx5e_open_channels(priv, &new_channels);
541 	if (err)
542 		goto unlock;
543 
544 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
545 
546 unlock:
547 	mutex_unlock(&priv->state_lock);
548 
549 	return err;
550 }
551 
552 static void mlx5e_get_channels(struct net_device *dev,
553 			       struct ethtool_channels *ch)
554 {
555 	struct mlx5e_priv *priv = netdev_priv(dev);
556 
557 	ch->max_combined   = priv->profile->max_nch(priv->mdev);
558 	ch->combined_count = priv->channels.params.num_channels;
559 }
560 
561 static int mlx5e_set_channels(struct net_device *dev,
562 			      struct ethtool_channels *ch)
563 {
564 	struct mlx5e_priv *priv = netdev_priv(dev);
565 	unsigned int count = ch->combined_count;
566 	struct mlx5e_channels new_channels = {};
567 	bool arfs_enabled;
568 	int err = 0;
569 
570 	if (!count) {
571 		netdev_info(dev, "%s: combined_count=0 not supported\n",
572 			    __func__);
573 		return -EINVAL;
574 	}
575 
576 	if (priv->channels.params.num_channels == count)
577 		return 0;
578 
579 	mutex_lock(&priv->state_lock);
580 
581 	new_channels.params = priv->channels.params;
582 	new_channels.params.num_channels = count;
583 	mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
584 				      MLX5E_INDIR_RQT_SIZE, count);
585 
586 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
587 		priv->channels.params = new_channels.params;
588 		goto out;
589 	}
590 
591 	/* Create fresh channels with new parameters */
592 	err = mlx5e_open_channels(priv, &new_channels);
593 	if (err)
594 		goto out;
595 
596 	arfs_enabled = dev->features & NETIF_F_NTUPLE;
597 	if (arfs_enabled)
598 		mlx5e_arfs_disable(priv);
599 
600 	/* Switch to new channels, set new parameters and close old ones */
601 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
602 
603 	if (arfs_enabled) {
604 		err = mlx5e_arfs_enable(priv);
605 		if (err)
606 			netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
607 				   __func__, err);
608 	}
609 
610 out:
611 	mutex_unlock(&priv->state_lock);
612 
613 	return err;
614 }
615 
616 static int mlx5e_get_coalesce(struct net_device *netdev,
617 			      struct ethtool_coalesce *coal)
618 {
619 	struct mlx5e_priv *priv = netdev_priv(netdev);
620 
621 	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
622 		return -EOPNOTSUPP;
623 
624 	coal->rx_coalesce_usecs       = priv->channels.params.rx_cq_moderation.usec;
625 	coal->rx_max_coalesced_frames = priv->channels.params.rx_cq_moderation.pkts;
626 	coal->tx_coalesce_usecs       = priv->channels.params.tx_cq_moderation.usec;
627 	coal->tx_max_coalesced_frames = priv->channels.params.tx_cq_moderation.pkts;
628 	coal->use_adaptive_rx_coalesce = priv->channels.params.rx_am_enabled;
629 
630 	return 0;
631 }
632 
633 static void
634 mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
635 {
636 	struct mlx5_core_dev *mdev = priv->mdev;
637 	int tc;
638 	int i;
639 
640 	for (i = 0; i < priv->channels.num; ++i) {
641 		struct mlx5e_channel *c = priv->channels.c[i];
642 
643 		for (tc = 0; tc < c->num_tc; tc++) {
644 			mlx5_core_modify_cq_moderation(mdev,
645 						&c->sq[tc].cq.mcq,
646 						coal->tx_coalesce_usecs,
647 						coal->tx_max_coalesced_frames);
648 		}
649 
650 		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
651 					       coal->rx_coalesce_usecs,
652 					       coal->rx_max_coalesced_frames);
653 	}
654 }
655 
656 static int mlx5e_set_coalesce(struct net_device *netdev,
657 			      struct ethtool_coalesce *coal)
658 {
659 	struct mlx5e_priv *priv    = netdev_priv(netdev);
660 	struct mlx5_core_dev *mdev = priv->mdev;
661 	struct mlx5e_channels new_channels = {};
662 	int err = 0;
663 	bool reset;
664 
665 	if (!MLX5_CAP_GEN(mdev, cq_moderation))
666 		return -EOPNOTSUPP;
667 
668 	mutex_lock(&priv->state_lock);
669 	new_channels.params = priv->channels.params;
670 
671 	new_channels.params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
672 	new_channels.params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
673 	new_channels.params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
674 	new_channels.params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
675 	new_channels.params.rx_am_enabled         = !!coal->use_adaptive_rx_coalesce;
676 
677 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
678 		priv->channels.params = new_channels.params;
679 		goto out;
680 	}
681 	/* the device is open: only a change in adaptive moderation requires reopening the channels */
682 
683 	reset = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_am_enabled;
684 	if (!reset) {
685 		mlx5e_set_priv_channels_coalesce(priv, coal);
686 		priv->channels.params = new_channels.params;
687 		goto out;
688 	}
689 
690 	/* open fresh channels with new coal parameters */
691 	err = mlx5e_open_channels(priv, &new_channels);
692 	if (err)
693 		goto out;
694 
695 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
696 
697 out:
698 	mutex_unlock(&priv->state_lock);
699 	return err;
700 }
701 
702 static void ptys2ethtool_supported_link(unsigned long *supported_modes,
703 					u32 eth_proto_cap)
704 {
705 	unsigned long proto_cap = eth_proto_cap;
706 	int proto;
707 
708 	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
709 		bitmap_or(supported_modes, supported_modes,
710 			  ptys2ethtool_table[proto].supported,
711 			  __ETHTOOL_LINK_MODE_MASK_NBITS);
712 }
713 
714 static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
715 				    u32 eth_proto_cap)
716 {
717 	unsigned long proto_cap = eth_proto_cap;
718 	int proto;
719 
720 	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
721 		bitmap_or(advertising_modes, advertising_modes,
722 			  ptys2ethtool_table[proto].advertised,
723 			  __ETHTOOL_LINK_MODE_MASK_NBITS);
724 }
725 
726 static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
727 					u32 eth_proto_cap)
728 {
729 	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
730 			   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
731 			   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
732 			   | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
733 			   | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
734 			   | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
735 		ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
736 	}
737 
738 	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
739 			   | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
740 			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
741 			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
742 			   | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
743 		ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
744 	}
745 }
746 
747 int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
748 {
749 	u32 max_speed = 0;
750 	u32 proto_cap;
751 	int err;
752 	int i;
753 
754 	err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
755 	if (err)
756 		return err;
757 
758 	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
759 		if (proto_cap & MLX5E_PROT_MASK(i))
760 			max_speed = max(max_speed, ptys2ethtool_table[i].speed);
761 
762 	*speed = max_speed;
763 	return 0;
764 }
765 
766 static void get_speed_duplex(struct net_device *netdev,
767 			     u32 eth_proto_oper,
768 			     struct ethtool_link_ksettings *link_ksettings)
769 {
770 	int i;
771 	u32 speed = SPEED_UNKNOWN;
772 	u8 duplex = DUPLEX_UNKNOWN;
773 
774 	if (!netif_carrier_ok(netdev))
775 		goto out;
776 
777 	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
778 		if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
779 			speed = ptys2ethtool_table[i].speed;
780 			duplex = DUPLEX_FULL;
781 			break;
782 		}
783 	}
784 out:
785 	link_ksettings->base.speed = speed;
786 	link_ksettings->base.duplex = duplex;
787 }
788 
789 static void get_supported(u32 eth_proto_cap,
790 			  struct ethtool_link_ksettings *link_ksettings)
791 {
792 	unsigned long *supported = link_ksettings->link_modes.supported;
793 
794 	ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
795 	ptys2ethtool_supported_link(supported, eth_proto_cap);
796 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
797 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
798 }
799 
800 static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
801 			    u8 rx_pause,
802 			    struct ethtool_link_ksettings *link_ksettings)
803 {
804 	unsigned long *advertising = link_ksettings->link_modes.advertising;
805 
806 	ptys2ethtool_adver_link(advertising, eth_proto_cap);
807 	if (tx_pause)
808 		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
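	/* advertise asymmetric pause when only one direction is enabled */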
809 	if (tx_pause ^ rx_pause)
810 		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
811 }
812 
813 static u8 get_connector_port(u32 eth_proto)
814 {
815 	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
816 			 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
817 			 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
818 			 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
819 			return PORT_FIBRE;
820 	}
821 
822 	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
823 			 | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
824 			 | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
825 			return PORT_DA;
826 	}
827 
828 	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
829 			 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
830 			 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
831 			 | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
832 			return PORT_NONE;
833 	}
834 
835 	return PORT_OTHER;
836 }
837 
838 static void get_lp_advertising(u32 eth_proto_lp,
839 			       struct ethtool_link_ksettings *link_ksettings)
840 {
841 	unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
842 
843 	ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
844 }
845 
846 static int mlx5e_get_link_ksettings(struct net_device *netdev,
847 				    struct ethtool_link_ksettings *link_ksettings)
848 {
849 	struct mlx5e_priv *priv    = netdev_priv(netdev);
850 	struct mlx5_core_dev *mdev = priv->mdev;
851 	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
852 	u32 eth_proto_cap;
853 	u32 eth_proto_admin;
854 	u32 eth_proto_lp;
855 	u32 eth_proto_oper;
856 	u8 an_disable_admin;
857 	u8 an_status;
858 	int err;
859 
860 	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
861 	if (err) {
862 		netdev_err(netdev, "%s: query port ptys failed: %d\n",
863 			   __func__, err);
864 		goto err_query_ptys;
865 	}
866 
867 	eth_proto_cap    = MLX5_GET(ptys_reg, out, eth_proto_capability);
868 	eth_proto_admin  = MLX5_GET(ptys_reg, out, eth_proto_admin);
869 	eth_proto_oper   = MLX5_GET(ptys_reg, out, eth_proto_oper);
870 	eth_proto_lp     = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
871 	an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
872 	an_status        = MLX5_GET(ptys_reg, out, an_status);
873 
874 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
875 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
876 
877 	get_supported(eth_proto_cap, link_ksettings);
878 	get_advertising(eth_proto_admin, 0, 0, link_ksettings);
879 	get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
880 
881 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
882 
883 	link_ksettings->base.port = get_connector_port(eth_proto_oper);
884 	get_lp_advertising(eth_proto_lp, link_ksettings);
885 
886 	if (an_status == MLX5_AN_COMPLETE)
887 		ethtool_link_ksettings_add_link_mode(link_ksettings,
888 						     lp_advertising, Autoneg);
889 
890 	link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
891 							  AUTONEG_ENABLE;
892 	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
893 					     Autoneg);
894 	if (!an_disable_admin)
895 		ethtool_link_ksettings_add_link_mode(link_ksettings,
896 						     advertising, Autoneg);
897 
898 err_query_ptys:
899 	return err;
900 }
901 
902 static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
903 {
904 	u32 i, ptys_modes = 0;
905 
906 	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
907 		if (bitmap_intersects(ptys2ethtool_table[i].advertised,
908 				      link_modes,
909 				      __ETHTOOL_LINK_MODE_MASK_NBITS))
910 			ptys_modes |= MLX5E_PROT_MASK(i);
911 	}
912 
913 	return ptys_modes;
914 }
915 
916 static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
917 {
918 	u32 i, speed_links = 0;
919 
920 	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
921 		if (ptys2ethtool_table[i].speed == speed)
922 			speed_links |= MLX5E_PROT_MASK(i);
923 	}
924 
925 	return speed_links;
926 }
927 
928 static int mlx5e_set_link_ksettings(struct net_device *netdev,
929 				    const struct ethtool_link_ksettings *link_ksettings)
930 {
931 	struct mlx5e_priv *priv    = netdev_priv(netdev);
932 	struct mlx5_core_dev *mdev = priv->mdev;
933 	u32 eth_proto_cap, eth_proto_admin;
934 	bool an_changes = false;
935 	u8 an_disable_admin;
936 	u8 an_disable_cap;
937 	bool an_disable;
938 	u32 link_modes;
939 	u8 an_status;
940 	u32 speed;
941 	int err;
942 
943 	speed = link_ksettings->base.speed;
944 
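	/* with autoneg, honor the advertised mask; otherwise select every mode matching the requested speed */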
945 	link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
946 		mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
947 		mlx5e_ethtool2ptys_speed_link(speed);
948 
949 	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
950 	if (err) {
951 		netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
952 			   __func__, err);
953 		goto out;
954 	}
955 
956 	link_modes = link_modes & eth_proto_cap;
957 	if (!link_modes) {
958 		netdev_err(netdev, "%s: Not supported link mode(s) requested",
959 			   __func__);
960 		err = -EINVAL;
961 		goto out;
962 	}
963 
964 	err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
965 	if (err) {
966 		netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
967 			   __func__, err);
968 		goto out;
969 	}
970 
971 	mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
972 				&an_disable_cap, &an_disable_admin);
973 
974 	an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
975 	an_changes = ((!an_disable && an_disable_admin) ||
976 		      (an_disable && !an_disable_admin));
977 
978 	if (!an_changes && link_modes == eth_proto_admin)
979 		goto out;
980 
981 	mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
982 	mlx5_toggle_port_link(mdev);
983 
984 out:
985 	return err;
986 }
987 
988 static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
989 {
990 	struct mlx5e_priv *priv = netdev_priv(netdev);
991 
992 	return sizeof(priv->channels.params.toeplitz_hash_key);
993 }
994 
995 static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
996 {
997 	return MLX5E_INDIR_RQT_SIZE;
998 }
999 
1000 static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
1001 			  u8 *hfunc)
1002 {
1003 	struct mlx5e_priv *priv = netdev_priv(netdev);
1004 
1005 	if (indir)
1006 		memcpy(indir, priv->channels.params.indirection_rqt,
1007 		       sizeof(priv->channels.params.indirection_rqt));
1008 
1009 	if (key)
1010 		memcpy(key, priv->channels.params.toeplitz_hash_key,
1011 		       sizeof(priv->channels.params.toeplitz_hash_key));
1012 
1013 	if (hfunc)
1014 		*hfunc = priv->channels.params.rss_hfunc;
1015 
1016 	return 0;
1017 }
1018 
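/* Re-apply the RSS hash configuration to all indirect TIRs using the current channel parameters. */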
1019 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
1020 {
1021 	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
1022 	struct mlx5_core_dev *mdev = priv->mdev;
1023 	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
1024 	int tt;
1025 
1026 	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
1027 
1028 	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
1029 		memset(tirc, 0, ctxlen);
1030 		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
1031 		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
1032 	}
1033 }
1034 
1035 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1036 			  const u8 *key, const u8 hfunc)
1037 {
1038 	struct mlx5e_priv *priv = netdev_priv(dev);
1039 	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1040 	bool hash_changed = false;
1041 	void *in;
1042 
1043 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
1044 	    (hfunc != ETH_RSS_HASH_XOR) &&
1045 	    (hfunc != ETH_RSS_HASH_TOP))
1046 		return -EINVAL;
1047 
1048 	in = mlx5_vzalloc(inlen);
1049 	if (!in)
1050 		return -ENOMEM;
1051 
1052 	mutex_lock(&priv->state_lock);
1053 
1054 	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
1055 	    hfunc != priv->channels.params.rss_hfunc) {
1056 		priv->channels.params.rss_hfunc = hfunc;
1057 		hash_changed = true;
1058 	}
1059 
1060 	if (indir) {
1061 		memcpy(priv->channels.params.indirection_rqt, indir,
1062 		       sizeof(priv->channels.params.indirection_rqt));
1063 
1064 		if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1065 			u32 rqtn = priv->indir_rqt.rqtn;
1066 			struct mlx5e_redirect_rqt_param rrp = {
1067 				.is_rss = true,
1068 				{
1069 					.rss = {
1070 						.hfunc = priv->channels.params.rss_hfunc,
1071 						.channels  = &priv->channels,
1072 					},
1073 				},
1074 			};
1075 
1076 			mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
1077 		}
1078 	}
1079 
1080 	if (key) {
1081 		memcpy(priv->channels.params.toeplitz_hash_key, key,
1082 		       sizeof(priv->channels.params.toeplitz_hash_key));
1083 		hash_changed = hash_changed ||
1084 			       priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
1085 	}
1086 
1087 	if (hash_changed)
1088 		mlx5e_modify_tirs_hash(priv, in, inlen);
1089 
1090 	mutex_unlock(&priv->state_lock);
1091 
1092 	kvfree(in);
1093 
1094 	return 0;
1095 }
1096 
1097 static int mlx5e_get_rxnfc(struct net_device *netdev,
1098 			   struct ethtool_rxnfc *info, u32 *rule_locs)
1099 {
1100 	struct mlx5e_priv *priv = netdev_priv(netdev);
1101 	int err = 0;
1102 
1103 	switch (info->cmd) {
1104 	case ETHTOOL_GRXRINGS:
1105 		info->data = priv->channels.params.num_channels;
1106 		break;
1107 	case ETHTOOL_GRXCLSRLCNT:
1108 		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
1109 		break;
1110 	case ETHTOOL_GRXCLSRULE:
1111 		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
1112 		break;
1113 	case ETHTOOL_GRXCLSRLALL:
1114 		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
1115 		break;
1116 	default:
1117 		err = -EOPNOTSUPP;
1118 		break;
1119 	}
1120 
1121 	return err;
1122 }
1123 
1124 static int mlx5e_get_tunable(struct net_device *dev,
1125 			     const struct ethtool_tunable *tuna,
1126 			     void *data)
1127 {
1128 	const struct mlx5e_priv *priv = netdev_priv(dev);
1129 	int err = 0;
1130 
1131 	switch (tuna->id) {
1132 	case ETHTOOL_TX_COPYBREAK:
1133 		*(u32 *)data = priv->channels.params.tx_max_inline;
1134 		break;
1135 	default:
1136 		err = -EINVAL;
1137 		break;
1138 	}
1139 
1140 	return err;
1141 }
1142 
1143 static int mlx5e_set_tunable(struct net_device *dev,
1144 			     const struct ethtool_tunable *tuna,
1145 			     const void *data)
1146 {
1147 	struct mlx5e_priv *priv = netdev_priv(dev);
1148 	struct mlx5_core_dev *mdev = priv->mdev;
1149 	struct mlx5e_channels new_channels = {};
1150 	int err = 0;
1151 	u32 val;
1152 
1153 	mutex_lock(&priv->state_lock);
1154 
1155 	switch (tuna->id) {
1156 	case ETHTOOL_TX_COPYBREAK:
1157 		val = *(u32 *)data;
1158 		if (val > mlx5e_get_max_inline_cap(mdev)) {
1159 			err = -EINVAL;
1160 			break;
1161 		}
1162 
1163 		new_channels.params = priv->channels.params;
1164 		new_channels.params.tx_max_inline = val;
1165 
1166 		if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1167 			priv->channels.params = new_channels.params;
1168 			break;
1169 		}
1170 
1171 		err = mlx5e_open_channels(priv, &new_channels);
1172 		if (err)
1173 			break;
1174 		mlx5e_switch_priv_channels(priv, &new_channels, NULL);
1175 
1176 		break;
1177 	default:
1178 		err = -EINVAL;
1179 		break;
1180 	}
1181 
1182 	mutex_unlock(&priv->state_lock);
1183 	return err;
1184 }
1185 
1186 static void mlx5e_get_pauseparam(struct net_device *netdev,
1187 				 struct ethtool_pauseparam *pauseparam)
1188 {
1189 	struct mlx5e_priv *priv    = netdev_priv(netdev);
1190 	struct mlx5_core_dev *mdev = priv->mdev;
1191 	int err;
1192 
1193 	err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
1194 				    &pauseparam->tx_pause);
1195 	if (err) {
1196 		netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
1197 			   __func__, err);
1198 	}
1199 }
1200 
1201 static int mlx5e_set_pauseparam(struct net_device *netdev,
1202 				struct ethtool_pauseparam *pauseparam)
1203 {
1204 	struct mlx5e_priv *priv    = netdev_priv(netdev);
1205 	struct mlx5_core_dev *mdev = priv->mdev;
1206 	int err;
1207 
1208 	if (pauseparam->autoneg)
1209 		return -EINVAL;
1210 
1211 	err = mlx5_set_port_pause(mdev,
1212 				  pauseparam->rx_pause ? 1 : 0,
1213 				  pauseparam->tx_pause ? 1 : 0);
1214 	if (err) {
1215 		netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
1216 			   __func__, err);
1217 	}
1218 
1219 	return err;
1220 }
1221 
1222 static int mlx5e_get_ts_info(struct net_device *dev,
1223 			     struct ethtool_ts_info *info)
1224 {
1225 	struct mlx5e_priv *priv = netdev_priv(dev);
1226 	int ret;
1227 
1228 	ret = ethtool_op_get_ts_info(dev, info);
1229 	if (ret)
1230 		return ret;
1231 
1232 	info->phc_index = priv->tstamp.ptp ?
1233 			  ptp_clock_index(priv->tstamp.ptp) : -1;
1234 
1235 	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
1236 		return 0;
1237 
1238 	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
1239 				 SOF_TIMESTAMPING_RX_HARDWARE |
1240 				 SOF_TIMESTAMPING_RAW_HARDWARE;
1241 
1242 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
1243 			 BIT(HWTSTAMP_TX_ON);
1244 
1245 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
1246 			   BIT(HWTSTAMP_FILTER_ALL);
1247 
1248 	return 0;
1249 }
1250 
1251 static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
1252 {
1253 	__u32 ret = 0;
1254 
1255 	if (MLX5_CAP_GEN(mdev, wol_g))
1256 		ret |= WAKE_MAGIC;
1257 
1258 	if (MLX5_CAP_GEN(mdev, wol_s))
1259 		ret |= WAKE_MAGICSECURE;
1260 
1261 	if (MLX5_CAP_GEN(mdev, wol_a))
1262 		ret |= WAKE_ARP;
1263 
1264 	if (MLX5_CAP_GEN(mdev, wol_b))
1265 		ret |= WAKE_BCAST;
1266 
1267 	if (MLX5_CAP_GEN(mdev, wol_m))
1268 		ret |= WAKE_MCAST;
1269 
1270 	if (MLX5_CAP_GEN(mdev, wol_u))
1271 		ret |= WAKE_UCAST;
1272 
1273 	if (MLX5_CAP_GEN(mdev, wol_p))
1274 		ret |= WAKE_PHY;
1275 
1276 	return ret;
1277 }
1278 
1279 static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
1280 {
1281 	__u32 ret = 0;
1282 
1283 	if (mode & MLX5_WOL_MAGIC)
1284 		ret |= WAKE_MAGIC;
1285 
1286 	if (mode & MLX5_WOL_SECURED_MAGIC)
1287 		ret |= WAKE_MAGICSECURE;
1288 
1289 	if (mode & MLX5_WOL_ARP)
1290 		ret |= WAKE_ARP;
1291 
1292 	if (mode & MLX5_WOL_BROADCAST)
1293 		ret |= WAKE_BCAST;
1294 
1295 	if (mode & MLX5_WOL_MULTICAST)
1296 		ret |= WAKE_MCAST;
1297 
1298 	if (mode & MLX5_WOL_UNICAST)
1299 		ret |= WAKE_UCAST;
1300 
1301 	if (mode & MLX5_WOL_PHY_ACTIVITY)
1302 		ret |= WAKE_PHY;
1303 
1304 	return ret;
1305 }
1306 
1307 static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
1308 {
1309 	u8 ret = 0;
1310 
1311 	if (mode & WAKE_MAGIC)
1312 		ret |= MLX5_WOL_MAGIC;
1313 
1314 	if (mode & WAKE_MAGICSECURE)
1315 		ret |= MLX5_WOL_SECURED_MAGIC;
1316 
1317 	if (mode & WAKE_ARP)
1318 		ret |= MLX5_WOL_ARP;
1319 
1320 	if (mode & WAKE_BCAST)
1321 		ret |= MLX5_WOL_BROADCAST;
1322 
1323 	if (mode & WAKE_MCAST)
1324 		ret |= MLX5_WOL_MULTICAST;
1325 
1326 	if (mode & WAKE_UCAST)
1327 		ret |= MLX5_WOL_UNICAST;
1328 
1329 	if (mode & WAKE_PHY)
1330 		ret |= MLX5_WOL_PHY_ACTIVITY;
1331 
1332 	return ret;
1333 }
1334 
1335 static void mlx5e_get_wol(struct net_device *netdev,
1336 			  struct ethtool_wolinfo *wol)
1337 {
1338 	struct mlx5e_priv *priv = netdev_priv(netdev);
1339 	struct mlx5_core_dev *mdev = priv->mdev;
1340 	u8 mlx5_wol_mode;
1341 	int err;
1342 
1343 	memset(wol, 0, sizeof(*wol));
1344 
1345 	wol->supported = mlx5e_get_wol_supported(mdev);
1346 	if (!wol->supported)
1347 		return;
1348 
1349 	err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
1350 	if (err)
1351 		return;
1352 
1353 	wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
1354 }
1355 
1356 static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1357 {
1358 	struct mlx5e_priv *priv = netdev_priv(netdev);
1359 	struct mlx5_core_dev *mdev = priv->mdev;
1360 	__u32 wol_supported = mlx5e_get_wol_supported(mdev);
1361 	u32 mlx5_wol_mode;
1362 
1363 	if (!wol_supported)
1364 		return -EOPNOTSUPP;
1365 
1366 	if (wol->wolopts & ~wol_supported)
1367 		return -EINVAL;
1368 
1369 	mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
1370 
1371 	return mlx5_set_port_wol(mdev, mlx5_wol_mode);
1372 }
1373 
1374 static int mlx5e_set_phys_id(struct net_device *dev,
1375 			     enum ethtool_phys_id_state state)
1376 {
1377 	struct mlx5e_priv *priv = netdev_priv(dev);
1378 	struct mlx5_core_dev *mdev = priv->mdev;
1379 	u16 beacon_duration;
1380 
1381 	if (!MLX5_CAP_GEN(mdev, beacon_led))
1382 		return -EOPNOTSUPP;
1383 
1384 	switch (state) {
1385 	case ETHTOOL_ID_ACTIVE:
1386 		beacon_duration = MLX5_BEACON_DURATION_INF;
1387 		break;
1388 	case ETHTOOL_ID_INACTIVE:
1389 		beacon_duration = MLX5_BEACON_DURATION_OFF;
1390 		break;
1391 	default:
1392 		return -EOPNOTSUPP;
1393 	}
1394 
1395 	return mlx5_set_port_beacon(mdev, beacon_duration);
1396 }
1397 
1398 static int mlx5e_get_module_info(struct net_device *netdev,
1399 				 struct ethtool_modinfo *modinfo)
1400 {
1401 	struct mlx5e_priv *priv = netdev_priv(netdev);
1402 	struct mlx5_core_dev *dev = priv->mdev;
1403 	int size_read = 0;
1404 	u8 data[4];
1405 
1406 	size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
1407 	if (size_read < 2)
1408 		return -EIO;
1409 
1410 	/* data[0] = identifier byte */
1411 	switch (data[0]) {
1412 	case MLX5_MODULE_ID_QSFP:
1413 		modinfo->type       = ETH_MODULE_SFF_8436;
1414 		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
1415 		break;
1416 	case MLX5_MODULE_ID_QSFP_PLUS:
1417 	case MLX5_MODULE_ID_QSFP28:
1418 		/* data[1] = revision id */
1419 		if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
1420 			modinfo->type       = ETH_MODULE_SFF_8636;
1421 			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
1422 		} else {
1423 			modinfo->type       = ETH_MODULE_SFF_8436;
1424 			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
1425 		}
1426 		break;
1427 	case MLX5_MODULE_ID_SFP:
1428 		modinfo->type       = ETH_MODULE_SFF_8472;
1429 		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
1430 		break;
1431 	default:
1432 		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
1433 			   __func__, data[0]);
1434 		return -EINVAL;
1435 	}
1436 
1437 	return 0;
1438 }
1439 
1440 static int mlx5e_get_module_eeprom(struct net_device *netdev,
1441 				   struct ethtool_eeprom *ee,
1442 				   u8 *data)
1443 {
1444 	struct mlx5e_priv *priv = netdev_priv(netdev);
1445 	struct mlx5_core_dev *mdev = priv->mdev;
1446 	int offset = ee->offset;
1447 	int size_read;
1448 	int i = 0;
1449 
1450 	if (!ee->len)
1451 		return -EINVAL;
1452 
1453 	memset(data, 0, ee->len);
1454 
1455 	while (i < ee->len) {
1456 		size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
1457 						     data + i);
1458 
1459 		if (!size_read)
1460 			/* Done reading */
1461 			return 0;
1462 
1463 		if (size_read < 0) {
1464 			netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
1465 				   __func__, size_read);
1466 			return 0;
1467 		}
1468 
1469 		i += size_read;
1470 		offset += size_read;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
1477 
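/*
 * Switch RX completion moderation between EQE-based and CQE-based period
 * modes. When the device is up, the channels are reopened with the new mode.
 */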
1478 static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
1479 {
1480 	struct mlx5e_priv *priv = netdev_priv(netdev);
1481 	struct mlx5_core_dev *mdev = priv->mdev;
1482 	struct mlx5e_channels new_channels = {};
1483 	bool rx_mode_changed;
1484 	u8 rx_cq_period_mode;
1485 	int err = 0;
1486 
1487 	rx_cq_period_mode = enable ?
1488 		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
1489 		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1490 	rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
1491 
1492 	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
1493 	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
1494 		return -EOPNOTSUPP;
1495 
1496 	if (!rx_mode_changed)
1497 		return 0;
1498 
1499 	new_channels.params = priv->channels.params;
1500 	mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
1501 
1502 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1503 		priv->channels.params = new_channels.params;
1504 		return 0;
1505 	}
1506 
1507 	err = mlx5e_open_channels(priv, &new_channels);
1508 	if (err)
1509 		return err;
1510 
1511 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
1512 	return 0;
1513 }
1514 
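/*
 * Enable/disable RX CQE compression. Must be called with priv->state_lock
 * held; reopens the channels when the device is up.
 */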
1515 int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
1516 {
1517 	bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
1518 	struct mlx5e_channels new_channels = {};
1519 	int err = 0;
1520 
1521 	if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
1522 		return new_val ? -EOPNOTSUPP : 0;
1523 
1524 	if (curr_val == new_val)
1525 		return 0;
1526 
1527 	new_channels.params = priv->channels.params;
1528 	MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
1529 
1530 	mlx5e_set_rq_type_params(priv->mdev, &new_channels.params,
1531 				 new_channels.params.rq_wq_type);
1532 
1533 	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1534 		priv->channels.params = new_channels.params;
1535 		return 0;
1536 	}
1537 
1538 	err = mlx5e_open_channels(priv, &new_channels);
1539 	if (err)
1540 		return err;
1541 
1542 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
1543 	return 0;
1544 }
1545 
1546 static int set_pflag_rx_cqe_compress(struct net_device *netdev,
1547 				     bool enable)
1548 {
1549 	struct mlx5e_priv *priv = netdev_priv(netdev);
1550 	struct mlx5_core_dev *mdev = priv->mdev;
1551 
1552 	if (!MLX5_CAP_GEN(mdev, cqe_compression))
1553 		return -EOPNOTSUPP;
1554 
1555 	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
1556 		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
1557 		return -EINVAL;
1558 	}
1559 
1560 	mlx5e_modify_rx_cqe_compression_locked(priv, enable);
1561 	priv->channels.params.rx_cqe_compress_def = enable;
1562 
1563 	return 0;
1564 }
1565 
1566 static int mlx5e_handle_pflag(struct net_device *netdev,
1567 			      u32 wanted_flags,
1568 			      enum mlx5e_priv_flag flag,
1569 			      mlx5e_pflag_handler pflag_handler)
1570 {
1571 	struct mlx5e_priv *priv = netdev_priv(netdev);
1572 	bool enable = !!(wanted_flags & flag);
1573 	u32 changes = wanted_flags ^ priv->channels.params.pflags;
1574 	int err;
1575 
1576 	if (!(changes & flag))
1577 		return 0;
1578 
1579 	err = pflag_handler(netdev, enable);
1580 	if (err) {
1581 		netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
1582 			   enable ? "Enable" : "Disable", flag, err);
1583 		return err;
1584 	}
1585 
1586 	MLX5E_SET_PFLAG(&priv->channels.params, flag, enable);
1587 	return 0;
1588 }
1589 
1590 static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
1591 {
1592 	struct mlx5e_priv *priv = netdev_priv(netdev);
1593 	int err;
1594 
1595 	mutex_lock(&priv->state_lock);
1596 	err = mlx5e_handle_pflag(netdev, pflags,
1597 				 MLX5E_PFLAG_RX_CQE_BASED_MODER,
1598 				 set_pflag_rx_cqe_based_moder);
1599 	if (err)
1600 		goto out;
1601 
1602 	err = mlx5e_handle_pflag(netdev, pflags,
1603 				 MLX5E_PFLAG_RX_CQE_COMPRESS,
1604 				 set_pflag_rx_cqe_compress);
1605 
1606 out:
1607 	mutex_unlock(&priv->state_lock);
1608 	return err;
1609 }
1610 
1611 static u32 mlx5e_get_priv_flags(struct net_device *netdev)
1612 {
1613 	struct mlx5e_priv *priv = netdev_priv(netdev);
1614 
1615 	return priv->channels.params.pflags;
1616 }
1617 
1618 static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1619 {
1620 	int err = 0;
1621 	struct mlx5e_priv *priv = netdev_priv(dev);
1622 
1623 	switch (cmd->cmd) {
1624 	case ETHTOOL_SRXCLSRLINS:
1625 		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
1626 		break;
1627 	case ETHTOOL_SRXCLSRLDEL:
1628 		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
1629 		break;
1630 	default:
1631 		err = -EOPNOTSUPP;
1632 		break;
1633 	}
1634 
1635 	return err;
1636 }
1637 
1638 const struct ethtool_ops mlx5e_ethtool_ops = {
1639 	.get_drvinfo       = mlx5e_get_drvinfo,
1640 	.get_link          = ethtool_op_get_link,
1641 	.get_strings       = mlx5e_get_strings,
1642 	.get_sset_count    = mlx5e_get_sset_count,
1643 	.get_ethtool_stats = mlx5e_get_ethtool_stats,
1644 	.get_ringparam     = mlx5e_get_ringparam,
1645 	.set_ringparam     = mlx5e_set_ringparam,
1646 	.get_channels      = mlx5e_get_channels,
1647 	.set_channels      = mlx5e_set_channels,
1648 	.get_coalesce      = mlx5e_get_coalesce,
1649 	.set_coalesce      = mlx5e_set_coalesce,
1650 	.get_link_ksettings  = mlx5e_get_link_ksettings,
1651 	.set_link_ksettings  = mlx5e_set_link_ksettings,
1652 	.get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
1653 	.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
1654 	.get_rxfh          = mlx5e_get_rxfh,
1655 	.set_rxfh          = mlx5e_set_rxfh,
1656 	.get_rxnfc         = mlx5e_get_rxnfc,
1657 	.set_rxnfc         = mlx5e_set_rxnfc,
1658 	.get_tunable       = mlx5e_get_tunable,
1659 	.set_tunable       = mlx5e_set_tunable,
1660 	.get_pauseparam    = mlx5e_get_pauseparam,
1661 	.set_pauseparam    = mlx5e_set_pauseparam,
1662 	.get_ts_info       = mlx5e_get_ts_info,
1663 	.set_phys_id       = mlx5e_set_phys_id,
1664 	.get_wol	   = mlx5e_get_wol,
1665 	.set_wol	   = mlx5e_set_wol,
1666 	.get_module_info   = mlx5e_get_module_info,
1667 	.get_module_eeprom = mlx5e_get_module_eeprom,
1668 	.get_priv_flags    = mlx5e_get_priv_flags,
1669 	.set_priv_flags    = mlx5e_set_priv_flags,
1670 	.self_test         = mlx5e_self_test,
1671 };
1672