xref: /linux/drivers/net/ethernet/qlogic/qede/qede_main.c (revision 3ad0876554cafa368f574d4d408468510543e9ff)
1 /* QLogic qede NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/pci.h>
34 #include <linux/version.h>
35 #include <linux/device.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/errno.h>
40 #include <linux/list.h>
41 #include <linux/string.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/interrupt.h>
44 #include <asm/byteorder.h>
45 #include <asm/param.h>
46 #include <linux/io.h>
47 #include <linux/netdev_features.h>
48 #include <linux/udp.h>
49 #include <linux/tcp.h>
50 #include <net/udp_tunnel.h>
51 #include <linux/ip.h>
52 #include <net/ipv6.h>
53 #include <net/tcp.h>
54 #include <linux/if_ether.h>
55 #include <linux/if_vlan.h>
56 #include <linux/pkt_sched.h>
57 #include <linux/ethtool.h>
58 #include <linux/in.h>
59 #include <linux/random.h>
60 #include <net/ip6_checksum.h>
61 #include <linux/bitops.h>
62 #include <linux/vmalloc.h>
63 #include "qede.h"
64 #include "qede_ptp.h"
65 
66 static char version[] =
67 	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
68 
69 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
72 
73 static uint debug;
74 module_param(debug, uint, 0);
75 MODULE_PARM_DESC(debug, " Default debug msglevel");
76 
77 static const struct qed_eth_ops *qed_ops;
78 
79 #define CHIP_NUM_57980S_40		0x1634
80 #define CHIP_NUM_57980S_10		0x1666
81 #define CHIP_NUM_57980S_MF		0x1636
82 #define CHIP_NUM_57980S_100		0x1644
83 #define CHIP_NUM_57980S_50		0x1654
84 #define CHIP_NUM_57980S_25		0x1656
85 #define CHIP_NUM_57980S_IOV		0x1664
86 #define CHIP_NUM_AH			0x8070
87 #define CHIP_NUM_AH_IOV			0x8090
88 
89 #ifndef PCI_DEVICE_ID_NX2_57980E
90 #define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
91 #define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
92 #define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
93 #define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
94 #define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
95 #define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
96 #define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
97 #define PCI_DEVICE_ID_AH		CHIP_NUM_AH
98 #define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV
99 
100 #endif
101 
102 enum qede_pci_private {
103 	QEDE_PRIVATE_PF,
104 	QEDE_PRIVATE_VF
105 };
106 
107 static const struct pci_device_id qede_pci_tbl[] = {
108 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
109 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
110 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
111 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
112 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
113 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
114 #ifdef CONFIG_QED_SRIOV
115 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
116 #endif
117 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
118 #ifdef CONFIG_QED_SRIOV
119 	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
120 #endif
121 	{ 0 }
122 };
123 
124 MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
125 
126 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
127 
128 #define TX_TIMEOUT		(5 * HZ)
129 
130 /* Utilize last protocol index for XDP */
131 #define XDP_PI	11
132 
133 static void qede_remove(struct pci_dev *pdev);
134 static void qede_shutdown(struct pci_dev *pdev);
135 static void qede_link_update(void *dev, struct qed_link_output *link);
136 
137 /* The qede lock is used to protect driver state change and driver flows that
138  * are not reentrant.
139  */
140 void __qede_lock(struct qede_dev *edev)
141 {
142 	mutex_lock(&edev->qede_lock);
143 }
144 
145 void __qede_unlock(struct qede_dev *edev)
146 {
147 	mutex_unlock(&edev->qede_lock);
148 }
149 
150 #ifdef CONFIG_QED_SRIOV
151 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
152 			    __be16 vlan_proto)
153 {
154 	struct qede_dev *edev = netdev_priv(ndev);
155 
156 	if (vlan > 4095) {
157 		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
158 		return -EINVAL;
159 	}
160 
161 	if (vlan_proto != htons(ETH_P_8021Q))
162 		return -EPROTONOSUPPORT;
163 
164 	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
165 		   vlan, vf);
166 
167 	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
168 }
169 
170 static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
171 {
172 	struct qede_dev *edev = netdev_priv(ndev);
173 
174 	DP_VERBOSE(edev, QED_MSG_IOV,
175 		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
176 		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
177 
178 	if (!is_valid_ether_addr(mac)) {
179 		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
180 		return -EINVAL;
181 	}
182 
183 	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
184 }
185 
186 static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
187 {
188 	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
189 	struct qed_dev_info *qed_info = &edev->dev_info.common;
190 	struct qed_update_vport_params *vport_params;
191 	int rc;
192 
193 	vport_params = vzalloc(sizeof(*vport_params));
194 	if (!vport_params)
195 		return -ENOMEM;
196 	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
197 
198 	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
199 
200 	/* Enable/Disable Tx switching for PF */
201 	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
202 	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
203 		vport_params->vport_id = 0;
204 		vport_params->update_tx_switching_flg = 1;
205 		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
206 		edev->ops->vport_update(edev->cdev, vport_params);
207 	}
208 
209 	vfree(vport_params);
210 	return rc;
211 }
212 #endif
213 
214 static struct pci_driver qede_pci_driver = {
215 	.name = "qede",
216 	.id_table = qede_pci_tbl,
217 	.probe = qede_probe,
218 	.remove = qede_remove,
219 	.shutdown = qede_shutdown,
220 #ifdef CONFIG_QED_SRIOV
221 	.sriov_configure = qede_sriov_configure,
222 #endif
223 };
224 
225 static struct qed_eth_cb_ops qede_ll_ops = {
226 	{
227 #ifdef CONFIG_RFS_ACCEL
228 		.arfs_filter_op = qede_arfs_filter_op,
229 #endif
230 		.link_update = qede_link_update,
231 	},
232 	.force_mac = qede_force_mac,
233 	.ports_update = qede_udp_ports_update,
234 };
235 
236 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
237 			     void *ptr)
238 {
239 	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
240 	struct ethtool_drvinfo drvinfo;
241 	struct qede_dev *edev;
242 
243 	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
244 		goto done;
245 
246 	/* Check whether this is a qede device */
247 	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
248 		goto done;
249 
250 	memset(&drvinfo, 0, sizeof(drvinfo));
251 	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
252 	if (strcmp(drvinfo.driver, "qede"))
253 		goto done;
254 	edev = netdev_priv(ndev);
255 
256 	switch (event) {
257 	case NETDEV_CHANGENAME:
258 		/* Notify qed of the name change */
259 		if (!edev->ops || !edev->ops->common)
260 			goto done;
261 		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
262 		break;
263 	case NETDEV_CHANGEADDR:
264 		edev = netdev_priv(ndev);
265 		qede_rdma_event_changeaddr(edev);
266 		break;
267 	}
268 
269 done:
270 	return NOTIFY_DONE;
271 }
272 
273 static struct notifier_block qede_netdev_notifier = {
274 	.notifier_call = qede_netdev_event,
275 };
276 
277 static
278 int __init qede_init(void)
279 {
280 	int ret;
281 
282 	pr_info("qede_init: %s\n", version);
283 
284 	qed_ops = qed_get_eth_ops();
285 	if (!qed_ops) {
286 		pr_notice("Failed to get qed ethtool operations\n");
287 		return -EINVAL;
288 	}
289 
290 	/* Must register notifier before pci ops, since we might miss
291 	 * interface rename after pci probe and netdev registration.
292 	 */
293 	ret = register_netdevice_notifier(&qede_netdev_notifier);
294 	if (ret) {
295 		pr_notice("Failed to register netdevice_notifier\n");
296 		qed_put_eth_ops();
297 		return -EINVAL;
298 	}
299 
300 	ret = pci_register_driver(&qede_pci_driver);
301 	if (ret) {
302 		pr_notice("Failed to register driver\n");
303 		unregister_netdevice_notifier(&qede_netdev_notifier);
304 		qed_put_eth_ops();
305 		return -EINVAL;
306 	}
307 
308 	return 0;
309 }
310 
311 static void __exit qede_cleanup(void)
312 {
313 	if (debug & QED_LOG_INFO_MASK)
314 		pr_info("qede_cleanup called\n");
315 
316 	unregister_netdevice_notifier(&qede_netdev_notifier);
317 	pci_unregister_driver(&qede_pci_driver);
318 	qed_put_eth_ops();
319 }
320 
321 module_init(qede_init);
322 module_exit(qede_cleanup);
323 
324 static int qede_open(struct net_device *ndev);
325 static int qede_close(struct net_device *ndev);
326 
327 void qede_fill_by_demand_stats(struct qede_dev *edev)
328 {
329 	struct qede_stats_common *p_common = &edev->stats.common;
330 	struct qed_eth_stats stats;
331 
332 	edev->ops->get_vport_stats(edev->cdev, &stats);
333 
334 	p_common->no_buff_discards = stats.common.no_buff_discards;
335 	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
336 	p_common->ttl0_discard = stats.common.ttl0_discard;
337 	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
338 	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
339 	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
340 	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
341 	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
342 	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
343 	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
344 	p_common->mac_filter_discards = stats.common.mac_filter_discards;
345 
346 	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
347 	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
348 	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
349 	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
350 	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
351 	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
352 	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
353 	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
354 	p_common->coalesced_events = stats.common.tpa_coalesced_events;
355 	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
356 	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
357 	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
358 
359 	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
360 	p_common->rx_65_to_127_byte_packets =
361 	    stats.common.rx_65_to_127_byte_packets;
362 	p_common->rx_128_to_255_byte_packets =
363 	    stats.common.rx_128_to_255_byte_packets;
364 	p_common->rx_256_to_511_byte_packets =
365 	    stats.common.rx_256_to_511_byte_packets;
366 	p_common->rx_512_to_1023_byte_packets =
367 	    stats.common.rx_512_to_1023_byte_packets;
368 	p_common->rx_1024_to_1518_byte_packets =
369 	    stats.common.rx_1024_to_1518_byte_packets;
370 	p_common->rx_crc_errors = stats.common.rx_crc_errors;
371 	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
372 	p_common->rx_pause_frames = stats.common.rx_pause_frames;
373 	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
374 	p_common->rx_align_errors = stats.common.rx_align_errors;
375 	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
376 	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
377 	p_common->rx_jabbers = stats.common.rx_jabbers;
378 	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
379 	p_common->rx_fragments = stats.common.rx_fragments;
380 	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
381 	p_common->tx_65_to_127_byte_packets =
382 	    stats.common.tx_65_to_127_byte_packets;
383 	p_common->tx_128_to_255_byte_packets =
384 	    stats.common.tx_128_to_255_byte_packets;
385 	p_common->tx_256_to_511_byte_packets =
386 	    stats.common.tx_256_to_511_byte_packets;
387 	p_common->tx_512_to_1023_byte_packets =
388 	    stats.common.tx_512_to_1023_byte_packets;
389 	p_common->tx_1024_to_1518_byte_packets =
390 	    stats.common.tx_1024_to_1518_byte_packets;
391 	p_common->tx_pause_frames = stats.common.tx_pause_frames;
392 	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
393 	p_common->brb_truncates = stats.common.brb_truncates;
394 	p_common->brb_discards = stats.common.brb_discards;
395 	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
396 
397 	if (QEDE_IS_BB(edev)) {
398 		struct qede_stats_bb *p_bb = &edev->stats.bb;
399 
400 		p_bb->rx_1519_to_1522_byte_packets =
401 		    stats.bb.rx_1519_to_1522_byte_packets;
402 		p_bb->rx_1519_to_2047_byte_packets =
403 		    stats.bb.rx_1519_to_2047_byte_packets;
404 		p_bb->rx_2048_to_4095_byte_packets =
405 		    stats.bb.rx_2048_to_4095_byte_packets;
406 		p_bb->rx_4096_to_9216_byte_packets =
407 		    stats.bb.rx_4096_to_9216_byte_packets;
408 		p_bb->rx_9217_to_16383_byte_packets =
409 		    stats.bb.rx_9217_to_16383_byte_packets;
410 		p_bb->tx_1519_to_2047_byte_packets =
411 		    stats.bb.tx_1519_to_2047_byte_packets;
412 		p_bb->tx_2048_to_4095_byte_packets =
413 		    stats.bb.tx_2048_to_4095_byte_packets;
414 		p_bb->tx_4096_to_9216_byte_packets =
415 		    stats.bb.tx_4096_to_9216_byte_packets;
416 		p_bb->tx_9217_to_16383_byte_packets =
417 		    stats.bb.tx_9217_to_16383_byte_packets;
418 		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
419 		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
420 	} else {
421 		struct qede_stats_ah *p_ah = &edev->stats.ah;
422 
423 		p_ah->rx_1519_to_max_byte_packets =
424 		    stats.ah.rx_1519_to_max_byte_packets;
425 		p_ah->tx_1519_to_max_byte_packets =
426 		    stats.ah.tx_1519_to_max_byte_packets;
427 	}
428 }
429 
430 static void qede_get_stats64(struct net_device *dev,
431 			     struct rtnl_link_stats64 *stats)
432 {
433 	struct qede_dev *edev = netdev_priv(dev);
434 	struct qede_stats_common *p_common;
435 
436 	qede_fill_by_demand_stats(edev);
437 	p_common = &edev->stats.common;
438 
439 	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
440 			    p_common->rx_bcast_pkts;
441 	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
442 			    p_common->tx_bcast_pkts;
443 
444 	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
445 			  p_common->rx_bcast_bytes;
446 	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
447 			  p_common->tx_bcast_bytes;
448 
449 	stats->tx_errors = p_common->tx_err_drop_pkts;
450 	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
451 
452 	stats->rx_fifo_errors = p_common->no_buff_discards;
453 
454 	if (QEDE_IS_BB(edev))
455 		stats->collisions = edev->stats.bb.tx_total_collisions;
456 	stats->rx_crc_errors = p_common->rx_crc_errors;
457 	stats->rx_frame_errors = p_common->rx_align_errors;
458 }
459 
460 #ifdef CONFIG_QED_SRIOV
461 static int qede_get_vf_config(struct net_device *dev, int vfidx,
462 			      struct ifla_vf_info *ivi)
463 {
464 	struct qede_dev *edev = netdev_priv(dev);
465 
466 	if (!edev->ops)
467 		return -EINVAL;
468 
469 	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
470 }
471 
472 static int qede_set_vf_rate(struct net_device *dev, int vfidx,
473 			    int min_tx_rate, int max_tx_rate)
474 {
475 	struct qede_dev *edev = netdev_priv(dev);
476 
477 	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
478 					max_tx_rate);
479 }
480 
481 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
482 {
483 	struct qede_dev *edev = netdev_priv(dev);
484 
485 	if (!edev->ops)
486 		return -EINVAL;
487 
488 	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
489 }
490 
491 static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
492 				  int link_state)
493 {
494 	struct qede_dev *edev = netdev_priv(dev);
495 
496 	if (!edev->ops)
497 		return -EINVAL;
498 
499 	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
500 }
501 
502 static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
503 {
504 	struct qede_dev *edev = netdev_priv(dev);
505 
506 	if (!edev->ops)
507 		return -EINVAL;
508 
509 	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
510 }
511 #endif
512 
513 static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
514 {
515 	struct qede_dev *edev = netdev_priv(dev);
516 
517 	if (!netif_running(dev))
518 		return -EAGAIN;
519 
520 	switch (cmd) {
521 	case SIOCSHWTSTAMP:
522 		return qede_ptp_hw_ts(edev, ifr);
523 	default:
524 		DP_VERBOSE(edev, QED_MSG_DEBUG,
525 			   "default IOCTL cmd 0x%x\n", cmd);
526 		return -EOPNOTSUPP;
527 	}
528 
529 	return 0;
530 }
531 
532 static const struct net_device_ops qede_netdev_ops = {
533 	.ndo_open = qede_open,
534 	.ndo_stop = qede_close,
535 	.ndo_start_xmit = qede_start_xmit,
536 	.ndo_set_rx_mode = qede_set_rx_mode,
537 	.ndo_set_mac_address = qede_set_mac_addr,
538 	.ndo_validate_addr = eth_validate_addr,
539 	.ndo_change_mtu = qede_change_mtu,
540 	.ndo_do_ioctl = qede_ioctl,
541 #ifdef CONFIG_QED_SRIOV
542 	.ndo_set_vf_mac = qede_set_vf_mac,
543 	.ndo_set_vf_vlan = qede_set_vf_vlan,
544 	.ndo_set_vf_trust = qede_set_vf_trust,
545 #endif
546 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
547 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
548 	.ndo_fix_features = qede_fix_features,
549 	.ndo_set_features = qede_set_features,
550 	.ndo_get_stats64 = qede_get_stats64,
551 #ifdef CONFIG_QED_SRIOV
552 	.ndo_set_vf_link_state = qede_set_vf_link_state,
553 	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
554 	.ndo_get_vf_config = qede_get_vf_config,
555 	.ndo_set_vf_rate = qede_set_vf_rate,
556 #endif
557 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
558 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
559 	.ndo_features_check = qede_features_check,
560 	.ndo_bpf = qede_xdp,
561 #ifdef CONFIG_RFS_ACCEL
562 	.ndo_rx_flow_steer = qede_rx_flow_steer,
563 #endif
564 };
565 
566 static const struct net_device_ops qede_netdev_vf_ops = {
567 	.ndo_open = qede_open,
568 	.ndo_stop = qede_close,
569 	.ndo_start_xmit = qede_start_xmit,
570 	.ndo_set_rx_mode = qede_set_rx_mode,
571 	.ndo_set_mac_address = qede_set_mac_addr,
572 	.ndo_validate_addr = eth_validate_addr,
573 	.ndo_change_mtu = qede_change_mtu,
574 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
575 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
576 	.ndo_fix_features = qede_fix_features,
577 	.ndo_set_features = qede_set_features,
578 	.ndo_get_stats64 = qede_get_stats64,
579 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
580 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
581 	.ndo_features_check = qede_features_check,
582 };
583 
584 static const struct net_device_ops qede_netdev_vf_xdp_ops = {
585 	.ndo_open = qede_open,
586 	.ndo_stop = qede_close,
587 	.ndo_start_xmit = qede_start_xmit,
588 	.ndo_set_rx_mode = qede_set_rx_mode,
589 	.ndo_set_mac_address = qede_set_mac_addr,
590 	.ndo_validate_addr = eth_validate_addr,
591 	.ndo_change_mtu = qede_change_mtu,
592 	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
593 	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
594 	.ndo_fix_features = qede_fix_features,
595 	.ndo_set_features = qede_set_features,
596 	.ndo_get_stats64 = qede_get_stats64,
597 	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
598 	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
599 	.ndo_features_check = qede_features_check,
600 	.ndo_bpf = qede_xdp,
601 };
602 
603 /* -------------------------------------------------------------------------
604  * START OF PROBE / REMOVE
605  * -------------------------------------------------------------------------
606  */
607 
608 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
609 					    struct pci_dev *pdev,
610 					    struct qed_dev_eth_info *info,
611 					    u32 dp_module, u8 dp_level)
612 {
613 	struct net_device *ndev;
614 	struct qede_dev *edev;
615 
616 	ndev = alloc_etherdev_mqs(sizeof(*edev),
617 				  info->num_queues, info->num_queues);
618 	if (!ndev) {
619 		pr_err("etherdev allocation failed\n");
620 		return NULL;
621 	}
622 
623 	edev = netdev_priv(ndev);
624 	edev->ndev = ndev;
625 	edev->cdev = cdev;
626 	edev->pdev = pdev;
627 	edev->dp_module = dp_module;
628 	edev->dp_level = dp_level;
629 	edev->ops = qed_ops;
630 	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
631 	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
632 
633 	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
634 		info->num_queues, info->num_queues);
635 
636 	SET_NETDEV_DEV(ndev, &pdev->dev);
637 
638 	memset(&edev->stats, 0, sizeof(edev->stats));
639 	memcpy(&edev->dev_info, info, sizeof(*info));
640 
641 	/* As ethtool doesn't have the ability to show WoL behavior as
642 	 * 'default', if device supports it declare it's enabled.
643 	 */
644 	if (edev->dev_info.common.wol_support)
645 		edev->wol_enabled = true;
646 
647 	INIT_LIST_HEAD(&edev->vlan_list);
648 
649 	return edev;
650 }
651 
652 static void qede_init_ndev(struct qede_dev *edev)
653 {
654 	struct net_device *ndev = edev->ndev;
655 	struct pci_dev *pdev = edev->pdev;
656 	bool udp_tunnel_enable = false;
657 	netdev_features_t hw_features;
658 
659 	pci_set_drvdata(pdev, ndev);
660 
661 	ndev->mem_start = edev->dev_info.common.pci_mem_start;
662 	ndev->base_addr = ndev->mem_start;
663 	ndev->mem_end = edev->dev_info.common.pci_mem_end;
664 	ndev->irq = edev->dev_info.common.pci_irq;
665 
666 	ndev->watchdog_timeo = TX_TIMEOUT;
667 
668 	if (IS_VF(edev)) {
669 		if (edev->dev_info.xdp_supported)
670 			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
671 		else
672 			ndev->netdev_ops = &qede_netdev_vf_ops;
673 	} else {
674 		ndev->netdev_ops = &qede_netdev_ops;
675 	}
676 
677 	qede_set_ethtool_ops(ndev);
678 
679 	ndev->priv_flags |= IFF_UNICAST_FLT;
680 
681 	/* user-changeble features */
682 	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
683 		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
684 		      NETIF_F_TSO | NETIF_F_TSO6;
685 
686 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1)
687 		hw_features |= NETIF_F_NTUPLE;
688 
689 	if (edev->dev_info.common.vxlan_enable ||
690 	    edev->dev_info.common.geneve_enable)
691 		udp_tunnel_enable = true;
692 
693 	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
694 		hw_features |= NETIF_F_TSO_ECN;
695 		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
696 					NETIF_F_SG | NETIF_F_TSO |
697 					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
698 					NETIF_F_RXCSUM;
699 	}
700 
701 	if (udp_tunnel_enable) {
702 		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
703 				NETIF_F_GSO_UDP_TUNNEL_CSUM);
704 		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
705 					  NETIF_F_GSO_UDP_TUNNEL_CSUM);
706 	}
707 
708 	if (edev->dev_info.common.gre_enable) {
709 		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
710 		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
711 					  NETIF_F_GSO_GRE_CSUM);
712 	}
713 
714 	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
715 			      NETIF_F_HIGHDMA;
716 	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
717 			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
718 			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
719 
720 	ndev->hw_features = hw_features;
721 
722 	/* MTU range: 46 - 9600 */
723 	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
724 	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
725 
726 	/* Set network device HW mac */
727 	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
728 
729 	ndev->mtu = edev->dev_info.common.mtu;
730 }
731 
732 /* This function converts from 32b param to two params of level and module
733  * Input 32b decoding:
734  * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
735  * 'happy' flow, e.g. memory allocation failed.
736  * b30 - enable all INFO prints. INFO prints are for major steps in the flow
737  * and provide important parameters.
738  * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
739  * module. VERBOSE prints are for tracking the specific flow in low level.
740  *
741  * Notice that the level should be that of the lowest required logs.
742  */
743 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
744 {
745 	*p_dp_level = QED_LEVEL_NOTICE;
746 	*p_dp_module = 0;
747 
748 	if (debug & QED_LOG_VERBOSE_MASK) {
749 		*p_dp_level = QED_LEVEL_VERBOSE;
750 		*p_dp_module = (debug & 0x3FFFFFFF);
751 	} else if (debug & QED_LOG_INFO_MASK) {
752 		*p_dp_level = QED_LEVEL_INFO;
753 	} else if (debug & QED_LOG_NOTICE_MASK) {
754 		*p_dp_level = QED_LEVEL_NOTICE;
755 	}
756 }
757 
758 static void qede_free_fp_array(struct qede_dev *edev)
759 {
760 	if (edev->fp_array) {
761 		struct qede_fastpath *fp;
762 		int i;
763 
764 		for_each_queue(i) {
765 			fp = &edev->fp_array[i];
766 
767 			kfree(fp->sb_info);
768 			/* Handle mem alloc failure case where qede_init_fp
769 			 * didn't register xdp_rxq_info yet.
770 			 * Implicit only (fp->type & QEDE_FASTPATH_RX)
771 			 */
772 			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
773 				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
774 			kfree(fp->rxq);
775 			kfree(fp->xdp_tx);
776 			kfree(fp->txq);
777 		}
778 		kfree(edev->fp_array);
779 	}
780 
781 	edev->num_queues = 0;
782 	edev->fp_num_tx = 0;
783 	edev->fp_num_rx = 0;
784 }
785 
786 static int qede_alloc_fp_array(struct qede_dev *edev)
787 {
788 	u8 fp_combined, fp_rx = edev->fp_num_rx;
789 	struct qede_fastpath *fp;
790 	int i;
791 
792 	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
793 				 sizeof(*edev->fp_array), GFP_KERNEL);
794 	if (!edev->fp_array) {
795 		DP_NOTICE(edev, "fp array allocation failed\n");
796 		goto err;
797 	}
798 
799 	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
800 
801 	/* Allocate the FP elements for Rx queues followed by combined and then
802 	 * the Tx. This ordering should be maintained so that the respective
803 	 * queues (Rx or Tx) will be together in the fastpath array and the
804 	 * associated ids will be sequential.
805 	 */
806 	for_each_queue(i) {
807 		fp = &edev->fp_array[i];
808 
809 		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
810 		if (!fp->sb_info) {
811 			DP_NOTICE(edev, "sb info struct allocation failed\n");
812 			goto err;
813 		}
814 
815 		if (fp_rx) {
816 			fp->type = QEDE_FASTPATH_RX;
817 			fp_rx--;
818 		} else if (fp_combined) {
819 			fp->type = QEDE_FASTPATH_COMBINED;
820 			fp_combined--;
821 		} else {
822 			fp->type = QEDE_FASTPATH_TX;
823 		}
824 
825 		if (fp->type & QEDE_FASTPATH_TX) {
826 			fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
827 			if (!fp->txq)
828 				goto err;
829 		}
830 
831 		if (fp->type & QEDE_FASTPATH_RX) {
832 			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
833 			if (!fp->rxq)
834 				goto err;
835 
836 			if (edev->xdp_prog) {
837 				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
838 						     GFP_KERNEL);
839 				if (!fp->xdp_tx)
840 					goto err;
841 				fp->type |= QEDE_FASTPATH_XDP;
842 			}
843 		}
844 	}
845 
846 	return 0;
847 err:
848 	qede_free_fp_array(edev);
849 	return -ENOMEM;
850 }
851 
852 static void qede_sp_task(struct work_struct *work)
853 {
854 	struct qede_dev *edev = container_of(work, struct qede_dev,
855 					     sp_task.work);
856 
857 	__qede_lock(edev);
858 
859 	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
860 		if (edev->state == QEDE_STATE_OPEN)
861 			qede_config_rx_mode(edev->ndev);
862 
863 #ifdef CONFIG_RFS_ACCEL
864 	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
865 		if (edev->state == QEDE_STATE_OPEN)
866 			qede_process_arfs_filters(edev, false);
867 	}
868 #endif
869 	__qede_unlock(edev);
870 }
871 
872 static void qede_update_pf_params(struct qed_dev *cdev)
873 {
874 	struct qed_pf_params pf_params;
875 
876 	/* 64 rx + 64 tx + 64 XDP */
877 	memset(&pf_params, 0, sizeof(struct qed_pf_params));
878 	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
879 
880 	/* Same for VFs - make sure they'll have sufficient connections
881 	 * to support XDP Tx queues.
882 	 */
883 	pf_params.eth_pf_params.num_vf_cons = 48;
884 
885 	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
886 	qed_ops->common->update_pf_params(cdev, &pf_params);
887 }
888 
889 #define QEDE_FW_VER_STR_SIZE	80
890 
891 static void qede_log_probe(struct qede_dev *edev)
892 {
893 	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
894 	u8 buf[QEDE_FW_VER_STR_SIZE];
895 	size_t left_size;
896 
897 	snprintf(buf, QEDE_FW_VER_STR_SIZE,
898 		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
899 		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
900 		 p_dev_info->fw_eng,
901 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
902 		 QED_MFW_VERSION_3_OFFSET,
903 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
904 		 QED_MFW_VERSION_2_OFFSET,
905 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
906 		 QED_MFW_VERSION_1_OFFSET,
907 		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
908 		 QED_MFW_VERSION_0_OFFSET);
909 
910 	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
911 	if (p_dev_info->mbi_version && left_size)
912 		snprintf(buf + strlen(buf), left_size,
913 			 " [MBI %d.%d.%d]",
914 			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
915 			 QED_MBI_VERSION_2_OFFSET,
916 			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
917 			 QED_MBI_VERSION_1_OFFSET,
918 			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
919 			 QED_MBI_VERSION_0_OFFSET);
920 
921 	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
922 		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
923 		buf, edev->ndev->name);
924 }
925 
926 enum qede_probe_mode {
927 	QEDE_PROBE_NORMAL,
928 };
929 
930 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
931 			bool is_vf, enum qede_probe_mode mode)
932 {
933 	struct qed_probe_params probe_params;
934 	struct qed_slowpath_params sp_params;
935 	struct qed_dev_eth_info dev_info;
936 	struct qede_dev *edev;
937 	struct qed_dev *cdev;
938 	int rc;
939 
940 	if (unlikely(dp_level & QED_LEVEL_INFO))
941 		pr_notice("Starting qede probe\n");
942 
943 	memset(&probe_params, 0, sizeof(probe_params));
944 	probe_params.protocol = QED_PROTOCOL_ETH;
945 	probe_params.dp_module = dp_module;
946 	probe_params.dp_level = dp_level;
947 	probe_params.is_vf = is_vf;
948 	cdev = qed_ops->common->probe(pdev, &probe_params);
949 	if (!cdev) {
950 		rc = -ENODEV;
951 		goto err0;
952 	}
953 
954 	qede_update_pf_params(cdev);
955 
956 	/* Start the Slowpath-process */
957 	memset(&sp_params, 0, sizeof(sp_params));
958 	sp_params.int_mode = QED_INT_MODE_MSIX;
959 	sp_params.drv_major = QEDE_MAJOR_VERSION;
960 	sp_params.drv_minor = QEDE_MINOR_VERSION;
961 	sp_params.drv_rev = QEDE_REVISION_VERSION;
962 	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
963 	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
964 	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
965 	if (rc) {
966 		pr_notice("Cannot start slowpath\n");
967 		goto err1;
968 	}
969 
970 	/* Learn information crucial for qede to progress */
971 	rc = qed_ops->fill_dev_info(cdev, &dev_info);
972 	if (rc)
973 		goto err2;
974 
975 	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
976 				   dp_level);
977 	if (!edev) {
978 		rc = -ENOMEM;
979 		goto err2;
980 	}
981 
982 	if (is_vf)
983 		edev->flags |= QEDE_FLAG_IS_VF;
984 
985 	qede_init_ndev(edev);
986 
987 	rc = qede_rdma_dev_add(edev);
988 	if (rc)
989 		goto err3;
990 
991 	/* Prepare the lock prior to the registration of the netdev,
992 	 * as once it's registered we might reach flows requiring it
993 	 * [it's even possible to reach a flow needing it directly
994 	 * from there, although it's unlikely].
995 	 */
996 	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
997 	mutex_init(&edev->qede_lock);
998 	rc = register_netdev(edev->ndev);
999 	if (rc) {
1000 		DP_NOTICE(edev, "Cannot register net-device\n");
1001 		goto err4;
1002 	}
1003 
1004 	edev->ops->common->set_name(cdev, edev->ndev->name);
1005 
1006 	/* PTP not supported on VFs */
1007 	if (!is_vf)
1008 		qede_ptp_enable(edev, true);
1009 
1010 	edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1011 
1012 #ifdef CONFIG_DCB
1013 	if (!IS_VF(edev))
1014 		qede_set_dcbnl_ops(edev->ndev);
1015 #endif
1016 
1017 	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
1018 
1019 	qede_log_probe(edev);
1020 	return 0;
1021 
1022 err4:
1023 	qede_rdma_dev_remove(edev);
1024 err3:
1025 	free_netdev(edev->ndev);
1026 err2:
1027 	qed_ops->common->slowpath_stop(cdev);
1028 err1:
1029 	qed_ops->common->remove(cdev);
1030 err0:
1031 	return rc;
1032 }
1033 
1034 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1035 {
1036 	bool is_vf = false;
1037 	u32 dp_module = 0;
1038 	u8 dp_level = 0;
1039 
1040 	switch ((enum qede_pci_private)id->driver_data) {
1041 	case QEDE_PRIVATE_VF:
1042 		if (debug & QED_LOG_VERBOSE_MASK)
1043 			dev_err(&pdev->dev, "Probing a VF\n");
1044 		is_vf = true;
1045 		break;
1046 	default:
1047 		if (debug & QED_LOG_VERBOSE_MASK)
1048 			dev_err(&pdev->dev, "Probing a PF\n");
1049 	}
1050 
1051 	qede_config_debug(debug, &dp_module, &dp_level);
1052 
1053 	return __qede_probe(pdev, dp_module, dp_level, is_vf,
1054 			    QEDE_PROBE_NORMAL);
1055 }
1056 
1057 enum qede_remove_mode {
1058 	QEDE_REMOVE_NORMAL,
1059 };
1060 
1061 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1062 {
1063 	struct net_device *ndev = pci_get_drvdata(pdev);
1064 	struct qede_dev *edev = netdev_priv(ndev);
1065 	struct qed_dev *cdev = edev->cdev;
1066 
1067 	DP_INFO(edev, "Starting qede_remove\n");
1068 
1069 	unregister_netdev(ndev);
1070 	cancel_delayed_work_sync(&edev->sp_task);
1071 
1072 	qede_ptp_disable(edev);
1073 
1074 	qede_rdma_dev_remove(edev);
1075 
1076 	edev->ops->common->set_power_state(cdev, PCI_D0);
1077 
1078 	pci_set_drvdata(pdev, NULL);
1079 
1080 	/* Use global ops since we've freed edev */
1081 	qed_ops->common->slowpath_stop(cdev);
1082 	if (system_state == SYSTEM_POWER_OFF)
1083 		return;
1084 	qed_ops->common->remove(cdev);
1085 
1086 	/* Since this can happen out-of-sync with other flows,
1087 	 * don't release the netdevice until after slowpath stop
1088 	 * has been called to guarantee various other contexts
1089 	 * [e.g., QED register callbacks] won't break anything when
1090 	 * accessing the netdevice.
1091 	 */
1092 	 free_netdev(ndev);
1093 
1094 	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1095 }
1096 
1097 static void qede_remove(struct pci_dev *pdev)
1098 {
1099 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1100 }
1101 
1102 static void qede_shutdown(struct pci_dev *pdev)
1103 {
1104 	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
1105 }
1106 
1107 /* -------------------------------------------------------------------------
1108  * START OF LOAD / UNLOAD
1109  * -------------------------------------------------------------------------
1110  */
1111 
1112 static int qede_set_num_queues(struct qede_dev *edev)
1113 {
1114 	int rc;
1115 	u16 rss_num;
1116 
1117 	/* Setup queues according to possible resources*/
1118 	if (edev->req_queues)
1119 		rss_num = edev->req_queues;
1120 	else
1121 		rss_num = netif_get_num_default_rss_queues() *
1122 			  edev->dev_info.common.num_hwfns;
1123 
1124 	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1125 
1126 	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1127 	if (rc > 0) {
1128 		/* Managed to request interrupts for our queues */
1129 		edev->num_queues = rc;
1130 		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1131 			QEDE_QUEUE_CNT(edev), rss_num);
1132 		rc = 0;
1133 	}
1134 
1135 	edev->fp_num_tx = edev->req_num_tx;
1136 	edev->fp_num_rx = edev->req_num_rx;
1137 
1138 	return rc;
1139 }
1140 
1141 static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
1142 			     u16 sb_id)
1143 {
1144 	if (sb_info->sb_virt) {
1145 		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id);
1146 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1147 				  (void *)sb_info->sb_virt, sb_info->sb_phys);
1148 		memset(sb_info, 0, sizeof(*sb_info));
1149 	}
1150 }
1151 
1152 /* This function allocates fast-path status block memory */
1153 static int qede_alloc_mem_sb(struct qede_dev *edev,
1154 			     struct qed_sb_info *sb_info, u16 sb_id)
1155 {
1156 	struct status_block_e4 *sb_virt;
1157 	dma_addr_t sb_phys;
1158 	int rc;
1159 
1160 	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1161 				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
1162 	if (!sb_virt) {
1163 		DP_ERR(edev, "Status block allocation failed\n");
1164 		return -ENOMEM;
1165 	}
1166 
1167 	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1168 					sb_virt, sb_phys, sb_id,
1169 					QED_SB_TYPE_L2_QUEUE);
1170 	if (rc) {
1171 		DP_ERR(edev, "Status block initialization failed\n");
1172 		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1173 				  sb_virt, sb_phys);
1174 		return rc;
1175 	}
1176 
1177 	return 0;
1178 }
1179 
1180 static void qede_free_rx_buffers(struct qede_dev *edev,
1181 				 struct qede_rx_queue *rxq)
1182 {
1183 	u16 i;
1184 
1185 	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1186 		struct sw_rx_data *rx_buf;
1187 		struct page *data;
1188 
1189 		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1190 		data = rx_buf->data;
1191 
1192 		dma_unmap_page(&edev->pdev->dev,
1193 			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1194 
1195 		rx_buf->data = NULL;
1196 		__free_page(data);
1197 	}
1198 }
1199 
1200 static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1201 {
1202 	int i;
1203 
1204 	if (edev->gro_disable)
1205 		return;
1206 
1207 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1208 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1209 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1210 
1211 		if (replace_buf->data) {
1212 			dma_unmap_page(&edev->pdev->dev,
1213 				       replace_buf->mapping,
1214 				       PAGE_SIZE, DMA_FROM_DEVICE);
1215 			__free_page(replace_buf->data);
1216 		}
1217 	}
1218 }
1219 
1220 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1221 {
1222 	qede_free_sge_mem(edev, rxq);
1223 
1224 	/* Free rx buffers */
1225 	qede_free_rx_buffers(edev, rxq);
1226 
1227 	/* Free the parallel SW ring */
1228 	kfree(rxq->sw_rx_ring);
1229 
1230 	/* Free the real RQ ring used by FW */
1231 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1232 	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1233 }
1234 
1235 static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
1236 {
1237 	dma_addr_t mapping;
1238 	int i;
1239 
1240 	if (edev->gro_disable)
1241 		return 0;
1242 
1243 	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1244 		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1245 		struct sw_rx_data *replace_buf = &tpa_info->buffer;
1246 
1247 		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
1248 		if (unlikely(!replace_buf->data)) {
1249 			DP_NOTICE(edev,
1250 				  "Failed to allocate TPA skb pool [replacement buffer]\n");
1251 			goto err;
1252 		}
1253 
1254 		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
1255 				       PAGE_SIZE, DMA_FROM_DEVICE);
1256 		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
1257 			DP_NOTICE(edev,
1258 				  "Failed to map TPA replacement buffer\n");
1259 			goto err;
1260 		}
1261 
1262 		replace_buf->mapping = mapping;
1263 		tpa_info->buffer.page_offset = 0;
1264 		tpa_info->buffer_mapping = mapping;
1265 		tpa_info->state = QEDE_AGG_STATE_NONE;
1266 	}
1267 
1268 	return 0;
1269 err:
1270 	qede_free_sge_mem(edev, rxq);
1271 	edev->gro_disable = 1;
1272 	edev->ndev->features &= ~NETIF_F_GRO_HW;
1273 	return -ENOMEM;
1274 }
1275 
1276 /* This function allocates all memory needed per Rx queue */
1277 static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1278 {
1279 	int i, rc, size;
1280 
1281 	rxq->num_rx_buffers = edev->q_num_rx_buffers;
1282 
1283 	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1284 	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : 0;
1285 
1286 	/* Make sure that the headroom and  payload fit in a single page */
1287 	if (rxq->rx_buf_size + rxq->rx_headroom > PAGE_SIZE)
1288 		rxq->rx_buf_size = PAGE_SIZE - rxq->rx_headroom;
1289 
1290 	/* Segment size to spilt a page in multiple equal parts,
1291 	 * unless XDP is used in which case we'd use the entire page.
1292 	 */
1293 	if (!edev->xdp_prog)
1294 		rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
1295 	else
1296 		rxq->rx_buf_seg_size = PAGE_SIZE;
1297 
1298 	/* Allocate the parallel driver ring for Rx buffers */
1299 	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1300 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1301 	if (!rxq->sw_rx_ring) {
1302 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
1303 		rc = -ENOMEM;
1304 		goto err;
1305 	}
1306 
1307 	/* Allocate FW Rx ring  */
1308 	rc = edev->ops->common->chain_alloc(edev->cdev,
1309 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1310 					    QED_CHAIN_MODE_NEXT_PTR,
1311 					    QED_CHAIN_CNT_TYPE_U16,
1312 					    RX_RING_SIZE,
1313 					    sizeof(struct eth_rx_bd),
1314 					    &rxq->rx_bd_ring, NULL);
1315 	if (rc)
1316 		goto err;
1317 
1318 	/* Allocate FW completion ring */
1319 	rc = edev->ops->common->chain_alloc(edev->cdev,
1320 					    QED_CHAIN_USE_TO_CONSUME,
1321 					    QED_CHAIN_MODE_PBL,
1322 					    QED_CHAIN_CNT_TYPE_U16,
1323 					    RX_RING_SIZE,
1324 					    sizeof(union eth_rx_cqe),
1325 					    &rxq->rx_comp_ring, NULL);
1326 	if (rc)
1327 		goto err;
1328 
1329 	/* Allocate buffers for the Rx ring */
1330 	rxq->filled_buffers = 0;
1331 	for (i = 0; i < rxq->num_rx_buffers; i++) {
1332 		rc = qede_alloc_rx_buffer(rxq, false);
1333 		if (rc) {
1334 			DP_ERR(edev,
1335 			       "Rx buffers allocation failed at index %d\n", i);
1336 			goto err;
1337 		}
1338 	}
1339 
1340 	rc = qede_alloc_sge_mem(edev, rxq);
1341 err:
1342 	return rc;
1343 }
1344 
1345 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1346 {
1347 	/* Free the parallel SW ring */
1348 	if (txq->is_xdp)
1349 		kfree(txq->sw_tx_ring.xdp);
1350 	else
1351 		kfree(txq->sw_tx_ring.skbs);
1352 
1353 	/* Free the real RQ ring used by FW */
1354 	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1355 }
1356 
1357 /* This function allocates all memory needed per Tx queue */
1358 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1359 {
1360 	union eth_tx_bd_types *p_virt;
1361 	int size, rc;
1362 
1363 	txq->num_tx_buffers = edev->q_num_tx_buffers;
1364 
1365 	/* Allocate the parallel driver ring for Tx buffers */
1366 	if (txq->is_xdp) {
1367 		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
1368 		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1369 		if (!txq->sw_tx_ring.xdp)
1370 			goto err;
1371 	} else {
1372 		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1373 		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1374 		if (!txq->sw_tx_ring.skbs)
1375 			goto err;
1376 	}
1377 
1378 	rc = edev->ops->common->chain_alloc(edev->cdev,
1379 					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1380 					    QED_CHAIN_MODE_PBL,
1381 					    QED_CHAIN_CNT_TYPE_U16,
1382 					    txq->num_tx_buffers,
1383 					    sizeof(*p_virt),
1384 					    &txq->tx_pbl, NULL);
1385 	if (rc)
1386 		goto err;
1387 
1388 	return 0;
1389 
1390 err:
1391 	qede_free_mem_txq(edev, txq);
1392 	return -ENOMEM;
1393 }
1394 
1395 /* This function frees all memory of a single fp */
1396 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1397 {
1398 	qede_free_mem_sb(edev, fp->sb_info, fp->id);
1399 
1400 	if (fp->type & QEDE_FASTPATH_RX)
1401 		qede_free_mem_rxq(edev, fp->rxq);
1402 
1403 	if (fp->type & QEDE_FASTPATH_XDP)
1404 		qede_free_mem_txq(edev, fp->xdp_tx);
1405 
1406 	if (fp->type & QEDE_FASTPATH_TX)
1407 		qede_free_mem_txq(edev, fp->txq);
1408 }
1409 
1410 /* This function allocates all memory needed for a single fp (i.e. an entity
1411  * which contains status block, one rx queue and/or multiple per-TC tx queues.
1412  */
1413 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1414 {
1415 	int rc = 0;
1416 
1417 	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1418 	if (rc)
1419 		goto out;
1420 
1421 	if (fp->type & QEDE_FASTPATH_RX) {
1422 		rc = qede_alloc_mem_rxq(edev, fp->rxq);
1423 		if (rc)
1424 			goto out;
1425 	}
1426 
1427 	if (fp->type & QEDE_FASTPATH_XDP) {
1428 		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1429 		if (rc)
1430 			goto out;
1431 	}
1432 
1433 	if (fp->type & QEDE_FASTPATH_TX) {
1434 		rc = qede_alloc_mem_txq(edev, fp->txq);
1435 		if (rc)
1436 			goto out;
1437 	}
1438 
1439 out:
1440 	return rc;
1441 }
1442 
1443 static void qede_free_mem_load(struct qede_dev *edev)
1444 {
1445 	int i;
1446 
1447 	for_each_queue(i) {
1448 		struct qede_fastpath *fp = &edev->fp_array[i];
1449 
1450 		qede_free_mem_fp(edev, fp);
1451 	}
1452 }
1453 
1454 /* This function allocates all qede memory at NIC load. */
1455 static int qede_alloc_mem_load(struct qede_dev *edev)
1456 {
1457 	int rc = 0, queue_id;
1458 
1459 	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1460 		struct qede_fastpath *fp = &edev->fp_array[queue_id];
1461 
1462 		rc = qede_alloc_mem_fp(edev, fp);
1463 		if (rc) {
1464 			DP_ERR(edev,
1465 			       "Failed to allocate memory for fastpath - rss id = %d\n",
1466 			       queue_id);
1467 			qede_free_mem_load(edev);
1468 			return rc;
1469 		}
1470 	}
1471 
1472 	return 0;
1473 }
1474 
1475 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
1476 static void qede_init_fp(struct qede_dev *edev)
1477 {
1478 	int queue_id, rxq_index = 0, txq_index = 0;
1479 	struct qede_fastpath *fp;
1480 
1481 	for_each_queue(queue_id) {
1482 		fp = &edev->fp_array[queue_id];
1483 
1484 		fp->edev = edev;
1485 		fp->id = queue_id;
1486 
1487 		if (fp->type & QEDE_FASTPATH_XDP) {
1488 			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1489 								rxq_index);
1490 			fp->xdp_tx->is_xdp = 1;
1491 		}
1492 
1493 		if (fp->type & QEDE_FASTPATH_RX) {
1494 			fp->rxq->rxq_id = rxq_index++;
1495 
1496 			/* Determine how to map buffers for this queue */
1497 			if (fp->type & QEDE_FASTPATH_XDP)
1498 				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1499 			else
1500 				fp->rxq->data_direction = DMA_FROM_DEVICE;
1501 			fp->rxq->dev = &edev->pdev->dev;
1502 
1503 			/* Driver have no error path from here */
1504 			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1505 						 fp->rxq->rxq_id) < 0);
1506 		}
1507 
1508 		if (fp->type & QEDE_FASTPATH_TX) {
1509 			fp->txq->index = txq_index++;
1510 			if (edev->dev_info.is_legacy)
1511 				fp->txq->is_legacy = 1;
1512 			fp->txq->dev = &edev->pdev->dev;
1513 		}
1514 
1515 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1516 			 edev->ndev->name, queue_id);
1517 	}
1518 
1519 	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
1520 }
1521 
1522 static int qede_set_real_num_queues(struct qede_dev *edev)
1523 {
1524 	int rc = 0;
1525 
1526 	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
1527 	if (rc) {
1528 		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1529 		return rc;
1530 	}
1531 
1532 	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1533 	if (rc) {
1534 		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1535 		return rc;
1536 	}
1537 
1538 	return 0;
1539 }
1540 
1541 static void qede_napi_disable_remove(struct qede_dev *edev)
1542 {
1543 	int i;
1544 
1545 	for_each_queue(i) {
1546 		napi_disable(&edev->fp_array[i].napi);
1547 
1548 		netif_napi_del(&edev->fp_array[i].napi);
1549 	}
1550 }
1551 
1552 static void qede_napi_add_enable(struct qede_dev *edev)
1553 {
1554 	int i;
1555 
1556 	/* Add NAPI objects */
1557 	for_each_queue(i) {
1558 		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1559 			       qede_poll, NAPI_POLL_WEIGHT);
1560 		napi_enable(&edev->fp_array[i].napi);
1561 	}
1562 }
1563 
1564 static void qede_sync_free_irqs(struct qede_dev *edev)
1565 {
1566 	int i;
1567 
1568 	for (i = 0; i < edev->int_info.used_cnt; i++) {
1569 		if (edev->int_info.msix_cnt) {
1570 			synchronize_irq(edev->int_info.msix[i].vector);
1571 			free_irq(edev->int_info.msix[i].vector,
1572 				 &edev->fp_array[i]);
1573 		} else {
1574 			edev->ops->common->simd_handler_clean(edev->cdev, i);
1575 		}
1576 	}
1577 
1578 	edev->int_info.used_cnt = 0;
1579 }
1580 
1581 static int qede_req_msix_irqs(struct qede_dev *edev)
1582 {
1583 	int i, rc;
1584 
1585 	/* Sanitize number of interrupts == number of prepared RSS queues */
1586 	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1587 		DP_ERR(edev,
1588 		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1589 		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1590 		return -EINVAL;
1591 	}
1592 
1593 	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1594 #ifdef CONFIG_RFS_ACCEL
1595 		struct qede_fastpath *fp = &edev->fp_array[i];
1596 
1597 		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1598 			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1599 					      edev->int_info.msix[i].vector);
1600 			if (rc) {
1601 				DP_ERR(edev, "Failed to add CPU rmap\n");
1602 				qede_free_arfs(edev);
1603 			}
1604 		}
1605 #endif
1606 		rc = request_irq(edev->int_info.msix[i].vector,
1607 				 qede_msix_fp_int, 0, edev->fp_array[i].name,
1608 				 &edev->fp_array[i]);
1609 		if (rc) {
1610 			DP_ERR(edev, "Request fp %d irq failed\n", i);
1611 			qede_sync_free_irqs(edev);
1612 			return rc;
1613 		}
1614 		DP_VERBOSE(edev, NETIF_MSG_INTR,
1615 			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1616 			   edev->fp_array[i].name, i,
1617 			   &edev->fp_array[i]);
1618 		edev->int_info.used_cnt++;
1619 	}
1620 
1621 	return 0;
1622 }
1623 
1624 static void qede_simd_fp_handler(void *cookie)
1625 {
1626 	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1627 
1628 	napi_schedule_irqoff(&fp->napi);
1629 }
1630 
1631 static int qede_setup_irqs(struct qede_dev *edev)
1632 {
1633 	int i, rc = 0;
1634 
1635 	/* Learn Interrupt configuration */
1636 	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1637 	if (rc)
1638 		return rc;
1639 
1640 	if (edev->int_info.msix_cnt) {
1641 		rc = qede_req_msix_irqs(edev);
1642 		if (rc)
1643 			return rc;
1644 		edev->ndev->irq = edev->int_info.msix[0].vector;
1645 	} else {
1646 		const struct qed_common_ops *ops;
1647 
1648 		/* qed should learn receive the RSS ids and callbacks */
1649 		ops = edev->ops->common;
1650 		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
1651 			ops->simd_handler_config(edev->cdev,
1652 						 &edev->fp_array[i], i,
1653 						 qede_simd_fp_handler);
1654 		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
1655 	}
1656 	return 0;
1657 }
1658 
1659 static int qede_drain_txq(struct qede_dev *edev,
1660 			  struct qede_tx_queue *txq, bool allow_drain)
1661 {
1662 	int rc, cnt = 1000;
1663 
1664 	while (txq->sw_tx_cons != txq->sw_tx_prod) {
1665 		if (!cnt) {
1666 			if (allow_drain) {
1667 				DP_NOTICE(edev,
1668 					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
1669 					  txq->index);
1670 				rc = edev->ops->common->drain(edev->cdev);
1671 				if (rc)
1672 					return rc;
1673 				return qede_drain_txq(edev, txq, false);
1674 			}
1675 			DP_NOTICE(edev,
1676 				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
1677 				  txq->index, txq->sw_tx_prod,
1678 				  txq->sw_tx_cons);
1679 			return -ENODEV;
1680 		}
1681 		cnt--;
1682 		usleep_range(1000, 2000);
1683 		barrier();
1684 	}
1685 
1686 	/* FW finished processing, wait for HW to transmit all tx packets */
1687 	usleep_range(1000, 2000);
1688 
1689 	return 0;
1690 }
1691 
1692 static int qede_stop_txq(struct qede_dev *edev,
1693 			 struct qede_tx_queue *txq, int rss_id)
1694 {
1695 	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
1696 }
1697 
1698 static int qede_stop_queues(struct qede_dev *edev)
1699 {
1700 	struct qed_update_vport_params *vport_update_params;
1701 	struct qed_dev *cdev = edev->cdev;
1702 	struct qede_fastpath *fp;
1703 	int rc, i;
1704 
1705 	/* Disable the vport */
1706 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1707 	if (!vport_update_params)
1708 		return -ENOMEM;
1709 
1710 	vport_update_params->vport_id = 0;
1711 	vport_update_params->update_vport_active_flg = 1;
1712 	vport_update_params->vport_active_flg = 0;
1713 	vport_update_params->update_rss_flg = 0;
1714 
1715 	rc = edev->ops->vport_update(cdev, vport_update_params);
1716 	vfree(vport_update_params);
1717 
1718 	if (rc) {
1719 		DP_ERR(edev, "Failed to update vport\n");
1720 		return rc;
1721 	}
1722 
1723 	/* Flush Tx queues. If needed, request drain from MCP */
1724 	for_each_queue(i) {
1725 		fp = &edev->fp_array[i];
1726 
1727 		if (fp->type & QEDE_FASTPATH_TX) {
1728 			rc = qede_drain_txq(edev, fp->txq, true);
1729 			if (rc)
1730 				return rc;
1731 		}
1732 
1733 		if (fp->type & QEDE_FASTPATH_XDP) {
1734 			rc = qede_drain_txq(edev, fp->xdp_tx, true);
1735 			if (rc)
1736 				return rc;
1737 		}
1738 	}
1739 
1740 	/* Stop all Queues in reverse order */
1741 	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
1742 		fp = &edev->fp_array[i];
1743 
1744 		/* Stop the Tx Queue(s) */
1745 		if (fp->type & QEDE_FASTPATH_TX) {
1746 			rc = qede_stop_txq(edev, fp->txq, i);
1747 			if (rc)
1748 				return rc;
1749 		}
1750 
1751 		/* Stop the Rx Queue */
1752 		if (fp->type & QEDE_FASTPATH_RX) {
1753 			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
1754 			if (rc) {
1755 				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
1756 				return rc;
1757 			}
1758 		}
1759 
1760 		/* Stop the XDP forwarding queue */
1761 		if (fp->type & QEDE_FASTPATH_XDP) {
1762 			rc = qede_stop_txq(edev, fp->xdp_tx, i);
1763 			if (rc)
1764 				return rc;
1765 
1766 			bpf_prog_put(fp->rxq->xdp_prog);
1767 		}
1768 	}
1769 
1770 	/* Stop the vport */
1771 	rc = edev->ops->vport_stop(cdev, 0);
1772 	if (rc)
1773 		DP_ERR(edev, "Failed to stop VPORT\n");
1774 
1775 	return rc;
1776 }
1777 
1778 static int qede_start_txq(struct qede_dev *edev,
1779 			  struct qede_fastpath *fp,
1780 			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
1781 {
1782 	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
1783 	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
1784 	struct qed_queue_start_common_params params;
1785 	struct qed_txq_start_ret_params ret_params;
1786 	int rc;
1787 
1788 	memset(&params, 0, sizeof(params));
1789 	memset(&ret_params, 0, sizeof(ret_params));
1790 
1791 	/* Let the XDP queue share the queue-zone with one of the regular txq.
1792 	 * We don't really care about its coalescing.
1793 	 */
1794 	if (txq->is_xdp)
1795 		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
1796 	else
1797 		params.queue_id = txq->index;
1798 
1799 	params.p_sb = fp->sb_info;
1800 	params.sb_idx = sb_idx;
1801 
1802 	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
1803 				   page_cnt, &ret_params);
1804 	if (rc) {
1805 		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
1806 		return rc;
1807 	}
1808 
1809 	txq->doorbell_addr = ret_params.p_doorbell;
1810 	txq->handle = ret_params.p_handle;
1811 
1812 	/* Determine the FW consumer address associated */
1813 	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
1814 
1815 	/* Prepare the doorbell parameters */
1816 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
1817 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
1818 	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
1819 		  DQ_XCM_ETH_TX_BD_PROD_CMD);
1820 	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
1821 
1822 	return rc;
1823 }
1824 
1825 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
1826 {
1827 	int vlan_removal_en = 1;
1828 	struct qed_dev *cdev = edev->cdev;
1829 	struct qed_dev_info *qed_info = &edev->dev_info.common;
1830 	struct qed_update_vport_params *vport_update_params;
1831 	struct qed_queue_start_common_params q_params;
1832 	struct qed_start_vport_params start = {0};
1833 	int rc, i;
1834 
1835 	if (!edev->num_queues) {
1836 		DP_ERR(edev,
1837 		       "Cannot update V-VPORT as active as there are no Rx queues\n");
1838 		return -EINVAL;
1839 	}
1840 
1841 	vport_update_params = vzalloc(sizeof(*vport_update_params));
1842 	if (!vport_update_params)
1843 		return -ENOMEM;
1844 
1845 	start.handle_ptp_pkts = !!(edev->ptp);
1846 	start.gro_enable = !edev->gro_disable;
1847 	start.mtu = edev->ndev->mtu;
1848 	start.vport_id = 0;
1849 	start.drop_ttl0 = true;
1850 	start.remove_inner_vlan = vlan_removal_en;
1851 	start.clear_stats = clear_stats;
1852 
1853 	rc = edev->ops->vport_start(cdev, &start);
1854 
1855 	if (rc) {
1856 		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
1857 		goto out;
1858 	}
1859 
1860 	DP_VERBOSE(edev, NETIF_MSG_IFUP,
1861 		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
1862 		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
1863 
1864 	for_each_queue(i) {
1865 		struct qede_fastpath *fp = &edev->fp_array[i];
1866 		dma_addr_t p_phys_table;
1867 		u32 page_cnt;
1868 
1869 		if (fp->type & QEDE_FASTPATH_RX) {
1870 			struct qed_rxq_start_ret_params ret_params;
1871 			struct qede_rx_queue *rxq = fp->rxq;
1872 			__le16 *val;
1873 
1874 			memset(&ret_params, 0, sizeof(ret_params));
1875 			memset(&q_params, 0, sizeof(q_params));
1876 			q_params.queue_id = rxq->rxq_id;
1877 			q_params.vport_id = 0;
1878 			q_params.p_sb = fp->sb_info;
1879 			q_params.sb_idx = RX_PI;
1880 
1881 			p_phys_table =
1882 			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
1883 			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
1884 
1885 			rc = edev->ops->q_rx_start(cdev, i, &q_params,
1886 						   rxq->rx_buf_size,
1887 						   rxq->rx_bd_ring.p_phys_addr,
1888 						   p_phys_table,
1889 						   page_cnt, &ret_params);
1890 			if (rc) {
1891 				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
1892 				       rc);
1893 				goto out;
1894 			}
1895 
1896 			/* Use the return parameters */
1897 			rxq->hw_rxq_prod_addr = ret_params.p_prod;
1898 			rxq->handle = ret_params.p_handle;
1899 
1900 			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
1901 			rxq->hw_cons_ptr = val;
1902 
1903 			qede_update_rx_prod(edev, rxq);
1904 		}
1905 
1906 		if (fp->type & QEDE_FASTPATH_XDP) {
1907 			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
1908 			if (rc)
1909 				goto out;
1910 
1911 			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
1912 			if (IS_ERR(fp->rxq->xdp_prog)) {
1913 				rc = PTR_ERR(fp->rxq->xdp_prog);
1914 				fp->rxq->xdp_prog = NULL;
1915 				goto out;
1916 			}
1917 		}
1918 
1919 		if (fp->type & QEDE_FASTPATH_TX) {
1920 			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
1921 			if (rc)
1922 				goto out;
1923 		}
1924 	}
1925 
1926 	/* Prepare and send the vport enable */
1927 	vport_update_params->vport_id = start.vport_id;
1928 	vport_update_params->update_vport_active_flg = 1;
1929 	vport_update_params->vport_active_flg = 1;
1930 
1931 	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
1932 	    qed_info->tx_switching) {
1933 		vport_update_params->update_tx_switching_flg = 1;
1934 		vport_update_params->tx_switching_flg = 1;
1935 	}
1936 
1937 	qede_fill_rss_params(edev, &vport_update_params->rss_params,
1938 			     &vport_update_params->update_rss_flg);
1939 
1940 	rc = edev->ops->vport_update(cdev, vport_update_params);
1941 	if (rc)
1942 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
1943 
1944 out:
1945 	vfree(vport_update_params);
1946 	return rc;
1947 }
1948 
1949 enum qede_unload_mode {
1950 	QEDE_UNLOAD_NORMAL,
1951 };
1952 
1953 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
1954 			bool is_locked)
1955 {
1956 	struct qed_link_params link_params;
1957 	int rc;
1958 
1959 	DP_INFO(edev, "Starting qede unload\n");
1960 
1961 	if (!is_locked)
1962 		__qede_lock(edev);
1963 
1964 	edev->state = QEDE_STATE_CLOSED;
1965 
1966 	qede_rdma_dev_event_close(edev);
1967 
1968 	/* Close OS Tx */
1969 	netif_tx_disable(edev->ndev);
1970 	netif_carrier_off(edev->ndev);
1971 
1972 	/* Reset the link */
1973 	memset(&link_params, 0, sizeof(link_params));
1974 	link_params.link_up = false;
1975 	edev->ops->common->set_link(edev->cdev, &link_params);
1976 	rc = qede_stop_queues(edev);
1977 	if (rc) {
1978 		qede_sync_free_irqs(edev);
1979 		goto out;
1980 	}
1981 
1982 	DP_INFO(edev, "Stopped Queues\n");
1983 
1984 	qede_vlan_mark_nonconfigured(edev);
1985 	edev->ops->fastpath_stop(edev->cdev);
1986 
1987 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
1988 		qede_poll_for_freeing_arfs_filters(edev);
1989 		qede_free_arfs(edev);
1990 	}
1991 
1992 	/* Release the interrupts */
1993 	qede_sync_free_irqs(edev);
1994 	edev->ops->common->set_fp_int(edev->cdev, 0);
1995 
1996 	qede_napi_disable_remove(edev);
1997 
1998 	qede_free_mem_load(edev);
1999 	qede_free_fp_array(edev);
2000 
2001 out:
2002 	if (!is_locked)
2003 		__qede_unlock(edev);
2004 	DP_INFO(edev, "Ending qede unload\n");
2005 }
2006 
2007 enum qede_load_mode {
2008 	QEDE_LOAD_NORMAL,
2009 	QEDE_LOAD_RELOAD,
2010 };
2011 
2012 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2013 		     bool is_locked)
2014 {
2015 	struct qed_link_params link_params;
2016 	int rc;
2017 
2018 	DP_INFO(edev, "Starting qede load\n");
2019 
2020 	if (!is_locked)
2021 		__qede_lock(edev);
2022 
2023 	rc = qede_set_num_queues(edev);
2024 	if (rc)
2025 		goto out;
2026 
2027 	rc = qede_alloc_fp_array(edev);
2028 	if (rc)
2029 		goto out;
2030 
2031 	qede_init_fp(edev);
2032 
2033 	rc = qede_alloc_mem_load(edev);
2034 	if (rc)
2035 		goto err1;
2036 	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2037 		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2038 
2039 	rc = qede_set_real_num_queues(edev);
2040 	if (rc)
2041 		goto err2;
2042 
2043 	if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) {
2044 		rc = qede_alloc_arfs(edev);
2045 		if (rc)
2046 			DP_NOTICE(edev, "aRFS memory allocation failed\n");
2047 	}
2048 
2049 	qede_napi_add_enable(edev);
2050 	DP_INFO(edev, "Napi added and enabled\n");
2051 
2052 	rc = qede_setup_irqs(edev);
2053 	if (rc)
2054 		goto err3;
2055 	DP_INFO(edev, "Setup IRQs succeeded\n");
2056 
2057 	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2058 	if (rc)
2059 		goto err4;
2060 	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2061 
2062 	/* Program un-configured VLANs */
2063 	qede_configure_vlan_filters(edev);
2064 
2065 	/* Ask for link-up using current configuration */
2066 	memset(&link_params, 0, sizeof(link_params));
2067 	link_params.link_up = true;
2068 	edev->ops->common->set_link(edev->cdev, &link_params);
2069 
2070 	edev->state = QEDE_STATE_OPEN;
2071 
2072 	DP_INFO(edev, "Ending successfully qede load\n");
2073 
2074 	goto out;
2075 err4:
2076 	qede_sync_free_irqs(edev);
2077 	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
2078 err3:
2079 	qede_napi_disable_remove(edev);
2080 err2:
2081 	qede_free_mem_load(edev);
2082 err1:
2083 	edev->ops->common->set_fp_int(edev->cdev, 0);
2084 	qede_free_fp_array(edev);
2085 	edev->num_queues = 0;
2086 	edev->fp_num_tx = 0;
2087 	edev->fp_num_rx = 0;
2088 out:
2089 	if (!is_locked)
2090 		__qede_unlock(edev);
2091 
2092 	return rc;
2093 }
2094 
2095 /* 'func' should be able to run between unload and reload assuming interface
2096  * is actually running, or afterwards in case it's currently DOWN.
2097  */
2098 void qede_reload(struct qede_dev *edev,
2099 		 struct qede_reload_args *args, bool is_locked)
2100 {
2101 	if (!is_locked)
2102 		__qede_lock(edev);
2103 
2104 	/* Since qede_lock is held, internal state wouldn't change even
2105 	 * if netdev state would start transitioning. Check whether current
2106 	 * internal configuration indicates device is up, then reload.
2107 	 */
2108 	if (edev->state == QEDE_STATE_OPEN) {
2109 		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2110 		if (args)
2111 			args->func(edev, args);
2112 		qede_load(edev, QEDE_LOAD_RELOAD, true);
2113 
2114 		/* Since no one is going to do it for us, re-configure */
2115 		qede_config_rx_mode(edev->ndev);
2116 	} else if (args) {
2117 		args->func(edev, args);
2118 	}
2119 
2120 	if (!is_locked)
2121 		__qede_unlock(edev);
2122 }
2123 
2124 /* called with rtnl_lock */
2125 static int qede_open(struct net_device *ndev)
2126 {
2127 	struct qede_dev *edev = netdev_priv(ndev);
2128 	int rc;
2129 
2130 	netif_carrier_off(ndev);
2131 
2132 	edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2133 
2134 	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2135 	if (rc)
2136 		return rc;
2137 
2138 	udp_tunnel_get_rx_info(ndev);
2139 
2140 	edev->ops->common->update_drv_state(edev->cdev, true);
2141 
2142 	return 0;
2143 }
2144 
2145 static int qede_close(struct net_device *ndev)
2146 {
2147 	struct qede_dev *edev = netdev_priv(ndev);
2148 
2149 	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2150 
2151 	edev->ops->common->update_drv_state(edev->cdev, false);
2152 
2153 	return 0;
2154 }
2155 
2156 static void qede_link_update(void *dev, struct qed_link_output *link)
2157 {
2158 	struct qede_dev *edev = dev;
2159 
2160 	if (!netif_running(edev->ndev)) {
2161 		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
2162 		return;
2163 	}
2164 
2165 	if (link->link_up) {
2166 		if (!netif_carrier_ok(edev->ndev)) {
2167 			DP_NOTICE(edev, "Link is up\n");
2168 			netif_tx_start_all_queues(edev->ndev);
2169 			netif_carrier_on(edev->ndev);
2170 			qede_rdma_dev_event_open(edev);
2171 		}
2172 	} else {
2173 		if (netif_carrier_ok(edev->ndev)) {
2174 			DP_NOTICE(edev, "Link is down\n");
2175 			netif_tx_disable(edev->ndev);
2176 			netif_carrier_off(edev->ndev);
2177 			qede_rdma_dev_event_close(edev);
2178 		}
2179 	}
2180 }
2181