xref: /linux/drivers/net/ethernet/microsoft/mana/mana_en.c (revision 42874e4eb35bdfc54f8514685e50434098ba4f6c)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3 
4 #include <uapi/linux/bpf.h>
5 
6 #include <linux/inetdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/filter.h>
10 #include <linux/mm.h>
11 #include <linux/pci.h>
12 
13 #include <net/checksum.h>
14 #include <net/ip6_checksum.h>
15 #include <net/page_pool/helpers.h>
16 #include <net/xdp.h>
17 
18 #include <net/mana/mana.h>
19 #include <net/mana/mana_auxiliary.h>
20 
21 static DEFINE_IDA(mana_adev_ida);
22 
23 static int mana_adev_idx_alloc(void)
24 {
25 	return ida_alloc(&mana_adev_ida, GFP_KERNEL);
26 }
27 
28 static void mana_adev_idx_free(int idx)
29 {
30 	ida_free(&mana_adev_ida, idx);
31 }
32 
33 /* Microsoft Azure Network Adapter (MANA) functions */
34 
35 static int mana_open(struct net_device *ndev)
36 {
37 	struct mana_port_context *apc = netdev_priv(ndev);
38 	int err;
39 
40 	err = mana_alloc_queues(ndev);
41 	if (err)
42 		return err;
43 
44 	apc->port_is_up = true;
45 
46 	/* Ensure port state updated before txq state */
47 	smp_wmb();
48 
49 	netif_carrier_on(ndev);
50 	netif_tx_wake_all_queues(ndev);
51 
52 	return 0;
53 }
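/* A sketch of the ordering contract implied by the barrier comments in
 * this file (drawn from the code itself, not a separate spec):
 * mana_open() publishes apc->port_is_up *before* waking the TX queues
 * (the smp_wmb() above), while mana_poll_tx_cq() reads the queue-stopped
 * state *before* apc->port_is_up (its smp_rmb()). Together they ensure
 * a CPU that observes an awake TX queue also observes port_is_up == true.
 */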
54 
55 static int mana_close(struct net_device *ndev)
56 {
57 	struct mana_port_context *apc = netdev_priv(ndev);
58 
59 	if (!apc->port_is_up)
60 		return 0;
61 
62 	return mana_detach(ndev, true);
63 }
64 
65 static bool mana_can_tx(struct gdma_queue *wq)
66 {
67 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
68 }
69 
70 static unsigned int mana_checksum_info(struct sk_buff *skb)
71 {
72 	if (skb->protocol == htons(ETH_P_IP)) {
73 		struct iphdr *ip = ip_hdr(skb);
74 
75 		if (ip->protocol == IPPROTO_TCP)
76 			return IPPROTO_TCP;
77 
78 		if (ip->protocol == IPPROTO_UDP)
79 			return IPPROTO_UDP;
80 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
81 		struct ipv6hdr *ip6 = ipv6_hdr(skb);
82 
83 		if (ip6->nexthdr == IPPROTO_TCP)
84 			return IPPROTO_TCP;
85 
86 		if (ip6->nexthdr == IPPROTO_UDP)
87 			return IPPROTO_UDP;
88 	}
89 
90 	/* No csum offloading */
91 	return 0;
92 }
93 
94 static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
95 			 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
96 {
97 	ash->dma_handle[sg_i] = da;
98 	ash->size[sg_i] = sge_len;
99 
100 	tp->wqe_req.sgl[sg_i].address = da;
101 	tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
102 	tp->wqe_req.sgl[sg_i].size = sge_len;
103 }
104 
105 static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
106 			struct mana_tx_package *tp, int gso_hs)
107 {
108 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
109 	int hsg = 1; /* num of SGEs of linear part */
110 	struct gdma_dev *gd = apc->ac->gdma_dev;
111 	int skb_hlen = skb_headlen(skb);
112 	int sge0_len, sge1_len = 0;
113 	struct gdma_context *gc;
114 	struct device *dev;
115 	skb_frag_t *frag;
116 	dma_addr_t da;
117 	int sg_i;
118 	int i;
119 
120 	gc = gd->gdma_context;
121 	dev = gc->dev;
122 
123 	if (gso_hs && gso_hs < skb_hlen) {
124 		sge0_len = gso_hs;
125 		sge1_len = skb_hlen - gso_hs;
126 	} else {
127 		sge0_len = skb_hlen;
128 	}
129 
130 	da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
131 	if (dma_mapping_error(dev, da))
132 		return -ENOMEM;
133 
134 	mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
135 
136 	if (sge1_len) {
137 		sg_i = 1;
138 		da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
139 				    DMA_TO_DEVICE);
140 		if (dma_mapping_error(dev, da))
141 			goto frag_err;
142 
143 		mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
144 		hsg = 2;
145 	}
146 
147 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
148 		sg_i = hsg + i;
149 
150 		frag = &skb_shinfo(skb)->frags[i];
151 		da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
152 				      DMA_TO_DEVICE);
153 		if (dma_mapping_error(dev, da))
154 			goto frag_err;
155 
156 		mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
157 			     gd->gpa_mkey);
158 	}
159 
160 	return 0;
161 
162 frag_err:
163 	for (i = sg_i - 1; i >= hsg; i--)
164 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
165 			       DMA_TO_DEVICE);
166 
167 	for (i = hsg - 1; i >= 0; i--)
168 		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
169 				 DMA_TO_DEVICE);
170 
171 	return -ENOMEM;
172 }
173 
174 /* Handle the case when the GSO SKB linear length is too large.
175  * The MANA NIC requires GSO packets to carry only the packet header in
176  * SGE0, so an skb whose linear part holds more than the header needs a
177  * second SGE for the remainder.
178  * Return a positive value for the number of SGEs, or a negative value
179  * for an error.
180  */
181 static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
182 			     int gso_hs)
183 {
184 	int num_sge = 1 + skb_shinfo(skb)->nr_frags;
185 	int skb_hlen = skb_headlen(skb);
186 
187 	if (gso_hs < skb_hlen) {
188 		num_sge++;
189 	} else if (gso_hs > skb_hlen) {
190 		if (net_ratelimit())
191 			netdev_err(ndev,
192 				   "TX nonlinear head: hs:%d, skb_hlen:%d\n",
193 				   gso_hs, skb_hlen);
194 
195 		return -EINVAL;
196 	}
197 
198 	return num_sge;
199 }
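/* A worked example of the SGE accounting above (illustrative numbers,
 * not taken from the source): a TSO skb with gso_hs = 54 (14B Ethernet
 * + 20B IPv4 + 20B TCP), skb_headlen() = 200 and 3 page frags yields
 *
 *   num_sge = 1 (base) + 3 (frags) + 1 (extra head SGE) = 5
 *
 * mana_map_skb() then maps SGE0 = bytes 0..53 of the linear part (the
 * headers) and SGE1 = bytes 54..199 (the rest), i.e. its hsg = 2 case.
 */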
200 
201 /* Get the GSO packet's header size */
202 static int mana_get_gso_hs(struct sk_buff *skb)
203 {
204 	int gso_hs;
205 
206 	if (skb->encapsulation) {
207 		gso_hs = skb_inner_tcp_all_headers(skb);
208 	} else {
209 		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
210 			gso_hs = skb_transport_offset(skb) +
211 				 sizeof(struct udphdr);
212 		} else {
213 			gso_hs = skb_tcp_all_headers(skb);
214 		}
215 	}
216 
217 	return gso_hs;
218 }
219 
220 netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
221 {
222 	enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
223 	struct mana_port_context *apc = netdev_priv(ndev);
224 	int gso_hs = 0; /* zero for non-GSO pkts */
225 	u16 txq_idx = skb_get_queue_mapping(skb);
226 	struct gdma_dev *gd = apc->ac->gdma_dev;
227 	bool ipv4 = false, ipv6 = false;
228 	struct mana_tx_package pkg = {};
229 	struct netdev_queue *net_txq;
230 	struct mana_stats_tx *tx_stats;
231 	struct gdma_queue *gdma_sq;
232 	unsigned int csum_type;
233 	struct mana_txq *txq;
234 	struct mana_cq *cq;
235 	int err, len;
236 
237 	if (unlikely(!apc->port_is_up))
238 		goto tx_drop;
239 
240 	if (skb_cow_head(skb, MANA_HEADROOM))
241 		goto tx_drop_count;
242 
243 	txq = &apc->tx_qp[txq_idx].txq;
244 	gdma_sq = txq->gdma_sq;
245 	cq = &apc->tx_qp[txq_idx].tx_cq;
246 	tx_stats = &txq->stats;
247 
248 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
249 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
250 
251 	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
252 		pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
253 		pkt_fmt = MANA_LONG_PKT_FMT;
254 	} else {
255 		pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
256 	}
257 
258 	if (skb_vlan_tag_present(skb)) {
259 		pkt_fmt = MANA_LONG_PKT_FMT;
260 		pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
261 		pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
262 		pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
263 		pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
264 	}
265 
266 	pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
267 
268 	if (pkt_fmt == MANA_SHORT_PKT_FMT) {
269 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
270 		u64_stats_update_begin(&tx_stats->syncp);
271 		tx_stats->short_pkt_fmt++;
272 		u64_stats_update_end(&tx_stats->syncp);
273 	} else {
274 		pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
275 		u64_stats_update_begin(&tx_stats->syncp);
276 		tx_stats->long_pkt_fmt++;
277 		u64_stats_update_end(&tx_stats->syncp);
278 	}
279 
280 	pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
281 	pkg.wqe_req.flags = 0;
282 	pkg.wqe_req.client_data_unit = 0;
283 
284 	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
285 
286 	if (skb->protocol == htons(ETH_P_IP))
287 		ipv4 = true;
288 	else if (skb->protocol == htons(ETH_P_IPV6))
289 		ipv6 = true;
290 
291 	if (skb_is_gso(skb)) {
292 		int num_sge;
293 
294 		gso_hs = mana_get_gso_hs(skb);
295 
296 		num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
297 		if (num_sge > 0)
298 			pkg.wqe_req.num_sge = num_sge;
299 		else
300 			goto tx_drop_count;
301 
302 		u64_stats_update_begin(&tx_stats->syncp);
303 		if (skb->encapsulation) {
304 			tx_stats->tso_inner_packets++;
305 			tx_stats->tso_inner_bytes += skb->len - gso_hs;
306 		} else {
307 			tx_stats->tso_packets++;
308 			tx_stats->tso_bytes += skb->len - gso_hs;
309 		}
310 		u64_stats_update_end(&tx_stats->syncp);
311 
312 		pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
313 		pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
314 
315 		pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
316 		pkg.tx_oob.s_oob.comp_tcp_csum = 1;
317 		pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
318 
319 		pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
320 		pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
321 		if (ipv4) {
322 			ip_hdr(skb)->tot_len = 0;
323 			ip_hdr(skb)->check = 0;
324 			tcp_hdr(skb)->check =
325 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
326 						   ip_hdr(skb)->daddr, 0,
327 						   IPPROTO_TCP, 0);
328 		} else {
329 			ipv6_hdr(skb)->payload_len = 0;
330 			tcp_hdr(skb)->check =
331 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
332 						 &ipv6_hdr(skb)->daddr, 0,
333 						 IPPROTO_TCP, 0);
334 		}
335 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
336 		csum_type = mana_checksum_info(skb);
337 
338 		u64_stats_update_begin(&tx_stats->syncp);
339 		tx_stats->csum_partial++;
340 		u64_stats_update_end(&tx_stats->syncp);
341 
342 		if (csum_type == IPPROTO_TCP) {
343 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
344 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
345 
346 			pkg.tx_oob.s_oob.comp_tcp_csum = 1;
347 			pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
348 
349 		} else if (csum_type == IPPROTO_UDP) {
350 			pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
351 			pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
352 
353 			pkg.tx_oob.s_oob.comp_udp_csum = 1;
354 		} else {
355 			/* Can't do offload of this type of checksum */
356 			if (skb_checksum_help(skb))
357 				goto tx_drop_count;
358 		}
359 	}
360 
361 	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
362 
363 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
364 		pkg.wqe_req.sgl = pkg.sgl_array;
365 	} else {
366 		pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
367 					    sizeof(struct gdma_sge),
368 					    GFP_ATOMIC);
369 		if (!pkg.sgl_ptr)
370 			goto tx_drop_count;
371 
372 		pkg.wqe_req.sgl = pkg.sgl_ptr;
373 	}
374 
375 	if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
376 		u64_stats_update_begin(&tx_stats->syncp);
377 		tx_stats->mana_map_err++;
378 		u64_stats_update_end(&tx_stats->syncp);
379 		goto free_sgl_ptr;
380 	}
381 
382 	skb_queue_tail(&txq->pending_skbs, skb);
383 
384 	len = skb->len;
385 	net_txq = netdev_get_tx_queue(ndev, txq_idx);
386 
387 	err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
388 					(struct gdma_posted_wqe_info *)skb->cb);
389 	if (!mana_can_tx(gdma_sq)) {
390 		netif_tx_stop_queue(net_txq);
391 		apc->eth_stats.stop_queue++;
392 	}
393 
394 	if (err) {
395 		(void)skb_dequeue_tail(&txq->pending_skbs);
396 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
397 		err = NETDEV_TX_BUSY;
398 		goto tx_busy;
399 	}
400 
401 	err = NETDEV_TX_OK;
402 	atomic_inc(&txq->pending_sends);
403 
404 	mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
405 
406 	/* skb may be freed after mana_gd_post_work_request. Do not use it. */
407 	skb = NULL;
408 
409 	tx_stats = &txq->stats;
410 	u64_stats_update_begin(&tx_stats->syncp);
411 	tx_stats->packets++;
412 	tx_stats->bytes += len;
413 	u64_stats_update_end(&tx_stats->syncp);
414 
415 tx_busy:
416 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
417 		netif_tx_wake_queue(net_txq);
418 		apc->eth_stats.wake_queue++;
419 	}
420 
421 	kfree(pkg.sgl_ptr);
422 	return err;
423 
424 free_sgl_ptr:
425 	kfree(pkg.sgl_ptr);
426 tx_drop_count:
427 	ndev->stats.tx_dropped++;
428 tx_drop:
429 	dev_kfree_skb_any(skb);
430 	return NETDEV_TX_OK;
431 }
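/* Flow-control sketch for the xmit path above (a reading of the code,
 * not documented semantics): the queue is stopped right after posting
 * whenever fewer than MAX_TX_WQE_SIZE units remain, so a full SQ is
 * never posted to twice. The re-check under tx_busy
 * (netif_tx_queue_stopped() && mana_can_tx()) closes the race where
 * mana_poll_tx_cq() freed space between the stop and this return;
 * whichever side runs last wakes the queue.
 */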
432 
433 static void mana_get_stats64(struct net_device *ndev,
434 			     struct rtnl_link_stats64 *st)
435 {
436 	struct mana_port_context *apc = netdev_priv(ndev);
437 	unsigned int num_queues = apc->num_queues;
438 	struct mana_stats_rx *rx_stats;
439 	struct mana_stats_tx *tx_stats;
440 	unsigned int start;
441 	u64 packets, bytes;
442 	int q;
443 
444 	if (!apc->port_is_up)
445 		return;
446 
447 	netdev_stats_to_stats64(st, &ndev->stats);
448 
449 	for (q = 0; q < num_queues; q++) {
450 		rx_stats = &apc->rxqs[q]->stats;
451 
452 		do {
453 			start = u64_stats_fetch_begin(&rx_stats->syncp);
454 			packets = rx_stats->packets;
455 			bytes = rx_stats->bytes;
456 		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));
457 
458 		st->rx_packets += packets;
459 		st->rx_bytes += bytes;
460 	}
461 
462 	for (q = 0; q < num_queues; q++) {
463 		tx_stats = &apc->tx_qp[q].txq.stats;
464 
465 		do {
466 			start = u64_stats_fetch_begin(&tx_stats->syncp);
467 			packets = tx_stats->packets;
468 			bytes = tx_stats->bytes;
469 		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));
470 
471 		st->tx_packets += packets;
472 		st->tx_bytes += bytes;
473 	}
474 }
475 
476 static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
477 			     int old_q)
478 {
479 	struct mana_port_context *apc = netdev_priv(ndev);
480 	u32 hash = skb_get_hash(skb);
481 	struct sock *sk = skb->sk;
482 	int txq;
483 
484 	txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
485 
486 	if (txq != old_q && sk && sk_fullsock(sk) &&
487 	    rcu_access_pointer(sk->sk_dst_cache))
488 		sk_tx_queue_set(sk, txq);
489 
490 	return txq;
491 }
492 
493 static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
494 			     struct net_device *sb_dev)
495 {
496 	int txq;
497 
498 	if (ndev->real_num_tx_queues == 1)
499 		return 0;
500 
501 	txq = sk_tx_queue_get(skb->sk);
502 
503 	if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
504 		if (skb_rx_queue_recorded(skb))
505 			txq = skb_get_rx_queue(skb);
506 		else
507 			txq = mana_get_tx_queue(ndev, skb, txq);
508 	}
509 
510 	return txq;
511 }
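/* Example of the fallback hashing above, assuming the usual
 * MANA_INDIRECT_TABLE_SIZE of 64: skb_get_hash() == 0xdeadbeef gives
 * 0xdeadbeef & 63 == 47, so the flow lands on the TX queue stored in
 * apc->indir_table[47]. For full sockets with a cached destination the
 * result is then stored via sk_tx_queue_set(), letting later packets
 * skip the hash.
 */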
512 
513 /* Release pre-allocated RX buffers */
514 static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
515 {
516 	struct device *dev;
517 	int i;
518 
519 	dev = mpc->ac->gdma_dev->gdma_context->dev;
520 
521 	if (!mpc->rxbufs_pre)
522 		goto out1;
523 
524 	if (!mpc->das_pre)
525 		goto out2;
526 
527 	while (mpc->rxbpre_total) {
528 		i = --mpc->rxbpre_total;
529 		dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
530 				 DMA_FROM_DEVICE);
531 		put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
532 	}
533 
534 	kfree(mpc->das_pre);
535 	mpc->das_pre = NULL;
536 
537 out2:
538 	kfree(mpc->rxbufs_pre);
539 	mpc->rxbufs_pre = NULL;
540 
541 out1:
542 	mpc->rxbpre_datasize = 0;
543 	mpc->rxbpre_alloc_size = 0;
544 	mpc->rxbpre_headroom = 0;
545 }
546 
547 /* Get a buffer from the pre-allocated RX buffers */
548 static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
549 {
550 	struct net_device *ndev = rxq->ndev;
551 	struct mana_port_context *mpc;
552 	void *va;
553 
554 	mpc = netdev_priv(ndev);
555 
556 	if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
557 		netdev_err(ndev, "No RX pre-allocated bufs\n");
558 		return NULL;
559 	}
560 
561 	/* Check sizes to catch unexpected coding errors */
562 	if (mpc->rxbpre_datasize != rxq->datasize) {
563 		netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
564 			   mpc->rxbpre_datasize, rxq->datasize);
565 		return NULL;
566 	}
567 
568 	if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
569 		netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
570 			   mpc->rxbpre_alloc_size, rxq->alloc_size);
571 		return NULL;
572 	}
573 
574 	if (mpc->rxbpre_headroom != rxq->headroom) {
575 		netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
576 			   mpc->rxbpre_headroom, rxq->headroom);
577 		return NULL;
578 	}
579 
580 	mpc->rxbpre_total--;
581 
582 	*da = mpc->das_pre[mpc->rxbpre_total];
583 	va = mpc->rxbufs_pre[mpc->rxbpre_total];
584 	mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
585 
586 	/* Deallocate the array after all buffers are gone */
587 	if (!mpc->rxbpre_total)
588 		mana_pre_dealloc_rxbufs(mpc);
589 
590 	return va;
591 }
592 
593 /* Get the RX buffer's data size, alloc size, and XDP headroom based on MTU */
594 static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
595 			       u32 *headroom)
596 {
597 	if (mtu > MANA_XDP_MTU_MAX)
598 		*headroom = 0; /* no support for XDP */
599 	else
600 		*headroom = XDP_PACKET_HEADROOM;
601 
602 	*alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
603 
604 	*datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
605 }
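/* Worked sizing example, assuming XDP_PACKET_HEADROOM == 256 and
 * MANA_RX_DATA_ALIGN == 64: for mtu = 1500 (<= MANA_XDP_MTU_MAX),
 *
 *   headroom   = 256
 *   alloc_size = 1500 + MANA_RXBUF_PAD + 256
 *   datasize   = ALIGN(1500 + 14, 64) = 1536
 *
 * i.e. the DMA-visible data area is the Ethernet frame rounded up for
 * alignment, while alloc_size also reserves the XDP headroom and the
 * shared-info/padding space accounted for by MANA_RXBUF_PAD.
 */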
606 
607 static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
608 {
609 	struct device *dev;
610 	struct page *page;
611 	dma_addr_t da;
612 	int num_rxb;
613 	void *va;
614 	int i;
615 
616 	mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
617 			   &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
618 
619 	dev = mpc->ac->gdma_dev->gdma_context->dev;
620 
621 	num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
622 
623 	WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
624 	mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
625 	if (!mpc->rxbufs_pre)
626 		goto error;
627 
628 	mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
629 	if (!mpc->das_pre)
630 		goto error;
631 
632 	mpc->rxbpre_total = 0;
633 
634 	for (i = 0; i < num_rxb; i++) {
635 		if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
636 			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
637 			if (!va)
638 				goto error;
639 
640 			page = virt_to_head_page(va);
641 			/* Check if the frag falls back to single page */
642 			if (compound_order(page) <
643 			    get_order(mpc->rxbpre_alloc_size)) {
644 				put_page(page);
645 				goto error;
646 			}
647 		} else {
648 			page = dev_alloc_page();
649 			if (!page)
650 				goto error;
651 
652 			va = page_to_virt(page);
653 		}
654 
655 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
656 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
657 		if (dma_mapping_error(dev, da)) {
658 			put_page(virt_to_head_page(va));
659 			goto error;
660 		}
661 
662 		mpc->rxbufs_pre[i] = va;
663 		mpc->das_pre[i] = da;
664 		mpc->rxbpre_total = i + 1;
665 	}
666 
667 	return 0;
668 
669 error:
670 	mana_pre_dealloc_rxbufs(mpc);
671 	return -ENOMEM;
672 }
673 
674 static int mana_change_mtu(struct net_device *ndev, int new_mtu)
675 {
676 	struct mana_port_context *mpc = netdev_priv(ndev);
677 	unsigned int old_mtu = ndev->mtu;
678 	int err;
679 
680 	/* Pre-allocate buffers to prevent failure in mana_attach later */
681 	err = mana_pre_alloc_rxbufs(mpc, new_mtu);
682 	if (err) {
683 		netdev_err(ndev, "Insufficient memory for new MTU\n");
684 		return err;
685 	}
686 
687 	err = mana_detach(ndev, false);
688 	if (err) {
689 		netdev_err(ndev, "mana_detach failed: %d\n", err);
690 		goto out;
691 	}
692 
693 	ndev->mtu = new_mtu;
694 
695 	err = mana_attach(ndev);
696 	if (err) {
697 		netdev_err(ndev, "mana_attach failed: %d\n", err);
698 		ndev->mtu = old_mtu;
699 	}
700 
701 out:
702 	mana_pre_dealloc_rxbufs(mpc);
703 	return err;
704 }
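/* The MTU-change sequence above, in order: pre-allocate RX buffers for
 * the new size (so the later attach cannot fail on memory), detach the
 * port, commit ndev->mtu, re-attach (consuming the pre-allocated
 * buffers via mana_get_rxbuf_pre()), and finally free any pre-allocated
 * buffers left over. On attach failure the old MTU value is restored
 * and the error is returned to the caller.
 */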
705 
706 static const struct net_device_ops mana_devops = {
707 	.ndo_open		= mana_open,
708 	.ndo_stop		= mana_close,
709 	.ndo_select_queue	= mana_select_queue,
710 	.ndo_start_xmit		= mana_start_xmit,
711 	.ndo_validate_addr	= eth_validate_addr,
712 	.ndo_get_stats64	= mana_get_stats64,
713 	.ndo_bpf		= mana_bpf,
714 	.ndo_xdp_xmit		= mana_xdp_xmit,
715 	.ndo_change_mtu		= mana_change_mtu,
716 };
717 
718 static void mana_cleanup_port_context(struct mana_port_context *apc)
719 {
720 	kfree(apc->rxqs);
721 	apc->rxqs = NULL;
722 }
723 
724 static int mana_init_port_context(struct mana_port_context *apc)
725 {
726 	apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
727 			    GFP_KERNEL);
728 
729 	return !apc->rxqs ? -ENOMEM : 0;
730 }
731 
732 static int mana_send_request(struct mana_context *ac, void *in_buf,
733 			     u32 in_len, void *out_buf, u32 out_len)
734 {
735 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
736 	struct gdma_resp_hdr *resp = out_buf;
737 	struct gdma_req_hdr *req = in_buf;
738 	struct device *dev = gc->dev;
739 	static atomic_t activity_id;
740 	int err;
741 
742 	req->dev_id = gc->mana.dev_id;
743 	req->activity_id = atomic_inc_return(&activity_id);
744 
745 	err = mana_gd_send_request(gc, in_len, in_buf, out_len,
746 				   out_buf);
747 	if (err || resp->status) {
748 		dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
749 			err, resp->status);
750 		return err ? err : -EPROTO;
751 	}
752 
753 	if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
754 	    req->activity_id != resp->activity_id) {
755 		dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
756 			req->dev_id.as_uint32, resp->dev_id.as_uint32,
757 			req->activity_id, resp->activity_id);
758 		return -EPROTO;
759 	}
760 
761 	return 0;
762 }
763 
764 static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
765 				const enum mana_command_code expected_code,
766 				const u32 min_size)
767 {
768 	if (resp_hdr->response.msg_type != expected_code)
769 		return -EPROTO;
770 
771 	if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
772 		return -EPROTO;
773 
774 	if (resp_hdr->response.msg_size < min_size)
775 		return -EPROTO;
776 
777 	return 0;
778 }
779 
780 static int mana_pf_register_hw_vport(struct mana_port_context *apc)
781 {
782 	struct mana_register_hw_vport_resp resp = {};
783 	struct mana_register_hw_vport_req req = {};
784 	int err;
785 
786 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
787 			     sizeof(req), sizeof(resp));
788 	req.attached_gfid = 1;
789 	req.is_pf_default_vport = 1;
790 	req.allow_all_ether_types = 1;
791 
792 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
793 				sizeof(resp));
794 	if (err) {
795 		netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
796 		return err;
797 	}
798 
799 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
800 				   sizeof(resp));
801 	if (err || resp.hdr.status) {
802 		netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
803 			   err, resp.hdr.status);
804 		return err ? err : -EPROTO;
805 	}
806 
807 	apc->port_handle = resp.hw_vport_handle;
808 	return 0;
809 }
810 
811 static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
812 {
813 	struct mana_deregister_hw_vport_resp resp = {};
814 	struct mana_deregister_hw_vport_req req = {};
815 	int err;
816 
817 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
818 			     sizeof(req), sizeof(resp));
819 	req.hw_vport_handle = apc->port_handle;
820 
821 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
822 				sizeof(resp));
823 	if (err) {
824 		netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
825 			   err);
826 		return;
827 	}
828 
829 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
830 				   sizeof(resp));
831 	if (err || resp.hdr.status)
832 		netdev_err(apc->ndev,
833 			   "Failed to deregister hw vPort: %d, 0x%x\n",
834 			   err, resp.hdr.status);
835 }
836 
837 static int mana_pf_register_filter(struct mana_port_context *apc)
838 {
839 	struct mana_register_filter_resp resp = {};
840 	struct mana_register_filter_req req = {};
841 	int err;
842 
843 	mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
844 			     sizeof(req), sizeof(resp));
845 	req.vport = apc->port_handle;
846 	memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
847 
848 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
849 				sizeof(resp));
850 	if (err) {
851 		netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
852 		return err;
853 	}
854 
855 	err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
856 				   sizeof(resp));
857 	if (err || resp.hdr.status) {
858 		netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
859 			   err, resp.hdr.status);
860 		return err ? err : -EPROTO;
861 	}
862 
863 	apc->pf_filter_handle = resp.filter_handle;
864 	return 0;
865 }
866 
867 static void mana_pf_deregister_filter(struct mana_port_context *apc)
868 {
869 	struct mana_deregister_filter_resp resp = {};
870 	struct mana_deregister_filter_req req = {};
871 	int err;
872 
873 	mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
874 			     sizeof(req), sizeof(resp));
875 	req.filter_handle = apc->pf_filter_handle;
876 
877 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
878 				sizeof(resp));
879 	if (err) {
880 		netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
881 			   err);
882 		return;
883 	}
884 
885 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
886 				   sizeof(resp));
887 	if (err || resp.hdr.status)
888 		netdev_err(apc->ndev,
889 			   "Failed to deregister filter: %d, 0x%x\n",
890 			   err, resp.hdr.status);
891 }
892 
893 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
894 				 u32 proto_minor_ver, u32 proto_micro_ver,
895 				 u16 *max_num_vports)
896 {
897 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
898 	struct mana_query_device_cfg_resp resp = {};
899 	struct mana_query_device_cfg_req req = {};
900 	struct device *dev = gc->dev;
901 	int err = 0;
902 
903 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
904 			     sizeof(req), sizeof(resp));
905 
906 	req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
907 
908 	req.proto_major_ver = proto_major_ver;
909 	req.proto_minor_ver = proto_minor_ver;
910 	req.proto_micro_ver = proto_micro_ver;
911 
912 	err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
913 	if (err) {
913 		dev_err(dev, "Failed to query config: %d\n", err);
915 		return err;
916 	}
917 
918 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
919 				   sizeof(resp));
920 	if (err || resp.hdr.status) {
921 		dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
922 			resp.hdr.status);
923 		if (!err)
924 			err = -EPROTO;
925 		return err;
926 	}
927 
928 	*max_num_vports = resp.max_num_vports;
929 
930 	if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
931 		gc->adapter_mtu = resp.adapter_mtu;
932 	else
933 		gc->adapter_mtu = ETH_FRAME_LEN;
934 
935 	return 0;
936 }
937 
938 static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
939 				u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
940 {
941 	struct mana_query_vport_cfg_resp resp = {};
942 	struct mana_query_vport_cfg_req req = {};
943 	int err;
944 
945 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
946 			     sizeof(req), sizeof(resp));
947 
948 	req.vport_index = vport_index;
949 
950 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
951 				sizeof(resp));
952 	if (err)
953 		return err;
954 
955 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
956 				   sizeof(resp));
957 	if (err)
958 		return err;
959 
960 	if (resp.hdr.status)
961 		return -EPROTO;
962 
963 	*max_sq = resp.max_num_sq;
964 	*max_rq = resp.max_num_rq;
965 	*num_indir_entry = resp.num_indirection_ent;
966 
967 	apc->port_handle = resp.vport;
968 	ether_addr_copy(apc->mac_addr, resp.mac_addr);
969 
970 	return 0;
971 }
972 
973 void mana_uncfg_vport(struct mana_port_context *apc)
974 {
975 	mutex_lock(&apc->vport_mutex);
976 	apc->vport_use_count--;
977 	WARN_ON(apc->vport_use_count < 0);
978 	mutex_unlock(&apc->vport_mutex);
979 }
980 EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA);
981 
982 int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
983 		   u32 doorbell_pg_id)
984 {
985 	struct mana_config_vport_resp resp = {};
986 	struct mana_config_vport_req req = {};
987 	int err;
988 
989 	/* This function is used to program the Ethernet port in the hardware
990 	 * table. It can be called from the Ethernet driver or the RDMA driver.
991 	 *
992 	 * For Ethernet usage, the hardware supports only one active user on a
993 	 * physical port. The driver checks on the port usage before programming
994 	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
995 	 * device to kernel NET layer (Ethernet driver).
996 	 *
997 	 * Because the RDMA driver doesn't know in advance which QP type the
998 	 * user will create, it exposes the device with all its ports. The user
999 	 * may not be able to create RAW QP on a port if this port is already
1000 	 * in use by the Ethernet driver in the kernel.
1001 	 *
1002 	 * This physical port limitation only applies to the RAW QP. For RC QP,
1003 	 * the hardware doesn't have this limitation. The user can create RC
1004 	 * QPs on a physical port up to the hardware limits independent of the
1005 	 * Ethernet usage on the same port.
1006 	 */
1007 	mutex_lock(&apc->vport_mutex);
1008 	if (apc->vport_use_count > 0) {
1009 		mutex_unlock(&apc->vport_mutex);
1010 		return -EBUSY;
1011 	}
1012 	apc->vport_use_count++;
1013 	mutex_unlock(&apc->vport_mutex);
1014 
1015 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1016 			     sizeof(req), sizeof(resp));
1017 	req.vport = apc->port_handle;
1018 	req.pdid = protection_dom_id;
1019 	req.doorbell_pageid = doorbell_pg_id;
1020 
1021 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1022 				sizeof(resp));
1023 	if (err) {
1024 		netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1025 		goto out;
1026 	}
1027 
1028 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1029 				   sizeof(resp));
1030 	if (err || resp.hdr.status) {
1031 		netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1032 			   err, resp.hdr.status);
1033 		if (!err)
1034 			err = -EPROTO;
1035 
1036 		goto out;
1037 	}
1038 
1039 	apc->tx_shortform_allowed = resp.short_form_allowed;
1040 	apc->tx_vp_offset = resp.tx_vport_offset;
1041 
1042 	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
1043 		    apc->port_handle, protection_dom_id, doorbell_pg_id);
1044 out:
1045 	if (err)
1046 		mana_uncfg_vport(apc);
1047 
1048 	return err;
1049 }
1050 EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
1051 
1052 static int mana_cfg_vport_steering(struct mana_port_context *apc,
1053 				   enum TRI_STATE rx,
1054 				   bool update_default_rxobj, bool update_key,
1055 				   bool update_tab)
1056 {
1057 	u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
1058 	struct mana_cfg_rx_steer_req_v2 *req;
1059 	struct mana_cfg_rx_steer_resp resp = {};
1060 	struct net_device *ndev = apc->ndev;
1061 	mana_handle_t *req_indir_tab;
1062 	u32 req_buf_size;
1063 	int err;
1064 
1065 	req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
1066 	req = kzalloc(req_buf_size, GFP_KERNEL);
1067 	if (!req)
1068 		return -ENOMEM;
1069 
1070 	mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1071 			     sizeof(resp));
1072 
1073 	req->hdr.req.msg_version = GDMA_MESSAGE_V2;
1074 
1075 	req->vport = apc->port_handle;
1076 	req->num_indir_entries = num_entries;
1077 	req->indir_tab_offset = sizeof(*req);
1078 	req->rx_enable = rx;
1079 	req->rss_enable = apc->rss_state;
1080 	req->update_default_rxobj = update_default_rxobj;
1081 	req->update_hashkey = update_key;
1082 	req->update_indir_tab = update_tab;
1083 	req->default_rxobj = apc->default_rxobj;
1084 	req->cqe_coalescing_enable = 0;
1085 
1086 	if (update_key)
1087 		memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1088 
1089 	if (update_tab) {
1090 		req_indir_tab = (mana_handle_t *)(req + 1);
1091 		memcpy(req_indir_tab, apc->rxobj_table,
1092 		       req->num_indir_entries * sizeof(mana_handle_t));
1093 	}
1094 
1095 	err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1096 				sizeof(resp));
1097 	if (err) {
1098 		netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1099 		goto out;
1100 	}
1101 
1102 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1103 				   sizeof(resp));
1104 	if (err) {
1105 		netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1106 		goto out;
1107 	}
1108 
1109 	if (resp.hdr.status) {
1110 		netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1111 			   resp.hdr.status);
1112 		err = -EPROTO;
1113 	}
1114 
1115 	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1116 		    apc->port_handle, num_entries);
1117 out:
1118 	kfree(req);
1119 	return err;
1120 }
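/* Request layout used above: the indirection table travels in the same
 * buffer, immediately after the fixed-size header, which is why
 * indir_tab_offset == sizeof(*req) and req_indir_tab == (req + 1):
 *
 *   +---------------------------------+----------------------------------+
 *   | struct mana_cfg_rx_steer_req_v2 | mana_handle_t[num_indir_entries] |
 *   +---------------------------------+----------------------------------+
 */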
1121 
1122 int mana_create_wq_obj(struct mana_port_context *apc,
1123 		       mana_handle_t vport,
1124 		       u32 wq_type, struct mana_obj_spec *wq_spec,
1125 		       struct mana_obj_spec *cq_spec,
1126 		       mana_handle_t *wq_obj)
1127 {
1128 	struct mana_create_wqobj_resp resp = {};
1129 	struct mana_create_wqobj_req req = {};
1130 	struct net_device *ndev = apc->ndev;
1131 	int err;
1132 
1133 	mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1134 			     sizeof(req), sizeof(resp));
1135 	req.vport = vport;
1136 	req.wq_type = wq_type;
1137 	req.wq_gdma_region = wq_spec->gdma_region;
1138 	req.cq_gdma_region = cq_spec->gdma_region;
1139 	req.wq_size = wq_spec->queue_size;
1140 	req.cq_size = cq_spec->queue_size;
1141 	req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1142 	req.cq_parent_qid = cq_spec->attached_eq;
1143 
1144 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1145 				sizeof(resp));
1146 	if (err) {
1147 		netdev_err(ndev, "Failed to create WQ object: %d\n", err);
1148 		goto out;
1149 	}
1150 
1151 	err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1152 				   sizeof(resp));
1153 	if (err || resp.hdr.status) {
1154 		netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1155 			   resp.hdr.status);
1156 		if (!err)
1157 			err = -EPROTO;
1158 		goto out;
1159 	}
1160 
1161 	if (resp.wq_obj == INVALID_MANA_HANDLE) {
1162 		netdev_err(ndev, "Got an invalid WQ object handle\n");
1163 		err = -EPROTO;
1164 		goto out;
1165 	}
1166 
1167 	*wq_obj = resp.wq_obj;
1168 	wq_spec->queue_index = resp.wq_id;
1169 	cq_spec->queue_index = resp.cq_id;
1170 
1171 	return 0;
1172 out:
1173 	return err;
1174 }
1175 EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA);
1176 
1177 void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1178 			 mana_handle_t wq_obj)
1179 {
1180 	struct mana_destroy_wqobj_resp resp = {};
1181 	struct mana_destroy_wqobj_req req = {};
1182 	struct net_device *ndev = apc->ndev;
1183 	int err;
1184 
1185 	mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1186 			     sizeof(req), sizeof(resp));
1187 	req.wq_type = wq_type;
1188 	req.wq_obj_handle = wq_obj;
1189 
1190 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1191 				sizeof(resp));
1192 	if (err) {
1193 		netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
1194 		return;
1195 	}
1196 
1197 	err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1198 				   sizeof(resp));
1199 	if (err || resp.hdr.status)
1200 		netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
1201 			   resp.hdr.status);
1202 }
1203 EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA);
1204 
1205 static void mana_destroy_eq(struct mana_context *ac)
1206 {
1207 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
1208 	struct gdma_queue *eq;
1209 	int i;
1210 
1211 	if (!ac->eqs)
1212 		return;
1213 
1214 	for (i = 0; i < gc->max_num_queues; i++) {
1215 		eq = ac->eqs[i].eq;
1216 		if (!eq)
1217 			continue;
1218 
1219 		mana_gd_destroy_queue(gc, eq);
1220 	}
1221 
1222 	kfree(ac->eqs);
1223 	ac->eqs = NULL;
1224 }
1225 
1226 static int mana_create_eq(struct mana_context *ac)
1227 {
1228 	struct gdma_dev *gd = ac->gdma_dev;
1229 	struct gdma_context *gc = gd->gdma_context;
1230 	struct gdma_queue_spec spec = {};
1231 	int err;
1232 	int i;
1233 
1234 	ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq),
1235 			  GFP_KERNEL);
1236 	if (!ac->eqs)
1237 		return -ENOMEM;
1238 
1239 	spec.type = GDMA_EQ;
1240 	spec.monitor_avl_buf = false;
1241 	spec.queue_size = EQ_SIZE;
1242 	spec.eq.callback = NULL;
1243 	spec.eq.context = ac->eqs;
1244 	spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1245 
1246 	for (i = 0; i < gc->max_num_queues; i++) {
1247 		err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1248 		if (err)
1249 			goto out;
1250 	}
1251 
1252 	return 0;
1253 out:
1254 	mana_destroy_eq(ac);
1255 	return err;
1256 }
1257 
1258 static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1259 {
1260 	struct mana_fence_rq_resp resp = {};
1261 	struct mana_fence_rq_req req = {};
1262 	int err;
1263 
1264 	init_completion(&rxq->fence_event);
1265 
1266 	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1267 			     sizeof(req), sizeof(resp));
1268 	req.wq_obj_handle =  rxq->rxobj;
1269 
1270 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1271 				sizeof(resp));
1272 	if (err) {
1273 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1274 			   rxq->rxq_idx, err);
1275 		return err;
1276 	}
1277 
1278 	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1279 	if (err || resp.hdr.status) {
1280 		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1281 			   rxq->rxq_idx, err, resp.hdr.status);
1282 		if (!err)
1283 			err = -EPROTO;
1284 
1285 		return err;
1286 	}
1287 
1288 	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1289 		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1290 			   rxq->rxq_idx);
1291 		return -ETIMEDOUT;
1292 	}
1293 
1294 	return 0;
1295 }
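/* Fence protocol sketch: MANA_FENCE_RQ asks the HW to flush the RQ, but
 * the acknowledgement does not arrive in the admin response; it comes
 * back as a CQE_RX_OBJECT_FENCE completion on the RQ's CQ, which
 * mana_process_rx_cqe() turns into complete(&rxq->fence_event). The
 * 10 * HZ wait above therefore bounds the whole round trip.
 */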
1296 
1297 static void mana_fence_rqs(struct mana_port_context *apc)
1298 {
1299 	unsigned int rxq_idx;
1300 	struct mana_rxq *rxq;
1301 	int err;
1302 
1303 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1304 		rxq = apc->rxqs[rxq_idx];
1305 		err = mana_fence_rq(apc, rxq);
1306 
1307 		/* If fencing failed or timed out, fall back to a short sleep. */
1308 		if (err)
1309 			msleep(100);
1310 	}
1311 }
1312 
1313 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1314 {
1315 	u32 used_space_old;
1316 	u32 used_space_new;
1317 
1318 	used_space_old = wq->head - wq->tail;
1319 	used_space_new = wq->head - (wq->tail + num_units);
1320 
1321 	if (WARN_ON_ONCE(used_space_new > used_space_old))
1322 		return -ERANGE;
1323 
1324 	wq->tail += num_units;
1325 	return 0;
1326 }
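/* head and tail are free-running u32 counters, so the unsigned
 * subtractions above stay correct across wraparound. Illustrative
 * values: head = 5, tail = 0xFFFFFFFE, num_units = 3 gives
 * used_space_old = 7 and used_space_new = 4 (both mod 2^32); the
 * WARN_ON_ONCE() only trips if the tail would move past the head.
 */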
1327 
1328 static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1329 {
1330 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1331 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1332 	struct device *dev = gc->dev;
1333 	int hsg, i;
1334 
1335 	/* Number of SGEs of linear part */
1336 	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1337 
1338 	for (i = 0; i < hsg; i++)
1339 		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1340 				 DMA_TO_DEVICE);
1341 
1342 	for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1343 		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1344 			       DMA_TO_DEVICE);
1345 }
1346 
1347 static void mana_poll_tx_cq(struct mana_cq *cq)
1348 {
1349 	struct gdma_comp *completions = cq->gdma_comp_buf;
1350 	struct gdma_posted_wqe_info *wqe_info;
1351 	unsigned int pkt_transmitted = 0;
1352 	unsigned int wqe_unit_cnt = 0;
1353 	struct mana_txq *txq = cq->txq;
1354 	struct mana_port_context *apc;
1355 	struct netdev_queue *net_txq;
1356 	struct gdma_queue *gdma_wq;
1357 	unsigned int avail_space;
1358 	struct net_device *ndev;
1359 	struct sk_buff *skb;
1360 	bool txq_stopped;
1361 	int comp_read;
1362 	int i;
1363 
1364 	ndev = txq->ndev;
1365 	apc = netdev_priv(ndev);
1366 
1367 	comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1368 				    CQE_POLLING_BUFFER);
1369 
1370 	if (comp_read < 1)
1371 		return;
1372 
1373 	for (i = 0; i < comp_read; i++) {
1374 		struct mana_tx_comp_oob *cqe_oob;
1375 
1376 		if (WARN_ON_ONCE(!completions[i].is_sq))
1377 			return;
1378 
1379 		cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1380 		if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1381 				 MANA_CQE_COMPLETION))
1382 			return;
1383 
1384 		switch (cqe_oob->cqe_hdr.cqe_type) {
1385 		case CQE_TX_OKAY:
1386 			break;
1387 
1388 		case CQE_TX_SA_DROP:
1389 		case CQE_TX_MTU_DROP:
1390 		case CQE_TX_INVALID_OOB:
1391 		case CQE_TX_INVALID_ETH_TYPE:
1392 		case CQE_TX_HDR_PROCESSING_ERROR:
1393 		case CQE_TX_VF_DISABLED:
1394 		case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1395 		case CQE_TX_VPORT_DISABLED:
1396 		case CQE_TX_VLAN_TAGGING_VIOLATION:
1397 			if (net_ratelimit())
1398 				netdev_err(ndev, "TX: CQE error %d\n",
1399 					   cqe_oob->cqe_hdr.cqe_type);
1400 
1401 			apc->eth_stats.tx_cqe_err++;
1402 			break;
1403 
1404 		default:
1405 			/* If the CQE type is unknown, log an error,
1406 			 * and still free the SKB, update tail, etc.
1407 			 */
1408 			if (net_ratelimit())
1409 				netdev_err(ndev, "TX: unknown CQE type %d\n",
1410 					   cqe_oob->cqe_hdr.cqe_type);
1411 
1412 			apc->eth_stats.tx_cqe_unknown_type++;
1413 			break;
1414 		}
1415 
1416 		if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1417 			return;
1418 
1419 		skb = skb_dequeue(&txq->pending_skbs);
1420 		if (WARN_ON_ONCE(!skb))
1421 			return;
1422 
1423 		wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1424 		wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1425 
1426 		mana_unmap_skb(skb, apc);
1427 
1428 		napi_consume_skb(skb, cq->budget);
1429 
1430 		pkt_transmitted++;
1431 	}
1432 
1433 	if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1434 		return;
1435 
1436 	mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1437 
1438 	gdma_wq = txq->gdma_sq;
1439 	avail_space = mana_gd_wq_avail_space(gdma_wq);
1440 
1441 	/* Ensure tail updated before checking q stop */
1442 	smp_mb();
1443 
1444 	net_txq = txq->net_txq;
1445 	txq_stopped = netif_tx_queue_stopped(net_txq);
1446 
1447 	/* Ensure checking txq_stopped before apc->port_is_up. */
1448 	smp_rmb();
1449 
1450 	if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1451 		netif_tx_wake_queue(net_txq);
1452 		apc->eth_stats.wake_queue++;
1453 	}
1454 
1455 	if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1456 		WARN_ON_ONCE(1);
1457 
1458 	cq->work_done = pkt_transmitted;
1459 }
1460 
1461 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1462 {
1463 	struct mana_recv_buf_oob *recv_buf_oob;
1464 	u32 curr_index;
1465 	int err;
1466 
1467 	curr_index = rxq->buf_index++;
1468 	if (rxq->buf_index == rxq->num_rx_buf)
1469 		rxq->buf_index = 0;
1470 
1471 	recv_buf_oob = &rxq->rx_oobs[curr_index];
1472 
1473 	err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1474 					&recv_buf_oob->wqe_inf);
1475 	if (WARN_ON_ONCE(err))
1476 		return;
1477 
1478 	WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1479 }
1480 
1481 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1482 				      uint pkt_len, struct xdp_buff *xdp)
1483 {
1484 	struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1485 
1486 	if (!skb)
1487 		return NULL;
1488 
1489 	if (xdp->data_hard_start) {
1490 		skb_reserve(skb, xdp->data - xdp->data_hard_start);
1491 		skb_put(skb, xdp->data_end - xdp->data);
1492 		return skb;
1493 	}
1494 
1495 	skb_reserve(skb, rxq->headroom);
1496 	skb_put(skb, pkt_len);
1497 
1498 	return skb;
1499 }
1500 
1501 static void mana_rx_skb(void *buf_va, bool from_pool,
1502 			struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq)
1503 {
1504 	struct mana_stats_rx *rx_stats = &rxq->stats;
1505 	struct net_device *ndev = rxq->ndev;
1506 	uint pkt_len = cqe->ppi[0].pkt_len;
1507 	u16 rxq_idx = rxq->rxq_idx;
1508 	struct napi_struct *napi;
1509 	struct xdp_buff xdp = {};
1510 	struct sk_buff *skb;
1511 	u32 hash_value;
1512 	u32 act;
1513 
1514 	rxq->rx_cq.work_done++;
1515 	napi = &rxq->rx_cq.napi;
1516 
1517 	if (!buf_va) {
1518 		++ndev->stats.rx_dropped;
1519 		return;
1520 	}
1521 
1522 	act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1523 
1524 	if (act == XDP_REDIRECT && !rxq->xdp_rc)
1525 		return;
1526 
1527 	if (act != XDP_PASS && act != XDP_TX)
1528 		goto drop_xdp;
1529 
1530 	skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1531 
1532 	if (!skb)
1533 		goto drop;
1534 
1535 	if (from_pool)
1536 		skb_mark_for_recycle(skb);
1537 
1538 	skb->dev = napi->dev;
1539 
1540 	skb->protocol = eth_type_trans(skb, ndev);
1541 	skb_checksum_none_assert(skb);
1542 	skb_record_rx_queue(skb, rxq_idx);
1543 
1544 	if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1545 		if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1546 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1547 	}
1548 
1549 	if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1550 		hash_value = cqe->ppi[0].pkt_hash;
1551 
1552 		if (cqe->rx_hashtype & MANA_HASH_L4)
1553 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1554 		else
1555 			skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1556 	}
1557 
1558 	if (cqe->rx_vlantag_present) {
1559 		u16 vlan_tci = cqe->rx_vlan_id;
1560 
1561 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
1562 	}
1563 
1564 	u64_stats_update_begin(&rx_stats->syncp);
1565 	rx_stats->packets++;
1566 	rx_stats->bytes += pkt_len;
1567 
1568 	if (act == XDP_TX)
1569 		rx_stats->xdp_tx++;
1570 	u64_stats_update_end(&rx_stats->syncp);
1571 
1572 	if (act == XDP_TX) {
1573 		skb_set_queue_mapping(skb, rxq_idx);
1574 		mana_xdp_tx(skb, ndev);
1575 		return;
1576 	}
1577 
1578 	napi_gro_receive(napi, skb);
1579 
1580 	return;
1581 
1582 drop_xdp:
1583 	u64_stats_update_begin(&rx_stats->syncp);
1584 	rx_stats->xdp_drop++;
1585 	u64_stats_update_end(&rx_stats->syncp);
1586 
1587 drop:
1588 	if (from_pool) {
1589 		page_pool_recycle_direct(rxq->page_pool,
1590 					 virt_to_head_page(buf_va));
1591 	} else {
1592 		WARN_ON_ONCE(rxq->xdp_save_va);
1593 		/* Save for reuse */
1594 		rxq->xdp_save_va = buf_va;
1595 	}
1596 
1597 	++ndev->stats.rx_dropped;
1598 
1599 	return;
1600 }
1601 
1602 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
1603 			     dma_addr_t *da, bool *from_pool, bool is_napi)
1604 {
1605 	struct page *page;
1606 	void *va;
1607 
1608 	*from_pool = false;
1609 
1610 	/* Reuse XDP dropped page if available */
1611 	if (rxq->xdp_save_va) {
1612 		va = rxq->xdp_save_va;
1613 		rxq->xdp_save_va = NULL;
1614 	} else if (rxq->alloc_size > PAGE_SIZE) {
1615 		if (is_napi)
1616 			va = napi_alloc_frag(rxq->alloc_size);
1617 		else
1618 			va = netdev_alloc_frag(rxq->alloc_size);
1619 
1620 		if (!va)
1621 			return NULL;
1622 
1623 		page = virt_to_head_page(va);
1624 		/* Check if the frag falls back to a single page */
1625 		if (compound_order(page) < get_order(rxq->alloc_size)) {
1626 			put_page(page);
1627 			return NULL;
1628 		}
1629 	} else {
1630 		page = page_pool_dev_alloc_pages(rxq->page_pool);
1631 		if (!page)
1632 			return NULL;
1633 
1634 		*from_pool = true;
1635 		va = page_to_virt(page);
1636 	}
1637 
1638 	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
1639 			     DMA_FROM_DEVICE);
1640 	if (dma_mapping_error(dev, *da)) {
1641 		if (*from_pool)
1642 			page_pool_put_full_page(rxq->page_pool, page, false);
1643 		else
1644 			put_page(virt_to_head_page(va));
1645 
1646 		return NULL;
1647 	}
1648 
1649 	return va;
1650 }
1651 
1652 /* Allocate frag for rx buffer, and save the old buf */
1653 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
1654 			       struct mana_recv_buf_oob *rxoob, void **old_buf,
1655 			       bool *old_fp)
1656 {
1657 	bool from_pool;
1658 	dma_addr_t da;
1659 	void *va;
1660 
1661 	va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
1662 	if (!va)
1663 		return;
1664 
1665 	dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
1666 			 DMA_FROM_DEVICE);
1667 	*old_buf = rxoob->buf_va;
1668 	*old_fp = rxoob->from_pool;
1669 
1670 	rxoob->buf_va = va;
1671 	rxoob->sgl[0].address = da;
1672 	rxoob->from_pool = from_pool;
1673 }
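/* Note the ordering above: the replacement buffer is allocated and
 * DMA-mapped *before* the old one is unmapped, so on allocation failure
 * the function returns early and the ring slot keeps its current,
 * still-mapped buffer; the caller then sees *old_buf == NULL and drops
 * that one packet instead of losing an RX slot.
 */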
1674 
1675 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
1676 				struct gdma_comp *cqe)
1677 {
1678 	struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
1679 	struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1680 	struct net_device *ndev = rxq->ndev;
1681 	struct mana_recv_buf_oob *rxbuf_oob;
1682 	struct mana_port_context *apc;
1683 	struct device *dev = gc->dev;
1684 	void *old_buf = NULL;
1685 	u32 curr, pktlen;
1686 	bool old_fp;
1687 
1688 	apc = netdev_priv(ndev);
1689 
1690 	switch (oob->cqe_hdr.cqe_type) {
1691 	case CQE_RX_OKAY:
1692 		break;
1693 
1694 	case CQE_RX_TRUNCATED:
1695 		++ndev->stats.rx_dropped;
1696 		rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
1697 		netdev_warn_once(ndev, "Dropped a truncated packet\n");
1698 		goto drop;
1699 
1700 	case CQE_RX_COALESCED_4:
1701 		netdev_err(ndev, "RX coalescing is unsupported\n");
1702 		apc->eth_stats.rx_coalesced_err++;
1703 		return;
1704 
1705 	case CQE_RX_OBJECT_FENCE:
1706 		complete(&rxq->fence_event);
1707 		return;
1708 
1709 	default:
1710 		netdev_err(ndev, "Unknown RX CQE type = %d\n",
1711 			   oob->cqe_hdr.cqe_type);
1712 		apc->eth_stats.rx_cqe_unknown_type++;
1713 		return;
1714 	}
1715 
1716 	pktlen = oob->ppi[0].pkt_len;
1717 
1718 	if (pktlen == 0) {
1719 		/* Data packets should never have a packet length of zero */
1720 		netdev_err(ndev, "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
1721 			   rxq->gdma_id, cq->gdma_id, rxq->rxobj);
1722 		return;
1723 	}
1724 
1725 	curr = rxq->buf_index;
1726 	rxbuf_oob = &rxq->rx_oobs[curr];
1727 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
1728 
1729 	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
1730 
1731 	/* Unsuccessful refill will have old_buf == NULL.
1732 	 * In this case, mana_rx_skb() will drop the packet.
1733 	 */
1734 	mana_rx_skb(old_buf, old_fp, oob, rxq);
1735 
1736 drop:
1737 	mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
1738 
1739 	mana_post_pkt_rxq(rxq);
1740 }
1741 
1742 static void mana_poll_rx_cq(struct mana_cq *cq)
1743 {
1744 	struct gdma_comp *comp = cq->gdma_comp_buf;
1745 	struct mana_rxq *rxq = cq->rxq;
1746 	int comp_read, i;
1747 
1748 	comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
1749 	WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
1750 
1751 	rxq->xdp_flush = false;
1752 
1753 	for (i = 0; i < comp_read; i++) {
1754 		if (WARN_ON_ONCE(comp[i].is_sq))
1755 			return;
1756 
1757 		/* verify recv cqe references the right rxq */
1758 		if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
1759 			return;
1760 
1761 		mana_process_rx_cqe(rxq, cq, &comp[i]);
1762 	}
1763 
1764 	if (comp_read > 0) {
1765 		struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
1766 
1767 		mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
1768 	}
1769 
1770 	if (rxq->xdp_flush)
1771 		xdp_do_flush();
1772 }
1773 
1774 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
1775 {
1776 	struct mana_cq *cq = context;
1777 	u8 arm_bit;
1778 	int w;
1779 
1780 	WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
1781 
1782 	if (cq->type == MANA_CQ_TYPE_RX)
1783 		mana_poll_rx_cq(cq);
1784 	else
1785 		mana_poll_tx_cq(cq);
1786 
1787 	w = cq->work_done;
1788 
1789 	if (w < cq->budget &&
1790 	    napi_complete_done(&cq->napi, w)) {
1791 		arm_bit = SET_ARM_BIT;
1792 	} else {
1793 		arm_bit = 0;
1794 	}
1795 
1796 	mana_gd_ring_cq(gdma_queue, arm_bit);
1797 
1798 	return w;
1799 }
1800 
1801 static int mana_poll(struct napi_struct *napi, int budget)
1802 {
1803 	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
1804 	int w;
1805 
1806 	cq->work_done = 0;
1807 	cq->budget = budget;
1808 
1809 	w = mana_cq_handler(cq, cq->gdma_cq);
1810 
1811 	return min(w, budget);
1812 }
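/* NAPI contract sketch for the two functions above: mana_poll() runs
 * with a budget; when the CQ handler consumed less than the budget and
 * napi_complete_done() succeeds, the completion queue is re-armed
 * (SET_ARM_BIT) so the next event raises an interrupt. Otherwise the CQ
 * is left unarmed and NAPI polls again, keeping interrupts off while
 * the queue stays busy.
 */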
1813 
1814 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
1815 {
1816 	struct mana_cq *cq = context;
1817 
1818 	napi_schedule_irqoff(&cq->napi);
1819 }
1820 
1821 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
1822 {
1823 	struct gdma_dev *gd = apc->ac->gdma_dev;
1824 
1825 	if (!cq->gdma_cq)
1826 		return;
1827 
1828 	mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
1829 }
1830 
1831 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
1832 {
1833 	struct gdma_dev *gd = apc->ac->gdma_dev;
1834 
1835 	if (!txq->gdma_sq)
1836 		return;
1837 
1838 	mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
1839 }
1840 
1841 static void mana_destroy_txq(struct mana_port_context *apc)
1842 {
1843 	struct napi_struct *napi;
1844 	int i;
1845 
1846 	if (!apc->tx_qp)
1847 		return;
1848 
1849 	for (i = 0; i < apc->num_queues; i++) {
1850 		napi = &apc->tx_qp[i].tx_cq.napi;
1851 		napi_synchronize(napi);
1852 		napi_disable(napi);
1853 		netif_napi_del(napi);
1854 
1855 		mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
1856 
1857 		mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
1858 
1859 		mana_deinit_txq(apc, &apc->tx_qp[i].txq);
1860 	}
1861 
1862 	kfree(apc->tx_qp);
1863 	apc->tx_qp = NULL;
1864 }
1865 
1866 static int mana_create_txq(struct mana_port_context *apc,
1867 			   struct net_device *net)
1868 {
1869 	struct mana_context *ac = apc->ac;
1870 	struct gdma_dev *gd = ac->gdma_dev;
1871 	struct mana_obj_spec wq_spec;
1872 	struct mana_obj_spec cq_spec;
1873 	struct gdma_queue_spec spec;
1874 	struct gdma_context *gc;
1875 	struct mana_txq *txq;
1876 	struct mana_cq *cq;
1877 	u32 txq_size;
1878 	u32 cq_size;
1879 	int err;
1880 	int i;
1881 
1882 	apc->tx_qp = kcalloc(apc->num_queues, sizeof(struct mana_tx_qp),
1883 			     GFP_KERNEL);
1884 	if (!apc->tx_qp)
1885 		return -ENOMEM;
1886 
1887 	/*  The minimum size of the WQE is 32 bytes, hence
1888 	 *  MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
1889 	 *  the SQ can store. This value is then used to size other queues
1890 	 *  to prevent overflow.
1891 	 */
1892 	txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
1893 	BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
1894 
1895 	cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
1896 	cq_size = PAGE_ALIGN(cq_size);
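	/* Illustrative arithmetic, assuming MAX_SEND_BUFFERS_PER_QUEUE == 256
	 * and COMP_ENTRY_SIZE == 64: txq_size = 256 * 32 = 8192 bytes and
	 * cq_size = 256 * 64 = 16384 bytes, so the CQ can hold one
	 * completion for every minimum-sized WQE the SQ can hold.
	 */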
1897 
1898 	gc = gd->gdma_context;
1899 
1900 	for (i = 0; i < apc->num_queues; i++) {
1901 		apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
1902 
1903 		/* Create SQ */
1904 		txq = &apc->tx_qp[i].txq;
1905 
1906 		u64_stats_init(&txq->stats.syncp);
1907 		txq->ndev = net;
1908 		txq->net_txq = netdev_get_tx_queue(net, i);
1909 		txq->vp_offset = apc->tx_vp_offset;
1910 		skb_queue_head_init(&txq->pending_skbs);
1911 
1912 		memset(&spec, 0, sizeof(spec));
1913 		spec.type = GDMA_SQ;
1914 		spec.monitor_avl_buf = true;
1915 		spec.queue_size = txq_size;
1916 		err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
1917 		if (err)
1918 			goto out;
1919 
1920 		/* Create SQ's CQ */
1921 		cq = &apc->tx_qp[i].tx_cq;
1922 		cq->type = MANA_CQ_TYPE_TX;
1923 
1924 		cq->txq = txq;
1925 
1926 		memset(&spec, 0, sizeof(spec));
1927 		spec.type = GDMA_CQ;
1928 		spec.monitor_avl_buf = false;
1929 		spec.queue_size = cq_size;
1930 		spec.cq.callback = mana_schedule_napi;
1931 		spec.cq.parent_eq = ac->eqs[i].eq;
1932 		spec.cq.context = cq;
1933 		err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
1934 		if (err)
1935 			goto out;
1936 
1937 		memset(&wq_spec, 0, sizeof(wq_spec));
1938 		memset(&cq_spec, 0, sizeof(cq_spec));
1939 
1940 		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
1941 		wq_spec.queue_size = txq->gdma_sq->queue_size;
1942 
1943 		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
1944 		cq_spec.queue_size = cq->gdma_cq->queue_size;
1945 		cq_spec.modr_ctx_id = 0;
1946 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
1947 
1948 		err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
1949 					 &wq_spec, &cq_spec,
1950 					 &apc->tx_qp[i].tx_object);
1951 
1952 		if (err)
1953 			goto out;
1954 
1955 		txq->gdma_sq->id = wq_spec.queue_index;
1956 		cq->gdma_cq->id = cq_spec.queue_index;
1957 
1958 		txq->gdma_sq->mem_info.dma_region_handle =
1959 			GDMA_INVALID_DMA_REGION;
1960 		cq->gdma_cq->mem_info.dma_region_handle =
1961 			GDMA_INVALID_DMA_REGION;
1962 
1963 		txq->gdma_txq_id = txq->gdma_sq->id;
1964 
1965 		cq->gdma_id = cq->gdma_cq->id;
1966 
1967 		if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
1968 			err = -EINVAL;
1969 			goto out;
1970 		}
1971 
1972 		gc->cq_table[cq->gdma_id] = cq->gdma_cq;
1973 
1974 		netif_napi_add_tx(net, &cq->napi, mana_poll);
1975 		napi_enable(&cq->napi);
1976 
1977 		mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
1978 	}
1979 
1980 	return 0;
1981 out:
1982 	mana_destroy_txq(apc);
1983 	return err;
1984 }
1985 
1986 static void mana_destroy_rxq(struct mana_port_context *apc,
1987 			     struct mana_rxq *rxq, bool validate_state)
1989 {
1990 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1991 	struct mana_recv_buf_oob *rx_oob;
1992 	struct device *dev = gc->dev;
1993 	struct napi_struct *napi;
1994 	struct page *page;
1995 	int i;
1996 
1997 	if (!rxq)
1998 		return;
1999 
2000 	napi = &rxq->rx_cq.napi;
2001 
2002 	if (validate_state)
2003 		napi_synchronize(napi);
2004 
2005 	napi_disable(napi);
2006 
2007 	xdp_rxq_info_unreg(&rxq->xdp_rxq);
2008 
2009 	netif_napi_del(napi);
2010 
2011 	mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2012 
2013 	mana_deinit_cq(apc, &rxq->rx_cq);
2014 
2015 	if (rxq->xdp_save_va)
2016 		put_page(virt_to_head_page(rxq->xdp_save_va));
2017 
2018 	for (i = 0; i < rxq->num_rx_buf; i++) {
2019 		rx_oob = &rxq->rx_oobs[i];
2020 
2021 		if (!rx_oob->buf_va)
2022 			continue;
2023 
2024 		dma_unmap_single(dev, rx_oob->sgl[0].address,
2025 				 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2026 
2027 		page = virt_to_head_page(rx_oob->buf_va);
2028 
2029 		if (rx_oob->from_pool)
2030 			page_pool_put_full_page(rxq->page_pool, page, false);
2031 		else
2032 			put_page(page);
2033 
2034 		rx_oob->buf_va = NULL;
2035 	}
2036 
2037 	page_pool_destroy(rxq->page_pool);
2038 
2039 	if (rxq->gdma_rq)
2040 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
2041 
2042 	kfree(rxq);
2043 }
2044 
2045 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2046 			    struct mana_rxq *rxq, struct device *dev)
2047 {
2048 	struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2049 	bool from_pool = false;
2050 	dma_addr_t da;
2051 	void *va;
2052 
2053 	if (mpc->rxbufs_pre)
2054 		va = mana_get_rxbuf_pre(rxq, &da);
2055 	else
2056 		va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
2057 
2058 	if (!va)
2059 		return -ENOMEM;
2060 
2061 	rx_oob->buf_va = va;
2062 	rx_oob->from_pool = from_pool;
2063 
2064 	rx_oob->sgl[0].address = da;
2065 	rx_oob->sgl[0].size = rxq->datasize;
2066 	rx_oob->sgl[0].mem_key = mem_key;
2067 
2068 	return 0;
2069 }
2070 
2071 #define MANA_WQE_HEADER_SIZE 16
2072 #define MANA_WQE_SGE_SIZE 16
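/* Per-WQE footprint under these definitions: each receive WQE takes
 * ALIGN(MANA_WQE_HEADER_SIZE + MANA_WQE_SGE_SIZE * num_sge, 32) bytes,
 * i.e. 32 bytes for the single-SGE buffers posted by this driver.
 */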
2073 
2074 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2075 			     struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2076 {
2077 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2078 	struct mana_recv_buf_oob *rx_oob;
2079 	struct device *dev = gc->dev;
2080 	u32 buf_idx;
2081 	int ret;
2082 
2083 	WARN_ON(rxq->datasize == 0);
2084 
2085 	*rxq_size = 0;
2086 	*cq_size = 0;
2087 
2088 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2089 		rx_oob = &rxq->rx_oobs[buf_idx];
2090 		memset(rx_oob, 0, sizeof(*rx_oob));
2091 
2092 		rx_oob->num_sge = 1;
2093 
2094 		ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2095 				       dev);
2096 		if (ret)
2097 			return ret;
2098 
2099 		rx_oob->wqe_req.sgl = rx_oob->sgl;
2100 		rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2101 		rx_oob->wqe_req.inline_oob_size = 0;
2102 		rx_oob->wqe_req.inline_oob_data = NULL;
2103 		rx_oob->wqe_req.flags = 0;
2104 		rx_oob->wqe_req.client_data_unit = 0;
2105 
2106 		*rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2107 				   MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2108 		*cq_size += COMP_ENTRY_SIZE;
2109 	}
2110 
2111 	return 0;
2112 }
2113 
2114 static int mana_push_wqe(struct mana_rxq *rxq)
2115 {
2116 	struct mana_recv_buf_oob *rx_oob;
2117 	u32 buf_idx;
2118 	int err;
2119 
2120 	for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2121 		rx_oob = &rxq->rx_oobs[buf_idx];
2122 
2123 		err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2124 					    &rx_oob->wqe_inf);
2125 		if (err)
2126 			return -ENOSPC;
2127 	}
2128 
2129 	return 0;
2130 }
2131 
2132 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2133 {
2134 	struct page_pool_params pprm = {};
2135 	int ret;
2136 
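	/* Minimal page_pool configuration: size the pool to match one full
	 * RX ring, allocate NUMA-local to the NIC, and register the
	 * queue's NAPI context so pages freed from that context can be
	 * recycled directly instead of going through the pool's ptr_ring.
	 */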
2137 	pprm.pool_size = RX_BUFFERS_PER_QUEUE;
2138 	pprm.nid = gc->numa_node;
2139 	pprm.napi = &rxq->rx_cq.napi;
2140 
2141 	rxq->page_pool = page_pool_create(&pprm);
2142 
2143 	if (IS_ERR(rxq->page_pool)) {
2144 		ret = PTR_ERR(rxq->page_pool);
2145 		rxq->page_pool = NULL;
2146 		return ret;
2147 	}
2148 
2149 	return 0;
2150 }
2151 
2152 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2153 					u32 rxq_idx, struct mana_eq *eq,
2154 					struct net_device *ndev)
2155 {
2156 	struct gdma_dev *gd = apc->ac->gdma_dev;
2157 	struct mana_obj_spec wq_spec;
2158 	struct mana_obj_spec cq_spec;
2159 	struct gdma_queue_spec spec;
2160 	struct mana_cq *cq = NULL;
2161 	struct gdma_context *gc;
2162 	u32 cq_size, rq_size;
2163 	struct mana_rxq *rxq;
2164 	int err;
2165 
2166 	gc = gd->gdma_context;
2167 
2168 	rxq = kzalloc(struct_size(rxq, rx_oobs, RX_BUFFERS_PER_QUEUE),
2169 		      GFP_KERNEL);
2170 	if (!rxq)
2171 		return NULL;
2172 
2173 	rxq->ndev = ndev;
2174 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
2175 	rxq->rxq_idx = rxq_idx;
2176 	rxq->rxobj = INVALID_MANA_HANDLE;
2177 
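	/* Derive the RX buffer geometry from the current MTU: datasize is
	 * the largest frame the hardware may DMA (what each SGE below
	 * advertises), alloc_size the actual per-buffer allocation, and
	 * headroom the space kept in front of the frame (e.g. for XDP).
	 */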
2178 	mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2179 			   &rxq->headroom);
2180 
2181 	/* Create page pool for RX queue */
2182 	err = mana_create_page_pool(rxq, gc);
2183 	if (err) {
2184 		netdev_err(ndev, "Create page pool err:%d\n", err);
2185 		goto out;
2186 	}
2187 
2188 	err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2189 	if (err)
2190 		goto out;
2191 
2192 	rq_size = PAGE_ALIGN(rq_size);
2193 	cq_size = PAGE_ALIGN(cq_size);
2194 
2195 	/* Create RQ */
2196 	memset(&spec, 0, sizeof(spec));
2197 	spec.type = GDMA_RQ;
2198 	spec.monitor_avl_buf = true;
2199 	spec.queue_size = rq_size;
2200 	err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2201 	if (err)
2202 		goto out;
2203 
2204 	/* Create RQ's CQ */
2205 	cq = &rxq->rx_cq;
2206 	cq->type = MANA_CQ_TYPE_RX;
2207 	cq->rxq = rxq;
2208 
2209 	memset(&spec, 0, sizeof(spec));
2210 	spec.type = GDMA_CQ;
2211 	spec.monitor_avl_buf = false;
2212 	spec.queue_size = cq_size;
2213 	spec.cq.callback = mana_schedule_napi;
2214 	spec.cq.parent_eq = eq->eq;
2215 	spec.cq.context = cq;
2216 	err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2217 	if (err)
2218 		goto out;
2219 
2220 	memset(&wq_spec, 0, sizeof(wq_spec));
2221 	memset(&cq_spec, 0, sizeof(cq_spec));
2222 	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2223 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
2224 
2225 	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2226 	cq_spec.queue_size = cq->gdma_cq->queue_size;
2227 	cq_spec.modr_ctx_id = 0;
2228 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2229 
2230 	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2231 				 &wq_spec, &cq_spec, &rxq->rxobj);
2232 	if (err)
2233 		goto out;
2234 
2235 	rxq->gdma_rq->id = wq_spec.queue_index;
2236 	cq->gdma_cq->id = cq_spec.queue_index;
2237 
2238 	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2239 	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2240 
2241 	rxq->gdma_id = rxq->gdma_rq->id;
2242 	cq->gdma_id = cq->gdma_cq->id;
2243 
2244 	err = mana_push_wqe(rxq);
2245 	if (err)
2246 		goto out;
2247 
2248 	if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2249 		err = -EINVAL;
2250 		goto out;
2251 	}
2252 
2253 	gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2254 
2255 	netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
2256 
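	/* Register the queue with the XDP core and bind its memory model
	 * to the page pool, so frames consumed by XDP (e.g. XDP_REDIRECT)
	 * are returned to the pool rather than freed page by page.
	 */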
2257 	WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2258 				 cq->napi.napi_id));
2259 	WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2260 					   rxq->page_pool));
2261 
2262 	napi_enable(&cq->napi);
2263 
2264 	mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2265 out:
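	/* Success and all error paths converge here: err == 0 means the
	 * fully initialized rxq is returned to the caller.
	 */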
2266 	if (!err)
2267 		return rxq;
2268 
2269 	netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2270 
2271 	mana_destroy_rxq(apc, rxq, false);
2272 
2273 	if (cq)
2274 		mana_deinit_cq(apc, cq);
2275 
2276 	return NULL;
2277 }
2278 
2279 static int mana_add_rx_queues(struct mana_port_context *apc,
2280 			      struct net_device *ndev)
2281 {
2282 	struct mana_context *ac = apc->ac;
2283 	struct mana_rxq *rxq;
2284 	int err = 0;
2285 	int i;
2286 
2287 	for (i = 0; i < apc->num_queues; i++) {
2288 		rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2289 		if (!rxq) {
2290 			err = -ENOMEM;
2291 			goto out;
2292 		}
2293 
2294 		u64_stats_init(&rxq->stats.syncp);
2295 
2296 		apc->rxqs[i] = rxq;
2297 	}
2298 
2299 	apc->default_rxobj = apc->rxqs[0]->rxobj;
2300 out:
2301 	return err;
2302 }
2303 
2304 static void mana_destroy_vport(struct mana_port_context *apc)
2305 {
2306 	struct gdma_dev *gd = apc->ac->gdma_dev;
2307 	struct mana_rxq *rxq;
2308 	u32 rxq_idx;
2309 
2310 	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2311 		rxq = apc->rxqs[rxq_idx];
2312 		if (!rxq)
2313 			continue;
2314 
2315 		mana_destroy_rxq(apc, rxq, true);
2316 		apc->rxqs[rxq_idx] = NULL;
2317 	}
2318 
2319 	mana_destroy_txq(apc);
2320 	mana_uncfg_vport(apc);
2321 
2322 	if (gd->gdma_context->is_pf)
2323 		mana_pf_deregister_hw_vport(apc);
2324 }
2325 
2326 static int mana_create_vport(struct mana_port_context *apc,
2327 			     struct net_device *net)
2328 {
2329 	struct gdma_dev *gd = apc->ac->gdma_dev;
2330 	int err;
2331 
2332 	apc->default_rxobj = INVALID_MANA_HANDLE;
2333 
2334 	if (gd->gdma_context->is_pf) {
2335 		err = mana_pf_register_hw_vport(apc);
2336 		if (err)
2337 			return err;
2338 	}
2339 
2340 	err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2341 	if (err)
2342 		return err;
2343 
2344 	return mana_create_txq(apc, net);
2345 }
2346 
2347 static void mana_rss_table_init(struct mana_port_context *apc)
2348 {
2349 	int i;
2350 
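	/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the
	 * indirection table round-robins RSS buckets across the active
	 * receive queues.
	 */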
2351 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
2352 		apc->indir_table[i] =
2353 			ethtool_rxfh_indir_default(i, apc->num_queues);
2354 }
2355 
2356 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2357 		    bool update_hash, bool update_tab)
2358 {
2359 	u32 queue_idx;
2360 	int err;
2361 	int i;
2362 
2363 	if (update_tab) {
2364 		for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
2365 			queue_idx = apc->indir_table[i];
2366 			apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2367 		}
2368 	}
2369 
2370 	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2371 	if (err)
2372 		return err;
2373 
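	/* Fence the RQs: wait until packets already steered by the old
	 * table have been flushed, so the new steering state is fully in
	 * effect when this function returns.
	 */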
2374 	mana_fence_rqs(apc);
2375 
2376 	return 0;
2377 }
2378 
2379 void mana_query_gf_stats(struct mana_port_context *apc)
2380 {
2381 	struct mana_query_gf_stat_resp resp = {};
2382 	struct mana_query_gf_stat_req req = {};
2383 	struct net_device *ndev = apc->ndev;
2384 	int err;
2385 
2386 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2387 			     sizeof(req), sizeof(resp));
2388 	req.req_stats = STATISTICS_FLAGS_HC_TX_BYTES |
2389 			STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2390 			STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2391 			STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2392 			STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2393 			STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2394 			STATISTICS_FLAGS_HC_TX_BCAST_BYTES;
2395 
2396 	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
2397 				sizeof(resp));
2398 	if (err) {
2399 		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
2400 		return;
2401 	}
2402 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2403 				   sizeof(resp));
2404 	if (err || resp.hdr.status) {
2405 		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
2406 			   resp.hdr.status);
2407 		return;
2408 	}
2409 
2410 	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
2411 	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
2412 	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
2413 	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
2414 	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
2415 	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
2416 	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
2417 }
2418 
2419 static int mana_init_port(struct net_device *ndev)
2420 {
2421 	struct mana_port_context *apc = netdev_priv(ndev);
2422 	u32 max_txq, max_rxq, max_queues;
2423 	int port_idx = apc->port_idx;
2424 	u32 num_indirect_entries;
2425 	int err;
2426 
2427 	err = mana_init_port_context(apc);
2428 	if (err)
2429 		return err;
2430 
2431 	err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
2432 				   &num_indirect_entries);
2433 	if (err) {
2434 		netdev_err(ndev, "Failed to query info for vPort %d\n",
2435 			   port_idx);
2436 		goto reset_apc;
2437 	}
2438 
2439 	max_queues = min_t(u32, max_txq, max_rxq);
2440 	if (apc->max_queues > max_queues)
2441 		apc->max_queues = max_queues;
2442 
2443 	if (apc->num_queues > apc->max_queues)
2444 		apc->num_queues = apc->max_queues;
2445 
2446 	eth_hw_addr_set(ndev, apc->mac_addr);
2447 
2448 	return 0;
2449 
2450 reset_apc:
2451 	kfree(apc->rxqs);
2452 	apc->rxqs = NULL;
2453 	return err;
2454 }
2455 
2456 int mana_alloc_queues(struct net_device *ndev)
2457 {
2458 	struct mana_port_context *apc = netdev_priv(ndev);
2459 	struct gdma_dev *gd = apc->ac->gdma_dev;
2460 	int err;
2461 
2462 	err = mana_create_vport(apc, ndev);
2463 	if (err)
2464 		return err;
2465 
2466 	err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
2467 	if (err)
2468 		goto destroy_vport;
2469 
2470 	err = mana_add_rx_queues(apc, ndev);
2471 	if (err)
2472 		goto destroy_vport;
2473 
2474 	apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
2475 
2476 	err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
2477 	if (err)
2478 		goto destroy_vport;
2479 
2480 	mana_rss_table_init(apc);
2481 
2482 	err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
2483 	if (err)
2484 		goto destroy_vport;
2485 
2486 	if (gd->gdma_context->is_pf) {
2487 		err = mana_pf_register_filter(apc);
2488 		if (err)
2489 			goto destroy_vport;
2490 	}
2491 
2492 	mana_chn_setxdp(apc, mana_xdp_get(apc));
2493 
2494 	return 0;
2495 
2496 destroy_vport:
2497 	mana_destroy_vport(apc);
2498 	return err;
2499 }
2500 
2501 int mana_attach(struct net_device *ndev)
2502 {
2503 	struct mana_port_context *apc = netdev_priv(ndev);
2504 	int err;
2505 
2506 	ASSERT_RTNL();
2507 
2508 	err = mana_init_port(ndev);
2509 	if (err)
2510 		return err;
2511 
2512 	if (apc->port_st_save) {
2513 		err = mana_alloc_queues(ndev);
2514 		if (err) {
2515 			mana_cleanup_port_context(apc);
2516 			return err;
2517 		}
2518 	}
2519 
2520 	apc->port_is_up = apc->port_st_save;
2521 
2522 	/* Ensure port state updated before txq state */
2523 	smp_wmb();
2524 
2525 	if (apc->port_is_up)
2526 		netif_carrier_on(ndev);
2527 
2528 	netif_device_attach(ndev);
2529 
2530 	return 0;
2531 }
2532 
2533 static int mana_dealloc_queues(struct net_device *ndev)
2534 {
2535 	struct mana_port_context *apc = netdev_priv(ndev);
2536 	unsigned long timeout = jiffies + 120 * HZ;
2537 	struct gdma_dev *gd = apc->ac->gdma_dev;
2538 	struct mana_txq *txq;
2539 	struct sk_buff *skb;
2540 	int i, err;
2541 	u32 tsleep;
2542 
2543 	if (apc->port_is_up)
2544 		return -EINVAL;
2545 
2546 	mana_chn_setxdp(apc, NULL);
2547 
2548 	if (gd->gdma_context->is_pf)
2549 		mana_pf_deregister_filter(apc);
2550 
2551 	/* No packet can be transmitted now since apc->port_is_up is false.
2552 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
2553 	 * a txq because it may not yet have observed apc->port_is_up being
2554 	 * cleared to false, but that is harmless: mana_start_xmit() drops
2555 	 * any new packets while apc->port_is_up is false.
2556 	 *
2557 	 * Drain all the in-flight TX packets, with a total timeout of
2558 	 * 120 seconds across all the queues. The timeout breaks the wait
2559 	 * loop below when the hardware is not responding; the value of
2560 	 * 120 seconds was chosen with the maximum possible number of
2561 	 * queues in mind.
2562 	 */
2563 
2564 	for (i = 0; i < apc->num_queues; i++) {
2565 		txq = &apc->tx_qp[i].txq;
2566 		tsleep = 1000;
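		/* Poll with exponential backoff, starting at ~1 ms and
		 * doubling each round, until this queue drains or the
		 * 120-second deadline above expires.
		 */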
2567 		while (atomic_read(&txq->pending_sends) > 0 &&
2568 		       time_before(jiffies, timeout)) {
2569 			usleep_range(tsleep, tsleep + 1000);
2570 			tsleep <<= 1;
2571 		}
2572 		if (atomic_read(&txq->pending_sends)) {
2573 			err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
2574 			if (err) {
2575 				netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
2576 					   err, atomic_read(&txq->pending_sends),
2577 					   txq->gdma_txq_id);
2578 			}
2579 			break;
2580 		}
2581 	}
2582 
2583 	for (i = 0; i < apc->num_queues; i++) {
2584 		txq = &apc->tx_qp[i].txq;
2585 		while ((skb = skb_dequeue(&txq->pending_skbs))) {
2586 			mana_unmap_skb(skb, apc);
2587 			dev_kfree_skb_any(skb);
2588 		}
2589 		atomic_set(&txq->pending_sends, 0);
2590 	}
2591 	/* At this point the queues can no longer be woken up, because
2592 	 * mana_poll_tx_cq() is guaranteed not to be running anymore.
2593 	 */
2594 
2595 	apc->rss_state = TRI_STATE_FALSE;
2596 	err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
2597 	if (err) {
2598 		netdev_err(ndev, "Failed to disable vPort: %d\n", err);
2599 		return err;
2600 	}
2601 
2602 	mana_destroy_vport(apc);
2603 
2604 	return 0;
2605 }
2606 
2607 int mana_detach(struct net_device *ndev, bool from_close)
2608 {
2609 	struct mana_port_context *apc = netdev_priv(ndev);
2610 	int err;
2611 
2612 	ASSERT_RTNL();
2613 
2614 	apc->port_st_save = apc->port_is_up;
2615 	apc->port_is_up = false;
2616 
2617 	/* Ensure port state updated before txq state */
2618 	smp_wmb();
2619 
2620 	netif_tx_disable(ndev);
2621 	netif_carrier_off(ndev);
2622 
2623 	if (apc->port_st_save) {
2624 		err = mana_dealloc_queues(ndev);
2625 		if (err)
2626 			return err;
2627 	}
2628 
2629 	if (!from_close) {
2630 		netif_device_detach(ndev);
2631 		mana_cleanup_port_context(apc);
2632 	}
2633 
2634 	return 0;
2635 }
2636 
2637 static int mana_probe_port(struct mana_context *ac, int port_idx,
2638 			   struct net_device **ndev_storage)
2639 {
2640 	struct gdma_context *gc = ac->gdma_dev->gdma_context;
2641 	struct mana_port_context *apc;
2642 	struct net_device *ndev;
2643 	int err;
2644 
2645 	ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
2646 				 gc->max_num_queues);
2647 	if (!ndev)
2648 		return -ENOMEM;
2649 
2650 	*ndev_storage = ndev;
2651 
2652 	apc = netdev_priv(ndev);
2653 	apc->ac = ac;
2654 	apc->ndev = ndev;
2655 	apc->max_queues = gc->max_num_queues;
2656 	apc->num_queues = gc->max_num_queues;
2657 	apc->port_handle = INVALID_MANA_HANDLE;
2658 	apc->pf_filter_handle = INVALID_MANA_HANDLE;
2659 	apc->port_idx = port_idx;
2660 
2661 	mutex_init(&apc->vport_mutex);
2662 	apc->vport_use_count = 0;
2663 
2664 	ndev->netdev_ops = &mana_devops;
2665 	ndev->ethtool_ops = &mana_ethtool_ops;
2666 	ndev->mtu = ETH_DATA_LEN;
2667 	ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
2668 	ndev->min_mtu = ETH_MIN_MTU;
2669 	ndev->needed_headroom = MANA_HEADROOM;
2670 	ndev->dev_port = port_idx;
2671 	SET_NETDEV_DEV(ndev, gc->dev);
2672 
2673 	netif_carrier_off(ndev);
2674 
2675 	netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
2676 
2677 	err = mana_init_port(ndev);
2678 	if (err)
2679 		goto free_net;
2680 
2681 	netdev_lockdep_set_classes(ndev);
2682 
2683 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2684 	ndev->hw_features |= NETIF_F_RXCSUM;
2685 	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2686 	ndev->hw_features |= NETIF_F_RXHASH;
2687 	ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
2688 			 NETIF_F_HW_VLAN_CTAG_RX;
2689 	ndev->vlan_features = ndev->features;
2690 	xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
2691 			      NETDEV_XDP_ACT_REDIRECT |
2692 			      NETDEV_XDP_ACT_NDO_XMIT);
2693 
2694 	err = register_netdev(ndev);
2695 	if (err) {
2696 		netdev_err(ndev, "Unable to register netdev.\n");
2697 		goto reset_apc;
2698 	}
2699 
2700 	return 0;
2701 
2702 reset_apc:
2703 	kfree(apc->rxqs);
2704 	apc->rxqs = NULL;
2705 free_net:
2706 	*ndev_storage = NULL;
2707 	netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
2708 	free_netdev(ndev);
2709 	return err;
2710 }
2711 
2712 static void adev_release(struct device *dev)
2713 {
2714 	struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
2715 
2716 	kfree(madev);
2717 }
2718 
2719 static void remove_adev(struct gdma_dev *gd)
2720 {
2721 	struct auxiliary_device *adev = gd->adev;
2722 	int id = adev->id;
2723 
2724 	auxiliary_device_delete(adev);
2725 	auxiliary_device_uninit(adev);
2726 
2727 	mana_adev_idx_free(id);
2728 	gd->adev = NULL;
2729 }
2730 
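/* Expose this function to the RDMA stack: create an auxiliary device
 * named "rdma" on the auxiliary bus, which the mana_ib driver can bind
 * to in order to share the hardware with the Ethernet driver.
 */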
2731 static int add_adev(struct gdma_dev *gd)
2732 {
2733 	struct auxiliary_device *adev;
2734 	struct mana_adev *madev;
2735 	int ret;
2736 
2737 	madev = kzalloc(sizeof(*madev), GFP_KERNEL);
2738 	if (!madev)
2739 		return -ENOMEM;
2740 
2741 	adev = &madev->adev;
2742 	ret = mana_adev_idx_alloc();
2743 	if (ret < 0)
2744 		goto idx_fail;
2745 	adev->id = ret;
2746 
2747 	adev->name = "rdma";
2748 	adev->dev.parent = gd->gdma_context->dev;
2749 	adev->dev.release = adev_release;
2750 	madev->mdev = gd;
2751 
2752 	ret = auxiliary_device_init(adev);
2753 	if (ret)
2754 		goto init_fail;
2755 
2756 	ret = auxiliary_device_add(adev);
2757 	if (ret)
2758 		goto add_fail;
2759 
2760 	gd->adev = adev;
2761 	return 0;
2762 
2763 add_fail:
2764 	auxiliary_device_uninit(adev);
2765 
2766 init_fail:
2767 	mana_adev_idx_free(adev->id);
2768 
2769 idx_fail:
2770 	kfree(madev);
2771 
2772 	return ret;
2773 }
2774 
2775 int mana_probe(struct gdma_dev *gd, bool resuming)
2776 {
2777 	struct gdma_context *gc = gd->gdma_context;
2778 	struct mana_context *ac = gd->driver_data;
2779 	struct device *dev = gc->dev;
2780 	u16 num_ports = 0;
2781 	int err;
2782 	int i;
2783 
2784 	dev_info(dev,
2785 		 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
2786 		 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
2787 
2788 	err = mana_gd_register_device(gd);
2789 	if (err)
2790 		return err;
2791 
2792 	if (!resuming) {
2793 		ac = kzalloc(sizeof(*ac), GFP_KERNEL);
2794 		if (!ac)
2795 			return -ENOMEM;
2796 
2797 		ac->gdma_dev = gd;
2798 		gd->driver_data = ac;
2799 	}
2800 
2801 	err = mana_create_eq(ac);
2802 	if (err)
2803 		goto out;
2804 
2805 	err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
2806 				    MANA_MICRO_VERSION, &num_ports);
2807 	if (err)
2808 		goto out;
2809 
2810 	if (!resuming) {
2811 		ac->num_ports = num_ports;
2812 	} else {
2813 		if (ac->num_ports != num_ports) {
2814 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
2815 				ac->num_ports, num_ports);
2816 			err = -EPROTO;
2817 			goto out;
2818 		}
2819 	}
2820 
2821 	if (ac->num_ports == 0)
2822 		dev_err(dev, "Failed to detect any vPort\n");
2823 
2824 	if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
2825 		ac->num_ports = MAX_PORTS_IN_MANA_DEV;
2826 
2827 	if (!resuming) {
2828 		for (i = 0; i < ac->num_ports; i++) {
2829 			err = mana_probe_port(ac, i, &ac->ports[i]);
2830 			if (err)
2831 				break;
2832 		}
2833 	} else {
2834 		for (i = 0; i < ac->num_ports; i++) {
2835 			rtnl_lock();
2836 			err = mana_attach(ac->ports[i]);
2837 			rtnl_unlock();
2838 			if (err)
2839 				break;
2840 		}
2841 	}
2842 
2843 	err = add_adev(gd);
2844 out:
2845 	if (err)
2846 		mana_remove(gd, false);
2847 
2848 	return err;
2849 }
2850 
2851 void mana_remove(struct gdma_dev *gd, bool suspending)
2852 {
2853 	struct gdma_context *gc = gd->gdma_context;
2854 	struct mana_context *ac = gd->driver_data;
2855 	struct device *dev = gc->dev;
2856 	struct net_device *ndev;
2857 	int err;
2858 	int i;
2859 
2860 	/* The adev currently doesn't support suspending; always remove it */
2861 	if (gd->adev)
2862 		remove_adev(gd);
2863 
2864 	for (i = 0; i < ac->num_ports; i++) {
2865 		ndev = ac->ports[i];
2866 		if (!ndev) {
2867 			if (i == 0)
2868 				dev_err(dev, "No net device to remove\n");
2869 			goto out;
2870 		}
2871 
2872 		/* All cleanup actions should stay after rtnl_lock(), otherwise
2873 		 * other functions may access partially cleaned up data.
2874 		 */
2875 		rtnl_lock();
2876 
2877 		err = mana_detach(ndev, false);
2878 		if (err)
2879 			netdev_err(ndev, "Failed to detach vPort %d: %d\n",
2880 				   i, err);
2881 
2882 		if (suspending) {
2883 			/* No need to unregister the ndev. */
2884 			rtnl_unlock();
2885 			continue;
2886 		}
2887 
2888 		unregister_netdevice(ndev);
2889 
2890 		rtnl_unlock();
2891 
2892 		free_netdev(ndev);
2893 	}
2894 
2895 	mana_destroy_eq(ac);
2896 out:
2897 	mana_gd_deregister_device(gd);
2898 
2899 	if (suspending)
2900 		return;
2901 
2902 	gd->driver_data = NULL;
2903 	gd->gdma_context = NULL;
2904 	kfree(ac);
2905 }
2906