xref: /linux/drivers/net/ethernet/freescale/enetc/enetc.c (revision d2912cb15bdda8ba4a5dd73396ad62641af2f520)
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
3 
4 #include "enetc.h"
5 #include <linux/tcp.h>
6 #include <linux/udp.h>
7 #include <linux/of_mdio.h>
8 #include <linux/vmalloc.h>
9 
10 /* ENETC overhead: optional extension BD + 1 BD gap */
11 #define ENETC_TXBDS_NEEDED(val)	((val) + 2)
12 /* max # of chained Tx BDs is 15, including head and extension BD */
13 #define ENETC_MAX_SKB_FRAGS	13
14 #define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
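/* Worked example: head + ENETC_MAX_SKB_FRAGS = 14 data BDs, plus the
 * optional extension BD and the 1 BD gap gives ENETC_TXBDS_MAX_NEEDED = 16.
 */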
15 
16 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb);
17 
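/* Transmit entry point: linearize skbs with too many fragments, stop the
 * subqueue when the ring is short on free BDs, then hand the skb to
 * enetc_map_tx_buffs() and drop it if BD mapping fails.
 */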
18 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
19 {
20 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
21 	struct enetc_bdr *tx_ring;
22 	int count;
23 
24 	tx_ring = priv->tx_ring[skb->queue_mapping];
25 
26 	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
27 		if (unlikely(skb_linearize(skb)))
28 			goto drop_packet_err;
29 
30 	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
31 	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
32 		netif_stop_subqueue(ndev, tx_ring->index);
33 		return NETDEV_TX_BUSY;
34 	}
35 
36 	count = enetc_map_tx_buffs(tx_ring, skb);
37 	if (unlikely(!count))
38 		goto drop_packet_err;
39 
40 	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
41 		netif_stop_subqueue(ndev, tx_ring->index);
42 
43 	return NETDEV_TX_OK;
44 
45 drop_packet_err:
46 	dev_kfree_skb_any(skb);
47 	return NETDEV_TX_OK;
48 }
49 
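/* Set up Tx checksum offload fields in the BD for TCP/UDP packets that
 * request CHECKSUM_PARTIAL; fall back to skb_checksum_help() for other
 * L4 protocols. Returns true if the HW will insert the checksum.
 */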
50 static bool enetc_tx_csum(struct sk_buff *skb, union enetc_tx_bd *txbd)
51 {
52 	int l3_start, l3_hsize;
53 	u16 l3_flags, l4_flags;
54 
55 	if (skb->ip_summed != CHECKSUM_PARTIAL)
56 		return false;
57 
58 	switch (skb->csum_offset) {
59 	case offsetof(struct tcphdr, check):
60 		l4_flags = ENETC_TXBD_L4_TCP;
61 		break;
62 	case offsetof(struct udphdr, check):
63 		l4_flags = ENETC_TXBD_L4_UDP;
64 		break;
65 	default:
66 		skb_checksum_help(skb);
67 		return false;
68 	}
69 
70 	l3_start = skb_network_offset(skb);
71 	l3_hsize = skb_network_header_len(skb);
72 
73 	l3_flags = 0;
74 	if (skb->protocol == htons(ETH_P_IPV6))
75 		l3_flags = ENETC_TXBD_L3_IPV6;
76 
77 	/* write BD fields */
78 	txbd->l3_csoff = enetc_txbd_l3_csoff(l3_start, l3_hsize, l3_flags);
79 	txbd->l4_csoff = l4_flags;
80 
81 	return true;
82 }
83 
84 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
85 				struct enetc_tx_swbd *tx_swbd)
86 {
87 	if (tx_swbd->is_dma_page)
88 		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
89 			       tx_swbd->len, DMA_TO_DEVICE);
90 	else
91 		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
92 				 tx_swbd->len, DMA_TO_DEVICE);
93 	tx_swbd->dma = 0;
94 }
95 
96 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
97 			      struct enetc_tx_swbd *tx_swbd)
98 {
99 	if (tx_swbd->dma)
100 		enetc_unmap_tx_buff(tx_ring, tx_swbd);
101 
102 	if (tx_swbd->skb) {
103 		dev_kfree_skb_any(tx_swbd->skb);
104 		tx_swbd->skb = NULL;
105 	}
106 }
107 
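/* Map the skb head and page fragments to a chain of Tx BDs, optionally
 * inserting an extension BD for VLAN tag insertion and/or two-step PTP
 * timestamping. The last BD gets the ENETC_TXBD_FLAGS_F flag and the
 * producer index register is written to kick off transmission. Returns
 * the number of BDs used, or 0 after unwinding on a DMA mapping error.
 */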
108 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
109 {
110 	struct enetc_tx_swbd *tx_swbd;
111 	struct skb_frag_struct *frag;
112 	int len = skb_headlen(skb);
113 	union enetc_tx_bd temp_bd;
114 	union enetc_tx_bd *txbd;
115 	bool do_vlan, do_tstamp;
116 	int i, count = 0;
117 	unsigned int f;
118 	dma_addr_t dma;
119 	u8 flags = 0;
120 
121 	i = tx_ring->next_to_use;
122 	txbd = ENETC_TXBD(*tx_ring, i);
123 	prefetchw(txbd);
124 
125 	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
126 	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
127 		goto dma_err;
128 
129 	temp_bd.addr = cpu_to_le64(dma);
130 	temp_bd.buf_len = cpu_to_le16(len);
131 	temp_bd.lstatus = 0;
132 
133 	tx_swbd = &tx_ring->tx_swbd[i];
134 	tx_swbd->dma = dma;
135 	tx_swbd->len = len;
136 	tx_swbd->is_dma_page = 0;
137 	count++;
138 
139 	do_vlan = skb_vlan_tag_present(skb);
140 	do_tstamp = skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
141 
142 	if (do_vlan || do_tstamp)
143 		flags |= ENETC_TXBD_FLAGS_EX;
144 
145 	if (enetc_tx_csum(skb, &temp_bd))
146 		flags |= ENETC_TXBD_FLAGS_CSUM | ENETC_TXBD_FLAGS_L4CS;
147 
148 	/* first BD needs frm_len and offload flags set */
149 	temp_bd.frm_len = cpu_to_le16(skb->len);
150 	temp_bd.flags = flags;
151 
152 	if (flags & ENETC_TXBD_FLAGS_EX) {
153 		u8 e_flags = 0;
154 		*txbd = temp_bd;
155 		enetc_clear_tx_bd(&temp_bd);
156 
157 		/* add extension BD for VLAN and/or timestamping */
158 		flags = 0;
159 		tx_swbd++;
160 		txbd++;
161 		i++;
162 		if (unlikely(i == tx_ring->bd_count)) {
163 			i = 0;
164 			tx_swbd = tx_ring->tx_swbd;
165 			txbd = ENETC_TXBD(*tx_ring, 0);
166 		}
167 		prefetchw(txbd);
168 
169 		if (do_vlan) {
170 			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
171 			temp_bd.ext.tpid = 0; /* 0 = C-TAG */
172 			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
173 		}
174 
175 		if (do_tstamp) {
176 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
177 			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
178 		}
179 
180 		temp_bd.ext.e_flags = e_flags;
181 		count++;
182 	}
183 
184 	frag = &skb_shinfo(skb)->frags[0];
185 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
186 		len = skb_frag_size(frag);
187 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
188 				       DMA_TO_DEVICE);
189 		if (dma_mapping_error(tx_ring->dev, dma))
190 			goto dma_err;
191 
192 		*txbd = temp_bd;
193 		enetc_clear_tx_bd(&temp_bd);
194 
195 		flags = 0;
196 		tx_swbd++;
197 		txbd++;
198 		i++;
199 		if (unlikely(i == tx_ring->bd_count)) {
200 			i = 0;
201 			tx_swbd = tx_ring->tx_swbd;
202 			txbd = ENETC_TXBD(*tx_ring, 0);
203 		}
204 		prefetchw(txbd);
205 
206 		temp_bd.addr = cpu_to_le64(dma);
207 		temp_bd.buf_len = cpu_to_le16(len);
208 
209 		tx_swbd->dma = dma;
210 		tx_swbd->len = len;
211 		tx_swbd->is_dma_page = 1;
212 		count++;
213 	}
214 
215 	/* last BD needs 'F' bit set */
216 	flags |= ENETC_TXBD_FLAGS_F;
217 	temp_bd.flags = flags;
218 	*txbd = temp_bd;
219 
220 	tx_ring->tx_swbd[i].skb = skb;
221 
222 	enetc_bdr_idx_inc(tx_ring, &i);
223 	tx_ring->next_to_use = i;
224 
225 	/* let H/W know BD ring has been updated */
226 	enetc_wr_reg(tx_ring->tpir, i); /* includes wmb() */
227 
228 	return count;
229 
230 dma_err:
231 	dev_err(tx_ring->dev, "DMA map error\n");
232 
233 	do {
234 		tx_swbd = &tx_ring->tx_swbd[i];
235 		enetc_free_tx_skb(tx_ring, tx_swbd);
236 		if (i == 0)
237 			i = tx_ring->bd_count;
238 		i--;
239 	} while (count--);
240 
241 	return 0;
242 }
243 
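/* MSI-X handler: mask this vector's Rx and Tx interrupt sources and defer
 * the actual work to NAPI.
 */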
244 static irqreturn_t enetc_msix(int irq, void *data)
245 {
246 	struct enetc_int_vector	*v = data;
247 	int i;
248 
249 	/* disable interrupts */
250 	enetc_wr_reg(v->rbier, 0);
251 
252 	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
253 		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
254 
255 	napi_schedule_irqoff(&v->napi);
256 
257 	return IRQ_HANDLED;
258 }
259 
260 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
261 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
262 			       struct napi_struct *napi, int work_limit);
263 
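/* NAPI poll: service all Tx rings of this vector plus its Rx ring; only
 * complete and re-enable interrupts when every ring finished within the
 * budget.
 */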
264 static int enetc_poll(struct napi_struct *napi, int budget)
265 {
266 	struct enetc_int_vector
267 		*v = container_of(napi, struct enetc_int_vector, napi);
268 	bool complete = true;
269 	int work_done;
270 	int i;
271 
272 	for (i = 0; i < v->count_tx_rings; i++)
273 		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
274 			complete = false;
275 
276 	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
277 	if (work_done == budget)
278 		complete = false;
279 
280 	if (!complete)
281 		return budget;
282 
283 	napi_complete_done(napi, work_done);
284 
285 	/* enable interrupts */
286 	enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
287 
288 	for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
289 		enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
290 			     ENETC_TBIER_TXTIE);
291 
292 	return work_done;
293 }
294 
295 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
296 {
297 	int pi = enetc_rd_reg(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
298 
299 	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
300 }
301 
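/* Reclaim completed Tx BDs up to the HW consumer index: unmap buffers,
 * free skbs at end-of-frame, update stats and wake the subqueue once
 * enough BDs are free again. Returns false if the work limit was hit.
 */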
302 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
303 {
304 	struct net_device *ndev = tx_ring->ndev;
305 	int tx_frm_cnt = 0, tx_byte_cnt = 0;
306 	struct enetc_tx_swbd *tx_swbd;
307 	int i, bds_to_clean;
308 
309 	i = tx_ring->next_to_clean;
310 	tx_swbd = &tx_ring->tx_swbd[i];
311 	bds_to_clean = enetc_bd_ready_count(tx_ring, i);
312 
313 	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
314 		bool is_eof = !!tx_swbd->skb;
315 
316 		if (likely(tx_swbd->dma))
317 			enetc_unmap_tx_buff(tx_ring, tx_swbd);
318 
319 		if (is_eof) {
320 			napi_consume_skb(tx_swbd->skb, napi_budget);
321 			tx_swbd->skb = NULL;
322 		}
323 
324 		tx_byte_cnt += tx_swbd->len;
325 
326 		bds_to_clean--;
327 		tx_swbd++;
328 		i++;
329 		if (unlikely(i == tx_ring->bd_count)) {
330 			i = 0;
331 			tx_swbd = tx_ring->tx_swbd;
332 		}
333 
334 		/* BD iteration loop end */
335 		if (is_eof) {
336 			tx_frm_cnt++;
337 			/* re-arm interrupt source */
338 			enetc_wr_reg(tx_ring->idr, BIT(tx_ring->index) |
339 				     BIT(16 + tx_ring->index));
340 		}
341 
342 		if (unlikely(!bds_to_clean))
343 			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
344 	}
345 
346 	tx_ring->next_to_clean = i;
347 	tx_ring->stats.packets += tx_frm_cnt;
348 	tx_ring->stats.bytes += tx_byte_cnt;
349 
350 	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
351 		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
352 		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
353 		netif_wake_subqueue(ndev, tx_ring->index);
354 	}
355 
356 	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
357 }
358 
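/* Allocate and DMA-map a fresh page for an Rx software BD; the initial
 * buffer offset leaves ENETC_RXB_PAD bytes of headroom.
 */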
359 static bool enetc_new_page(struct enetc_bdr *rx_ring,
360 			   struct enetc_rx_swbd *rx_swbd)
361 {
362 	struct page *page;
363 	dma_addr_t addr;
364 
365 	page = dev_alloc_page();
366 	if (unlikely(!page))
367 		return false;
368 
369 	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
370 	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
371 		__free_page(page);
372 
373 		return false;
374 	}
375 
376 	rx_swbd->dma = addr;
377 	rx_swbd->page = page;
378 	rx_swbd->page_offset = ENETC_RXB_PAD;
379 
380 	return true;
381 }
382 
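/* Re-arm up to buff_cnt Rx BDs, allocating new pages only where the old
 * ones could not be reused, then publish the updated index to HW.
 */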
383 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
384 {
385 	struct enetc_rx_swbd *rx_swbd;
386 	union enetc_rx_bd *rxbd;
387 	int i, j;
388 
389 	i = rx_ring->next_to_use;
390 	rx_swbd = &rx_ring->rx_swbd[i];
391 	rxbd = ENETC_RXBD(*rx_ring, i);
392 
393 	for (j = 0; j < buff_cnt; j++) {
394 		/* try to reuse the page */
395 		if (unlikely(!rx_swbd->page)) {
396 			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
397 				rx_ring->stats.rx_alloc_errs++;
398 				break;
399 			}
400 		}
401 
402 		/* update RxBD */
403 		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
404 					   rx_swbd->page_offset);
405 		/* clear 'R" as well */
406 		rxbd->r.lstatus = 0;
407 
408 		rx_swbd++;
409 		rxbd++;
410 		i++;
411 		if (unlikely(i == rx_ring->bd_count)) {
412 			i = 0;
413 			rx_swbd = rx_ring->rx_swbd;
414 			rxbd = ENETC_RXBD(*rx_ring, 0);
415 		}
416 	}
417 
418 	if (likely(j)) {
419 		rx_ring->next_to_alloc = i; /* keep track for page reuse */
420 		rx_ring->next_to_use = i;
421 		/* update ENETC's consumer index */
422 		enetc_wr_reg(rx_ring->rcir, i);
423 	}
424 
425 	return j;
426 }
427 
428 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
429 			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
430 {
431 	/* TODO: add tstamp, hashing */
432 	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
433 		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
434 
435 		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
436 		skb->ip_summed = CHECKSUM_COMPLETE;
437 	}
438 
439 	/* Copy the VLAN tag to the skb, if one was extracted. For now we
440 	 * assume a standard TPID, but the HW also supports custom values.
441 	 */
442 	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN)
443 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
444 				       le16_to_cpu(rxbd->r.vlan_opt));
445 }
446 
447 static void enetc_process_skb(struct enetc_bdr *rx_ring,
448 			      struct sk_buff *skb)
449 {
450 	skb_record_rx_queue(skb, rx_ring->index);
451 	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
452 }
453 
454 static bool enetc_page_reusable(struct page *page)
455 {
456 	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
457 }
458 
459 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
460 			     struct enetc_rx_swbd *old)
461 {
462 	struct enetc_rx_swbd *new;
463 
464 	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
465 
466 	/* next buf that may reuse a page */
467 	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
468 
469 	/* copy page reference */
470 	*new = *old;
471 }
472 
473 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
474 					       int i, u16 size)
475 {
476 	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
477 
478 	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
479 				      rx_swbd->page_offset,
480 				      size, DMA_FROM_DEVICE);
481 	return rx_swbd;
482 }
483 
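/* Recycle the Rx buffer if possible: flip page_offset by
 * ENETC_RXB_TRUESIZE to point at the other buffer within the page, take
 * an extra page reference and re-post it at next_to_alloc; otherwise
 * unmap the page and let the refill path allocate a new one.
 */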
484 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
485 			      struct enetc_rx_swbd *rx_swbd)
486 {
487 	if (likely(enetc_page_reusable(rx_swbd->page))) {
488 		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
489 		page_ref_inc(rx_swbd->page);
490 
491 		enetc_reuse_page(rx_ring, rx_swbd);
492 
493 		/* sync for use by the device */
494 		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
495 						 rx_swbd->page_offset,
496 						 ENETC_RXB_DMA_SIZE,
497 						 DMA_FROM_DEVICE);
498 	} else {
499 		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
500 			       PAGE_SIZE, DMA_FROM_DEVICE);
501 	}
502 
503 	rx_swbd->page = NULL;
504 }
505 
506 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
507 						int i, u16 size)
508 {
509 	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
510 	struct sk_buff *skb;
511 	void *ba;
512 
513 	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
514 	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
515 	if (unlikely(!skb)) {
516 		rx_ring->stats.rx_alloc_errs++;
517 		return NULL;
518 	}
519 
520 	skb_reserve(skb, ENETC_RXB_PAD);
521 	__skb_put(skb, size);
522 
523 	enetc_put_rx_buff(rx_ring, rx_swbd);
524 
525 	return skb;
526 }
527 
528 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
529 				     u16 size, struct sk_buff *skb)
530 {
531 	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
532 
533 	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
534 			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
535 
536 	enetc_put_rx_buff(rx_ring, rx_swbd);
537 }
538 
539 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
540 
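/* Rx poll loop: consume ready BDs, build an skb from the first buffer and
 * attach any continuation buffers as frags, drop errored frames, refill
 * the ring in bundles of ENETC_RXBD_BUNDLE and hand good frames to GRO.
 */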
541 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
542 			       struct napi_struct *napi, int work_limit)
543 {
544 	int rx_frm_cnt = 0, rx_byte_cnt = 0;
545 	int cleaned_cnt, i;
546 
547 	cleaned_cnt = enetc_bd_unused(rx_ring);
548 	/* next descriptor to process */
549 	i = rx_ring->next_to_clean;
550 
551 	while (likely(rx_frm_cnt < work_limit)) {
552 		union enetc_rx_bd *rxbd;
553 		struct sk_buff *skb;
554 		u32 bd_status;
555 		u16 size;
556 
557 		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
558 			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
559 
560 			cleaned_cnt -= count;
561 		}
562 
563 		rxbd = ENETC_RXBD(*rx_ring, i);
564 		bd_status = le32_to_cpu(rxbd->r.lstatus);
565 		if (!bd_status)
566 			break;
567 
568 		enetc_wr_reg(rx_ring->idr, BIT(rx_ring->index));
569 		dma_rmb(); /* for reading other rxbd fields */
570 		size = le16_to_cpu(rxbd->r.buf_len);
571 		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
572 		if (!skb)
573 			break;
574 
575 		enetc_get_offloads(rx_ring, rxbd, skb);
576 
577 		cleaned_cnt++;
578 		rxbd++;
579 		i++;
580 		if (unlikely(i == rx_ring->bd_count)) {
581 			i = 0;
582 			rxbd = ENETC_RXBD(*rx_ring, 0);
583 		}
584 
585 		if (unlikely(bd_status &
586 			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
587 			dev_kfree_skb(skb);
588 			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
589 				dma_rmb();
590 				bd_status = le32_to_cpu(rxbd->r.lstatus);
591 				rxbd++;
592 				i++;
593 				if (unlikely(i == rx_ring->bd_count)) {
594 					i = 0;
595 					rxbd = ENETC_RXBD(*rx_ring, 0);
596 				}
597 			}
598 
599 			rx_ring->ndev->stats.rx_dropped++;
600 			rx_ring->ndev->stats.rx_errors++;
601 
602 			break;
603 		}
604 
605 		/* not last BD in frame? */
606 		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
607 			bd_status = le32_to_cpu(rxbd->r.lstatus);
608 			size = ENETC_RXB_DMA_SIZE;
609 
610 			if (bd_status & ENETC_RXBD_LSTATUS_F) {
611 				dma_rmb();
612 				size = le16_to_cpu(rxbd->r.buf_len);
613 			}
614 
615 			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
616 
617 			cleaned_cnt++;
618 			rxbd++;
619 			i++;
620 			if (unlikely(i == rx_ring->bd_count)) {
621 				i = 0;
622 				rxbd = ENETC_RXBD(*rx_ring, 0);
623 			}
624 		}
625 
626 		rx_byte_cnt += skb->len;
627 
628 		enetc_process_skb(rx_ring, skb);
629 
630 		napi_gro_receive(napi, skb);
631 
632 		rx_frm_cnt++;
633 	}
634 
635 	rx_ring->next_to_clean = i;
636 
637 	rx_ring->stats.packets += rx_frm_cnt;
638 	rx_ring->stats.bytes += rx_byte_cnt;
639 
640 	return rx_frm_cnt;
641 }
642 
643 /* Probing and Init */
644 #define ENETC_MAX_RFS_SIZE 64
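/* Read the SI capability registers to learn how many Rx/Tx rings, flow
 * steering entries and RSS table entries this SI provides.
 */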
645 void enetc_get_si_caps(struct enetc_si *si)
646 {
647 	struct enetc_hw *hw = &si->hw;
648 	u32 val;
649 
650 	/* find out how many of the various resources we have to work with */
651 	val = enetc_rd(hw, ENETC_SICAPR0);
652 	si->num_rx_rings = (val >> 16) & 0xff;
653 	si->num_tx_rings = val & 0xff;
654 
655 	val = enetc_rd(hw, ENETC_SIRFSCAPR);
656 	si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
657 	si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
658 
659 	si->num_rss = 0;
660 	val = enetc_rd(hw, ENETC_SIPCAPR0);
661 	if (val & ENETC_SIPCAPR0_RSS) {
662 		val = enetc_rd(hw, ENETC_SIRSSCAPR);
663 		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(val);
664 	}
665 }
666 
667 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
668 {
669 	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
670 					&r->bd_dma_base, GFP_KERNEL);
671 	if (!r->bd_base)
672 		return -ENOMEM;
673 
674 	/* h/w requires 128B alignment */
675 	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
676 		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
677 				  r->bd_dma_base);
678 		return -EINVAL;
679 	}
680 
681 	return 0;
682 }
683 
684 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
685 {
686 	int err;
687 
688 	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
689 	if (!txr->tx_swbd)
690 		return -ENOMEM;
691 
692 	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
693 	if (err) {
694 		vfree(txr->tx_swbd);
695 		return err;
696 	}
697 
698 	txr->next_to_clean = 0;
699 	txr->next_to_use = 0;
700 
701 	return 0;
702 }
703 
704 static void enetc_free_txbdr(struct enetc_bdr *txr)
705 {
706 	int size, i;
707 
708 	for (i = 0; i < txr->bd_count; i++)
709 		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
710 
711 	size = txr->bd_count * sizeof(union enetc_tx_bd);
712 
713 	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
714 	txr->bd_base = NULL;
715 
716 	vfree(txr->tx_swbd);
717 	txr->tx_swbd = NULL;
718 }
719 
720 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
721 {
722 	int i, err;
723 
724 	for (i = 0; i < priv->num_tx_rings; i++) {
725 		err = enetc_alloc_txbdr(priv->tx_ring[i]);
726 
727 		if (err)
728 			goto fail;
729 	}
730 
731 	return 0;
732 
733 fail:
734 	while (i-- > 0)
735 		enetc_free_txbdr(priv->tx_ring[i]);
736 
737 	return err;
738 }
739 
740 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
741 {
742 	int i;
743 
744 	for (i = 0; i < priv->num_tx_rings; i++)
745 		enetc_free_txbdr(priv->tx_ring[i]);
746 }
747 
748 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr)
749 {
750 	int err;
751 
752 	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
753 	if (!rxr->rx_swbd)
754 		return -ENOMEM;
755 
756 	err = enetc_dma_alloc_bdr(rxr, sizeof(union enetc_rx_bd));
757 	if (err) {
758 		vfree(rxr->rx_swbd);
759 		return err;
760 	}
761 
762 	rxr->next_to_clean = 0;
763 	rxr->next_to_use = 0;
764 	rxr->next_to_alloc = 0;
765 
766 	return 0;
767 }
768 
769 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
770 {
771 	int size;
772 
773 	size = rxr->bd_count * sizeof(union enetc_rx_bd);
774 
775 	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
776 	rxr->bd_base = NULL;
777 
778 	vfree(rxr->rx_swbd);
779 	rxr->rx_swbd = NULL;
780 }
781 
782 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
783 {
784 	int i, err;
785 
786 	for (i = 0; i < priv->num_rx_rings; i++) {
787 		err = enetc_alloc_rxbdr(priv->rx_ring[i]);
788 
789 		if (err)
790 			goto fail;
791 	}
792 
793 	return 0;
794 
795 fail:
796 	while (i-- > 0)
797 		enetc_free_rxbdr(priv->rx_ring[i]);
798 
799 	return err;
800 }
801 
802 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
803 {
804 	int i;
805 
806 	for (i = 0; i < priv->num_rx_rings; i++)
807 		enetc_free_rxbdr(priv->rx_ring[i]);
808 }
809 
810 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
811 {
812 	int i;
813 
814 	if (!tx_ring->tx_swbd)
815 		return;
816 
817 	for (i = 0; i < tx_ring->bd_count; i++) {
818 		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
819 
820 		enetc_free_tx_skb(tx_ring, tx_swbd);
821 	}
822 
823 	tx_ring->next_to_clean = 0;
824 	tx_ring->next_to_use = 0;
825 }
826 
827 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
828 {
829 	int i;
830 
831 	if (!rx_ring->rx_swbd)
832 		return;
833 
834 	for (i = 0; i < rx_ring->bd_count; i++) {
835 		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
836 
837 		if (!rx_swbd->page)
838 			continue;
839 
840 		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
841 			       PAGE_SIZE, DMA_FROM_DEVICE);
842 		__free_page(rx_swbd->page);
843 		rx_swbd->page = NULL;
844 	}
845 
846 	rx_ring->next_to_clean = 0;
847 	rx_ring->next_to_use = 0;
848 	rx_ring->next_to_alloc = 0;
849 }
850 
851 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
852 {
853 	int i;
854 
855 	for (i = 0; i < priv->num_rx_rings; i++)
856 		enetc_free_rx_ring(priv->rx_ring[i]);
857 
858 	for (i = 0; i < priv->num_tx_rings; i++)
859 		enetc_free_tx_ring(priv->tx_ring[i]);
860 }
861 
862 static int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
863 {
864 	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
865 
866 	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
867 					   GFP_KERNEL);
868 	if (!cbdr->bd_base)
869 		return -ENOMEM;
870 
871 	/* h/w requires 128B alignment */
872 	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
873 		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
874 		return -EINVAL;
875 	}
876 
877 	cbdr->next_to_clean = 0;
878 	cbdr->next_to_use = 0;
879 
880 	return 0;
881 }
882 
883 static void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
884 {
885 	int size = cbdr->bd_count * sizeof(struct enetc_cbd);
886 
887 	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
888 	cbdr->bd_base = NULL;
889 }
890 
891 static void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
892 {
893 	/* set CBDR cache attributes */
894 	enetc_wr(hw, ENETC_SICAR2,
895 		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
896 
897 	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
898 	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
899 	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
900 
901 	enetc_wr(hw, ENETC_SICBDRPIR, 0);
902 	enetc_wr(hw, ENETC_SICBDRCIR, 0);
903 
904 	/* enable ring */
905 	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
906 
907 	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
908 	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
909 }
910 
911 static void enetc_clear_cbdr(struct enetc_hw *hw)
912 {
913 	enetc_wr(hw, ENETC_SICBDRMR, 0);
914 }
915 
916 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
917 {
918 	int *rss_table;
919 	int i;
920 
921 	rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
922 	if (!rss_table)
923 		return -ENOMEM;
924 
925 	/* Set up RSS table defaults */
926 	for (i = 0; i < si->num_rss; i++)
927 		rss_table[i] = i % num_groups;
928 
929 	enetc_set_rss_table(si, rss_table, si->num_rss);
930 
931 	kfree(rss_table);
932 
933 	return 0;
934 }
935 
936 static int enetc_configure_si(struct enetc_ndev_priv *priv)
937 {
938 	struct enetc_si *si = priv->si;
939 	struct enetc_hw *hw = &si->hw;
940 	int err;
941 
942 	enetc_setup_cbdr(hw, &si->cbd_ring);
943 	/* set SI cache attributes */
944 	enetc_wr(hw, ENETC_SICAR0,
945 		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
946 	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
947 	/* enable SI */
948 	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
949 
950 	if (si->num_rss) {
951 		err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
952 		if (err)
953 			return err;
954 	}
955 
956 	return 0;
957 }
958 
959 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
960 {
961 	struct enetc_si *si = priv->si;
962 	int cpus = num_online_cpus();
963 
964 	priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
965 	priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
966 
967 	/* Enable all available TX rings in order to configure as many
968 	 * priorities as possible, when needed.
969 	 * TODO: Make # of TX rings run-time configurable
970 	 */
971 	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
972 	priv->num_tx_rings = si->num_tx_rings;
973 	priv->bdr_int_num = cpus;
974 
975 	/* SI specific */
976 	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
977 }
978 
979 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
980 {
981 	struct enetc_si *si = priv->si;
982 	int err;
983 
984 	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
985 	if (err)
986 		return err;
987 
988 	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
989 				  GFP_KERNEL);
990 	if (!priv->cls_rules) {
991 		err = -ENOMEM;
992 		goto err_alloc_cls;
993 	}
994 
995 	err = enetc_configure_si(priv);
996 	if (err)
997 		goto err_config_si;
998 
999 	return 0;
1000 
1001 err_config_si:
1002 	kfree(priv->cls_rules);
1003 err_alloc_cls:
1004 	enetc_clear_cbdr(&si->hw);
1005 	enetc_free_cbdr(priv->dev, &si->cbd_ring);
1006 
1007 	return err;
1008 }
1009 
1010 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1011 {
1012 	struct enetc_si *si = priv->si;
1013 
1014 	enetc_clear_cbdr(&si->hw);
1015 	enetc_free_cbdr(priv->dev, &si->cbd_ring);
1016 
1017 	kfree(priv->cls_rules);
1018 }
1019 
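/* Program a Tx BD ring into HW: base address, length, interrupt threshold
 * and mode register; also cache the tpir/tcir/idr register addresses for
 * the fast path.
 */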
1020 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1021 {
1022 	int idx = tx_ring->index;
1023 	u32 tbmr;
1024 
1025 	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1026 		       lower_32_bits(tx_ring->bd_dma_base));
1027 
1028 	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1029 		       upper_32_bits(tx_ring->bd_dma_base));
1030 
1031 	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1032 	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1033 		       ENETC_RTBLENR_LEN(tx_ring->bd_count));
1034 
1035 	/* clearing the Tx PI/CI registers is not supported, adjust the sw indexes */
1036 	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1037 	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1038 
1039 	/* enable Tx ints by setting pkt thr to 1 */
1040 	enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
1041 
1042 	tbmr = ENETC_TBMR_EN;
1043 	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1044 		tbmr |= ENETC_TBMR_VIH;
1045 
1046 	/* enable ring */
1047 	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1048 
1049 	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1050 	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1051 	tx_ring->idr = hw->reg + ENETC_SITXIDR;
1052 }
1053 
1054 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1055 {
1056 	int idx = rx_ring->index;
1057 	u32 rbmr;
1058 
1059 	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1060 		       lower_32_bits(rx_ring->bd_dma_base));
1061 
1062 	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1063 		       upper_32_bits(rx_ring->bd_dma_base));
1064 
1065 	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1066 	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1067 		       ENETC_RTBLENR_LEN(rx_ring->bd_count));
1068 
1069 	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1070 
1071 	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1072 
1073 	/* enable Rx ints by setting pkt thr to 1 */
1074 	enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
1075 
1076 	rbmr = ENETC_RBMR_EN;
1077 	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1078 		rbmr |= ENETC_RBMR_VTE;
1079 
1080 	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1081 	rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1082 
1083 	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1084 
1085 	/* enable ring */
1086 	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1087 }
1088 
1089 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1090 {
1091 	int i;
1092 
1093 	for (i = 0; i < priv->num_tx_rings; i++)
1094 		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1095 
1096 	for (i = 0; i < priv->num_rx_rings; i++)
1097 		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1098 }
1099 
1100 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1101 {
1102 	int idx = rx_ring->index;
1103 
1104 	/* disable EN bit on ring */
1105 	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1106 }
1107 
1108 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1109 {
1110 	int delay = 8, timeout = 100;
1111 	int idx = tx_ring->index;
1112 
1113 	/* disable EN bit on ring */
1114 	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1115 
1116 	/* wait for busy to clear */
1117 	while (delay < timeout &&
1118 	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1119 		msleep(delay);
1120 		delay *= 2;
1121 	}
1122 
1123 	if (delay >= timeout)
1124 		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1125 			    idx);
1126 }
1127 
1128 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1129 {
1130 	int i;
1131 
1132 	for (i = 0; i < priv->num_tx_rings; i++)
1133 		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1134 
1135 	for (i = 0; i < priv->num_rx_rings; i++)
1136 		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1137 
1138 	udelay(1);
1139 }
1140 
1141 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1142 {
1143 	struct pci_dev *pdev = priv->si->pdev;
1144 	cpumask_t cpu_mask;
1145 	int i, j, err;
1146 
1147 	for (i = 0; i < priv->bdr_int_num; i++) {
1148 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1149 		struct enetc_int_vector *v = priv->int_vector[i];
1150 		int entry = ENETC_BDR_INT_BASE_IDX + i;
1151 		struct enetc_hw *hw = &priv->si->hw;
1152 
1153 		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1154 			 priv->ndev->name, i);
1155 		err = request_irq(irq, enetc_msix, 0, v->name, v);
1156 		if (err) {
1157 			dev_err(priv->dev, "request_irq() failed!\n");
1158 			goto irq_err;
1159 		}
1160 
1161 		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1162 		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1163 
1164 		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1165 
1166 		for (j = 0; j < v->count_tx_rings; j++) {
1167 			int idx = v->tx_ring[j].index;
1168 
1169 			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1170 		}
1171 		cpumask_clear(&cpu_mask);
1172 		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1173 		irq_set_affinity_hint(irq, &cpu_mask);
1174 	}
1175 
1176 	return 0;
1177 
1178 irq_err:
1179 	while (i--) {
1180 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1181 
1182 		irq_set_affinity_hint(irq, NULL);
1183 		free_irq(irq, priv->int_vector[i]);
1184 	}
1185 
1186 	return err;
1187 }
1188 
1189 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1190 {
1191 	struct pci_dev *pdev = priv->si->pdev;
1192 	int i;
1193 
1194 	for (i = 0; i < priv->bdr_int_num; i++) {
1195 		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1196 
1197 		irq_set_affinity_hint(irq, NULL);
1198 		free_irq(irq, priv->int_vector[i]);
1199 	}
1200 }
1201 
1202 static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
1203 {
1204 	int i;
1205 
1206 	/* enable Tx & Rx event indication */
1207 	for (i = 0; i < priv->num_rx_rings; i++) {
1208 		enetc_rxbdr_wr(&priv->si->hw, i,
1209 			       ENETC_RBIER, ENETC_RBIER_RXTIE);
1210 	}
1211 
1212 	for (i = 0; i < priv->num_tx_rings; i++) {
1213 		enetc_txbdr_wr(&priv->si->hw, i,
1214 			       ENETC_TBIER, ENETC_TBIER_TXTIE);
1215 	}
1216 }
1217 
1218 static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
1219 {
1220 	int i;
1221 
1222 	for (i = 0; i < priv->num_tx_rings; i++)
1223 		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1224 
1225 	for (i = 0; i < priv->num_rx_rings; i++)
1226 		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1227 }
1228 
1229 static void adjust_link(struct net_device *ndev)
1230 {
1231 	struct phy_device *phydev = ndev->phydev;
1232 
1233 	phy_print_status(phydev);
1234 }
1235 
1236 static int enetc_phy_connect(struct net_device *ndev)
1237 {
1238 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1239 	struct phy_device *phydev;
1240 
1241 	if (!priv->phy_node)
1242 		return 0; /* phy-less mode */
1243 
1244 	phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
1245 				0, priv->if_mode);
1246 	if (!phydev) {
1247 		dev_err(&ndev->dev, "could not attach to PHY\n");
1248 		return -ENODEV;
1249 	}
1250 
1251 	phy_attached_info(phydev);
1252 
1253 	return 0;
1254 }
1255 
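/* ndo_open: request IRQs, connect the PHY, allocate and program the Tx/Rx
 * rings, enable NAPI and interrupts, then start the Tx queues.
 */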
1256 int enetc_open(struct net_device *ndev)
1257 {
1258 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1259 	int i, err;
1260 
1261 	err = enetc_setup_irqs(priv);
1262 	if (err)
1263 		return err;
1264 
1265 	err = enetc_phy_connect(ndev);
1266 	if (err)
1267 		goto err_phy_connect;
1268 
1269 	err = enetc_alloc_tx_resources(priv);
1270 	if (err)
1271 		goto err_alloc_tx;
1272 
1273 	err = enetc_alloc_rx_resources(priv);
1274 	if (err)
1275 		goto err_alloc_rx;
1276 
1277 	enetc_setup_bdrs(priv);
1278 
1279 	err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1280 	if (err)
1281 		goto err_set_queues;
1282 
1283 	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1284 	if (err)
1285 		goto err_set_queues;
1286 
1287 	for (i = 0; i < priv->bdr_int_num; i++)
1288 		napi_enable(&priv->int_vector[i]->napi);
1289 
1290 	enetc_enable_interrupts(priv);
1291 
1292 	if (ndev->phydev)
1293 		phy_start(ndev->phydev);
1294 	else
1295 		netif_carrier_on(ndev);
1296 
1297 	netif_tx_start_all_queues(ndev);
1298 
1299 	return 0;
1300 
1301 err_set_queues:
1302 	enetc_free_rx_resources(priv);
1303 err_alloc_rx:
1304 	enetc_free_tx_resources(priv);
1305 err_alloc_tx:
1306 	if (ndev->phydev)
1307 		phy_disconnect(ndev->phydev);
1308 err_phy_connect:
1309 	enetc_free_irqs(priv);
1310 
1311 	return err;
1312 }
1313 
1314 int enetc_close(struct net_device *ndev)
1315 {
1316 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1317 	int i;
1318 
1319 	netif_tx_stop_all_queues(ndev);
1320 
1321 	if (ndev->phydev) {
1322 		phy_stop(ndev->phydev);
1323 		phy_disconnect(ndev->phydev);
1324 	} else {
1325 		netif_carrier_off(ndev);
1326 	}
1327 
1328 	for (i = 0; i < priv->bdr_int_num; i++) {
1329 		napi_synchronize(&priv->int_vector[i]->napi);
1330 		napi_disable(&priv->int_vector[i]->napi);
1331 	}
1332 
1333 	enetc_disable_interrupts(priv);
1334 	enetc_clear_bdrs(priv);
1335 
1336 	enetc_free_rxtx_rings(priv);
1337 	enetc_free_rx_resources(priv);
1338 	enetc_free_tx_resources(priv);
1339 	enetc_free_irqs(priv);
1340 
1341 	return 0;
1342 }
1343 
1344 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1345 {
1346 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1347 	struct net_device_stats *stats = &ndev->stats;
1348 	unsigned long packets = 0, bytes = 0;
1349 	int i;
1350 
1351 	for (i = 0; i < priv->num_rx_rings; i++) {
1352 		packets += priv->rx_ring[i]->stats.packets;
1353 		bytes	+= priv->rx_ring[i]->stats.bytes;
1354 	}
1355 
1356 	stats->rx_packets = packets;
1357 	stats->rx_bytes = bytes;
1358 	bytes = 0;
1359 	packets = 0;
1360 
1361 	for (i = 0; i < priv->num_tx_rings; i++) {
1362 		packets += priv->tx_ring[i]->stats.packets;
1363 		bytes	+= priv->tx_ring[i]->stats.bytes;
1364 	}
1365 
1366 	stats->tx_packets = packets;
1367 	stats->tx_bytes = bytes;
1368 
1369 	return stats;
1370 }
1371 
1372 static int enetc_set_rss(struct net_device *ndev, int en)
1373 {
1374 	struct enetc_ndev_priv *priv = netdev_priv(ndev);
1375 	struct enetc_hw *hw = &priv->si->hw;
1376 	u32 reg;
1377 
1378 	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
1379 
1380 	reg = enetc_rd(hw, ENETC_SIMR);
1381 	reg &= ~ENETC_SIMR_RSSE;
1382 	reg |= (en) ? ENETC_SIMR_RSSE : 0;
1383 	enetc_wr(hw, ENETC_SIMR, reg);
1384 
1385 	return 0;
1386 }
1387 
1388 int enetc_set_features(struct net_device *ndev,
1389 		       netdev_features_t features)
1390 {
1391 	netdev_features_t changed = ndev->features ^ features;
1392 
1393 	if (changed & NETIF_F_RXHASH)
1394 		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1395 
1396 	return 0;
1397 }
1398 
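/* Allocate MSI-X vectors and the per-vector interrupt structures, each
 * embedding one Rx ring and a group of Tx rings, and record the ring to
 * vector mapping used later when requesting the IRQs.
 */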
1399 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1400 {
1401 	struct pci_dev *pdev = priv->si->pdev;
1402 	int size, v_tx_rings;
1403 	int i, n, err, nvec;
1404 
1405 	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1406 	/* allocate MSIX for both messaging and Rx/Tx interrupts */
1407 	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1408 
1409 	if (n < 0)
1410 		return n;
1411 
1412 	if (n != nvec)
1413 		return -EPERM;
1414 
1415 	/* # of tx rings per int vector */
1416 	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1417 	size = sizeof(struct enetc_int_vector) +
1418 	       sizeof(struct enetc_bdr) * v_tx_rings;
1419 
1420 	for (i = 0; i < priv->bdr_int_num; i++) {
1421 		struct enetc_int_vector *v;
1422 		struct enetc_bdr *bdr;
1423 		int j;
1424 
1425 		v = kzalloc(size, GFP_KERNEL);
1426 		if (!v) {
1427 			err = -ENOMEM;
1428 			goto fail;
1429 		}
1430 
1431 		priv->int_vector[i] = v;
1432 
1433 		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1434 			       NAPI_POLL_WEIGHT);
1435 		v->count_tx_rings = v_tx_rings;
1436 
1437 		for (j = 0; j < v_tx_rings; j++) {
1438 			int idx;
1439 
1440 			/* default tx ring mapping policy */
1441 			if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1442 				idx = 2 * j + i; /* 2 CPUs */
1443 			else
1444 				idx = j + i * v_tx_rings; /* default */
1445 
1446 			__set_bit(idx, &v->tx_rings_map);
1447 			bdr = &v->tx_ring[j];
1448 			bdr->index = idx;
1449 			bdr->ndev = priv->ndev;
1450 			bdr->dev = priv->dev;
1451 			bdr->bd_count = priv->tx_bd_count;
1452 			priv->tx_ring[idx] = bdr;
1453 		}
1454 
1455 		bdr = &v->rx_ring;
1456 		bdr->index = i;
1457 		bdr->ndev = priv->ndev;
1458 		bdr->dev = priv->dev;
1459 		bdr->bd_count = priv->rx_bd_count;
1460 		priv->rx_ring[i] = bdr;
1461 	}
1462 
1463 	return 0;
1464 
1465 fail:
1466 	while (i--) {
1467 		netif_napi_del(&priv->int_vector[i]->napi);
1468 		kfree(priv->int_vector[i]);
1469 	}
1470 
1471 	pci_free_irq_vectors(pdev);
1472 
1473 	return err;
1474 }
1475 
1476 void enetc_free_msix(struct enetc_ndev_priv *priv)
1477 {
1478 	int i;
1479 
1480 	for (i = 0; i < priv->bdr_int_num; i++) {
1481 		struct enetc_int_vector *v = priv->int_vector[i];
1482 
1483 		netif_napi_del(&v->napi);
1484 	}
1485 
1486 	for (i = 0; i < priv->num_rx_rings; i++)
1487 		priv->rx_ring[i] = NULL;
1488 
1489 	for (i = 0; i < priv->num_tx_rings; i++)
1490 		priv->tx_ring[i] = NULL;
1491 
1492 	for (i = 0; i < priv->bdr_int_num; i++) {
1493 		kfree(priv->int_vector[i]);
1494 		priv->int_vector[i] = NULL;
1495 	}
1496 
1497 	/* disable all MSIX for this device */
1498 	pci_free_irq_vectors(priv->si->pdev);
1499 }
1500 
1501 static void enetc_kfree_si(struct enetc_si *si)
1502 {
1503 	char *p = (char *)si - si->pad;
1504 
1505 	kfree(p);
1506 }
1507 
1508 static void enetc_detect_errata(struct enetc_si *si)
1509 {
1510 	if (si->pdev->revision == ENETC_REV1)
1511 		si->errata = ENETC_ERR_TXCSUM | ENETC_ERR_VLAN_ISOL |
1512 			     ENETC_ERR_UCMCSWP;
1513 }
1514 
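/* Common PCI probe for ENETC devices: reset the function via FLR, enable
 * the device, set the DMA mask, map the register BAR and allocate a
 * 32B-aligned enetc_si with an optional private area of sizeof_priv bytes.
 */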
1515 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1516 {
1517 	struct enetc_si *si, *p;
1518 	struct enetc_hw *hw;
1519 	size_t alloc_size;
1520 	int err, len;
1521 
1522 	pcie_flr(pdev);
1523 	err = pci_enable_device_mem(pdev);
1524 	if (err) {
1525 		dev_err(&pdev->dev, "device enable failed\n");
1526 		return err;
1527 	}
1528 
1529 	/* set up for high or low DMA */
1530 	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1531 	if (err) {
1532 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1533 		if (err) {
1534 			dev_err(&pdev->dev,
1535 				"DMA configuration failed: 0x%x\n", err);
1536 			goto err_dma;
1537 		}
1538 	}
1539 
1540 	err = pci_request_mem_regions(pdev, name);
1541 	if (err) {
1542 		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
1543 		goto err_pci_mem_reg;
1544 	}
1545 
1546 	pci_set_master(pdev);
1547 
1548 	alloc_size = sizeof(struct enetc_si);
1549 	if (sizeof_priv) {
1550 		/* align priv to 32B */
1551 		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1552 		alloc_size += sizeof_priv;
1553 	}
1554 	/* force 32B alignment for enetc_si */
1555 	alloc_size += ENETC_SI_ALIGN - 1;
1556 
1557 	p = kzalloc(alloc_size, GFP_KERNEL);
1558 	if (!p) {
1559 		err = -ENOMEM;
1560 		goto err_alloc_si;
1561 	}
1562 
1563 	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1564 	si->pad = (char *)si - (char *)p;
1565 
1566 	pci_set_drvdata(pdev, si);
1567 	si->pdev = pdev;
1568 	hw = &si->hw;
1569 
1570 	len = pci_resource_len(pdev, ENETC_BAR_REGS);
1571 	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1572 	if (!hw->reg) {
1573 		err = -ENXIO;
1574 		dev_err(&pdev->dev, "ioremap() failed\n");
1575 		goto err_ioremap;
1576 	}
1577 	if (len > ENETC_PORT_BASE)
1578 		hw->port = hw->reg + ENETC_PORT_BASE;
1579 	if (len > ENETC_GLOBAL_BASE)
1580 		hw->global = hw->reg + ENETC_GLOBAL_BASE;
1581 
1582 	enetc_detect_errata(si);
1583 
1584 	return 0;
1585 
1586 err_ioremap:
1587 	enetc_kfree_si(si);
1588 err_alloc_si:
1589 	pci_release_mem_regions(pdev);
1590 err_pci_mem_reg:
1591 err_dma:
1592 	pci_disable_device(pdev);
1593 
1594 	return err;
1595 }
1596 
1597 void enetc_pci_remove(struct pci_dev *pdev)
1598 {
1599 	struct enetc_si *si = pci_get_drvdata(pdev);
1600 	struct enetc_hw *hw = &si->hw;
1601 
1602 	iounmap(hw->reg);
1603 	enetc_kfree_si(si);
1604 	pci_release_mem_regions(pdev);
1605 	pci_disable_device(pdev);
1606 }
1607