// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Applied Micro X-Gene SoC Ethernet v2 Driver
 *
 * Copyright (c) 2017, Applied Micro Circuits Corporation
 * Author(s): Iyappan Subramanian <isubramanian@apm.com>
 *	      Keyur Chudgar <kchudgar@apm.com>
 */

#include "main.h"

static const struct acpi_device_id xge_acpi_match[];

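/* Pull the per-port platform resources: the ENET CSR MMIO region, a
 * firmware-provided MAC address (with a random fallback), the
 * phy-connection-type (only RGMII is accepted) and the IRQ line.
 */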
static int xge_get_resources(struct xge_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	int phy_mode, ret = 0;
	struct resource *res;
	struct device *dev;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}

	pdata->resources.base_addr = devm_ioremap(dev, res->start,
						  resource_size(res));
	if (!pdata->resources.base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	phy_mode = device_get_phy_mode(dev);
	if (phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return phy_mode;
	}
	pdata->resources.phy_mode = phy_mode;

	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "Unable to get irq\n");
		return ret;
	}
	pdata->resources.irq = ret;

	return 0;
}

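/* Post @nbuf empty buffers on the RX ring, starting at the current
 * tail.  Each slot gets a freshly mapped skb: the high half of the
 * buffer address goes into m1 (preserving the next-descriptor
 * pointer), then m0 is written with the low half and the E (empty)
 * bit to hand the slot to hardware.
 */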
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		/* make sure m1 is fully written before the slot is
		 * handed to hardware via the E bit in m0
		 */
		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}

static int xge_init_hw(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_port_reset(ndev);
	if (ret)
		return ret;

	xge_port_init(ndev);
	pdata->nbufs = NUM_BUFS;

	return 0;
}

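/* A single interrupt covers both TX completion and RX.  Mask further
 * interrupts and defer all work to NAPI context.
 */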
static irqreturn_t xge_irq(const int irq, void *data)
{
	struct xge_pdata *pdata = data;

	if (napi_schedule_prep(&pdata->napi)) {
		xge_intr_disable(pdata);
		__napi_schedule(&pdata->napi);
	}

	return IRQ_HANDLED;
}

static int xge_request_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);

	ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
			  pdata);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);

	return ret;
}

static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}

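/* A TX slot may be reused only after xge_txc_poll() has reclaimed it:
 * hardware sets E on completion, and the reclaim path additionally
 * stamps PKT_SIZE with SLOT_EMPTY.
 */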
static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
		return true;

	return false;
}

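/* Transmit the linear part of an skb out of a coherent bounce buffer,
 * since packet buffers must be 64B aligned.  The descriptor is
 * published in two steps: m1 (buffer high address) first, then, after
 * a dma_wmb(), m0 with the low address, the length and E cleared;
 * writing DMATXCTRL kicks the DMA engine.
 */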
static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	u64 addr_lo, addr_hi;
	void *pkt_buf;
	u8 tail;
	u16 len;

	tx_ring = pdata->tx_ring;
	tail = tx_ring->tail;
	len = skb_headlen(skb);
	raw_desc = &tx_ring->raw_desc[tail];

	if (!is_tx_slot_available(raw_desc)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* Packet buffers should be 64B aligned */
	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
				     GFP_ATOMIC);
	if (unlikely(!pkt_buf)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	memcpy(pkt_buf, skb->data, len);

	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
				   SET_BITS(PKT_ADDRH,
					    upper_32_bits(dma_addr)));

	tx_ring->pkt_info[tail].skb = skb;
	tx_ring->pkt_info[tail].dma_addr = dma_addr;
	tx_ring->pkt_info[tail].pkt_buf = pkt_buf;

	/* order the m1 write before the slot is handed to hardware by
	 * clearing E in m0
	 */
	dma_wmb();

	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	skb_tx_timestamp(skb);
	xge_wr_csr(pdata, DMATXCTRL, 1);

	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;
}

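/* Hardware reports completion by setting E while PKT_SIZE reads as
 * zero; SLOT_EMPTY in PKT_SIZE is only written back by the reclaim
 * path (see is_tx_slot_available()).
 */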
static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
{
	if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
	    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

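/* Reclaim completed TX descriptors: free the bounce buffer and skb,
 * update stats, mark the slot reusable and acknowledge one completion
 * in DMATXSTATUS per descriptor.  Restarts the queue if it was
 * stopped on a full ring.
 */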
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}

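/* Receive up to @budget packets.  A descriptor is ready once hardware
 * has cleared its E bit; the skb is unmapped and either dropped (the
 * D bit in m2 flags an error) or handed to GRO, and the slot is
 * refilled immediately.  Returns the number of slots processed.
 */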
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		/* read the rest of the descriptor only after seeing
		 * the E bit cleared above
		 */
		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}

static void xge_delete_desc_ring(struct net_device *ndev,
				 struct xge_desc_ring *ring)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	u16 size;

	if (!ring)
		return;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	if (ring->desc_addr)
		dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);

	kfree(ring->pkt_info);
	kfree(ring);
}

static void xge_free_buffers(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	struct device *dev = &pdata->pdev->dev;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		skb = ring->pkt_info[i].skb;
		dma_addr = ring->pkt_info[i].dma_addr;

		if (!skb)
			continue;

		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

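/* Drain outstanding TX completions and pending RX traffic before the
 * buffers and the rings themselves are freed.
 */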
static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}

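/* Allocate one descriptor ring: a coherent DMA area for the
 * descriptors plus a parallel pkt_info[] array tracking the skb, DMA
 * address and bounce buffer behind each slot.
 */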
static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *ring;
	u16 size;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;

	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
	ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->desc_addr)
		goto err;

	ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
				 GFP_KERNEL);
	if (!ring->pkt_info)
		goto err;

	xge_setup_desc(ring);

	return ring;

err:
	xge_delete_desc_ring(ndev, ring);

	return NULL;
}

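/* Create the TX and RX rings, program their base addresses into the
 * hardware and pre-fill the RX ring with empty buffers.
 */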
static int xge_create_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring;
	int ret;

	/* create tx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->tx_ring = ring;
	xge_update_tx_desc_addr(pdata);

	/* create rx ring */
	ring = xge_create_desc_ring(ndev);
	if (!ring)
		goto err;

	pdata->rx_ring = ring;
	xge_update_rx_desc_addr(pdata);

	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
	if (ret)
		goto err;

	return 0;
err:
	xge_delete_desc_rings(ndev);

	return -ENOMEM;
}

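/* Bring-up order: rings first, then NAPI and the IRQ, then interrupts
 * and RX DMA, and finally the PHY, MAC and queue.  xge_close()
 * unwinds in reverse.
 */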
static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret) {
		napi_disable(&pdata->napi);
		xge_delete_desc_rings(ndev);
		return ret;
	}

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

	return 0;
}

static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}

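/* NAPI poll: TX completions are reclaimed unconditionally, RX work is
 * bounded by @budget.  Interrupts are re-enabled only once the RX
 * work fits within the budget.
 */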
static int xge_napi(struct napi_struct *napi, const int budget)
{
	struct net_device *ndev = napi->dev;
	struct xge_pdata *pdata;
	int processed;

	pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	processed = xge_rx_poll(ndev, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		xge_intr_enable(pdata);
	}

	return processed;
}

static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;

	xge_mac_set_station_addr(pdata);

	return 0;
}

static bool is_tx_pending(struct xge_raw_desc *raw_desc)
{
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
		return true;

	return false;
}

static void xge_free_pending_skb(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	int i;

	tx_ring = pdata->tx_ring;

	for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
		raw_desc = &tx_ring->raw_desc[i];

		if (!is_tx_pending(raw_desc))
			continue;

		skb = tx_ring->pkt_info[i].skb;
		dma_addr = tx_ring->pkt_info[i].dma_addr;
		pkt_buf = tx_ring->pkt_info[i].pkt_buf;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);
	}
}

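/* TX watchdog: quiesce the queue and the TX DMA engine, reclaim what
 * has completed, drop whatever is still pending in the ring, then
 * rebuild the TX ring and restart the MAC.
 */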
static void xge_timeout(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}

static void xge_get_stats64(struct net_device *ndev,
			    struct rtnl_link_stats64 *storage)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_stats *stats = &pdata->stats;

	storage->tx_packets += stats->tx_packets;
	storage->tx_bytes += stats->tx_bytes;

	storage->rx_packets += stats->rx_packets;
	storage->rx_bytes += stats->rx_bytes;
	storage->rx_errors += stats->rx_errors;
}

static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};

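/* Probe: allocate the netdev, map the platform resources, set a
 * 64-bit DMA mask, reset and initialize the port, attach the MDIO
 * bus and PHY, and register the device.
 */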
static int xge_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct xge_pdata *pdata;
	int ret;

	ndev = alloc_etherdev(sizeof(*pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;

	ndev->features |= NETIF_F_GSO |
			  NETIF_F_GRO;

	ret = xge_get_resources(pdata);
	if (ret)
		goto err;

	ndev->hw_features = ndev->features;
	xge_set_ethtool_ops(ndev);

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xge_init_hw(ndev);
	if (ret)
		goto err;

	ret = xge_mdio_config(ndev);
	if (ret)
		goto err;

	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err_mdio_remove;
	}

	return 0;

err_mdio_remove:
	xge_mdio_remove(ndev);
err:
	free_netdev(ndev);

	return ret;
}

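/* Tear-down mirrors probe: close the interface under RTNL if it is
 * still running, then detach the MDIO bus and unregister the netdev.
 */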
static int xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

static void xge_shutdown(struct platform_device *pdev)
{
	struct xge_pdata *pdata;

	pdata = platform_get_drvdata(pdev);
	if (!pdata)
		return;

	if (!pdata->ndev)
		return;

	xge_remove(pdev);
}

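/* Probing is ACPI-only; this driver provides no OF match table. */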
static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);

static struct platform_driver xge_driver = {
	.driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");