xref: /linux/drivers/net/ethernet/ti/netcp_ethss.c (revision fbc872c38c8fed31948c85683b5326ee5ab9fccc)
1 /*
2  * Keystone GBE and XGBE subsystem code
3  *
4  * Copyright (C) 2014 Texas Instruments Incorporated
5  * Authors:	Sandeep Nair <sandeep_n@ti.com>
6  *		Sandeep Paulraj <s-paulraj@ti.com>
7  *		Cyril Chemparathy <cyril@ti.com>
8  *		Santosh Shilimkar <santosh.shilimkar@ti.com>
9  *		Wingman Kwok <w-kwok2@ti.com>
10  *
11  * This program is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU General Public License as
13  * published by the Free Software Foundation version 2.
14  *
15  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16  * kind, whether express or implied; without even the implied warranty
17  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  * GNU General Public License for more details.
19  */
20 
21 #include <linux/io.h>
22 #include <linux/module.h>
23 #include <linux/of_mdio.h>
24 #include <linux/of_address.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ethtool.h>
27 
28 #include "cpsw_ale.h"
29 #include "netcp.h"
30 
31 #define NETCP_DRIVER_NAME		"TI KeyStone Ethernet Driver"
32 #define NETCP_DRIVER_VERSION		"v1.0"
33 
34 #define GBE_IDENT(reg)			(((reg) >> 16) & 0xffff)
35 #define GBE_MAJOR_VERSION(reg)		(((reg) >> 8) & 0x7)
36 #define GBE_MINOR_VERSION(reg)		((reg) & 0xff)
37 #define GBE_RTL_VERSION(reg)		(((reg) >> 11) & 0x1f)
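/* Example decode (illustrative only): running the 1.4 subsystem ID value
 * GBE_SS_VERSION_14 (0x4ed21104, defined below) through these macros gives
 * IDENT 0x4ed2, major 1, RTL 2, minor 0x04.
 */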
38 
39 /* 1G Ethernet SS defines */
40 #define GBE_MODULE_NAME			"netcp-gbe"
41 #define GBE_SS_VERSION_14		0x4ed21104
42 
43 #define GBE_SS_REG_INDEX		0
44 #define GBE_SGMII34_REG_INDEX		1
45 #define GBE_SM_REG_INDEX		2
46 /* offset relative to base of GBE_SS_REG_INDEX */
47 #define GBE13_SGMII_MODULE_OFFSET	0x100
48 /* offset relative to base of GBE_SM_REG_INDEX */
49 #define GBE13_HOST_PORT_OFFSET		0x34
50 #define GBE13_SLAVE_PORT_OFFSET		0x60
51 #define GBE13_EMAC_OFFSET		0x100
52 #define GBE13_SLAVE_PORT2_OFFSET	0x200
53 #define GBE13_HW_STATS_OFFSET		0x300
54 #define GBE13_ALE_OFFSET		0x600
55 #define GBE13_HOST_PORT_NUM		0
56 #define GBE13_NUM_ALE_ENTRIES		1024
57 
58 /* 1G Ethernet NU SS defines */
59 #define GBENU_MODULE_NAME		"netcp-gbenu"
60 #define GBE_SS_ID_NU			0x4ee6
61 #define GBE_SS_ID_2U			0x4ee8
62 
63 #define IS_SS_ID_MU(d) \
64 	((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
65 	 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
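/* IS_SS_ID_MU() matches either of the newer subsystem variants, NU
 * (ident 0x4ee6) or 2U (ident 0x4ee8); IS_SS_ID_NU() matches NU only.
 */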
66 
67 #define IS_SS_ID_NU(d) \
68 	(GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
69 
70 #define GBENU_SS_REG_INDEX		0
71 #define GBENU_SM_REG_INDEX		1
72 #define GBENU_SGMII_MODULE_OFFSET	0x100
73 #define GBENU_HOST_PORT_OFFSET		0x1000
74 #define GBENU_SLAVE_PORT_OFFSET		0x2000
75 #define GBENU_EMAC_OFFSET		0x2330
76 #define GBENU_HW_STATS_OFFSET		0x1a000
77 #define GBENU_ALE_OFFSET		0x1e000
78 #define GBENU_HOST_PORT_NUM		0
79 #define GBENU_NUM_ALE_ENTRIES		1024
80 #define GBENU_SGMII_MODULE_SIZE		0x100
81 
82 /* 10G Ethernet SS defines */
83 #define XGBE_MODULE_NAME		"netcp-xgbe"
84 #define XGBE_SS_VERSION_10		0x4ee42100
85 
86 #define XGBE_SS_REG_INDEX		0
87 #define XGBE_SM_REG_INDEX		1
88 #define XGBE_SERDES_REG_INDEX		2
89 
90 /* offset relative to base of XGBE_SS_REG_INDEX */
91 #define XGBE10_SGMII_MODULE_OFFSET	0x100
92 /* offset relative to base of XGBE_SM_REG_INDEX */
93 #define XGBE10_HOST_PORT_OFFSET		0x34
94 #define XGBE10_SLAVE_PORT_OFFSET	0x64
95 #define XGBE10_EMAC_OFFSET		0x400
96 #define XGBE10_ALE_OFFSET		0x700
97 #define XGBE10_HW_STATS_OFFSET		0x800
98 #define XGBE10_HOST_PORT_NUM		0
99 #define XGBE10_NUM_ALE_ENTRIES		1024
100 
101 #define	GBE_TIMER_INTERVAL			(HZ / 2)
102 
103 /* Soft reset register values */
104 #define SOFT_RESET_MASK				BIT(0)
105 #define SOFT_RESET				BIT(0)
106 #define DEVICE_EMACSL_RESET_POLL_COUNT		100
107 #define GMACSL_RET_WARN_RESET_INCOMPLETE	-2
108 
109 #define MACSL_RX_ENABLE_CSF			BIT(23)
110 #define MACSL_ENABLE_EXT_CTL			BIT(18)
111 #define MACSL_XGMII_ENABLE			BIT(13)
112 #define MACSL_XGIG_MODE				BIT(8)
113 #define MACSL_GIG_MODE				BIT(7)
114 #define MACSL_GMII_ENABLE			BIT(5)
115 #define MACSL_FULLDUPLEX			BIT(0)
116 
117 #define GBE_CTL_P0_ENABLE			BIT(2)
118 #define GBE13_REG_VAL_STAT_ENABLE_ALL		0xff
119 #define XGBE_REG_VAL_STAT_ENABLE_ALL		0xf
120 #define GBE_STATS_CD_SEL			BIT(28)
121 
122 #define GBE_PORT_MASK(x)			(BIT(x) - 1)
123 #define GBE_MASK_NO_PORTS			0
124 
125 #define GBE_DEF_1G_MAC_CONTROL					\
126 		(MACSL_GIG_MODE | MACSL_GMII_ENABLE |		\
127 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
128 
129 #define GBE_DEF_10G_MAC_CONTROL				\
130 		(MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |		\
131 		 MACSL_ENABLE_EXT_CTL |	MACSL_RX_ENABLE_CSF)
132 
133 #define GBE_STATSA_MODULE			0
134 #define GBE_STATSB_MODULE			1
135 #define GBE_STATSC_MODULE			2
136 #define GBE_STATSD_MODULE			3
137 
138 #define GBENU_STATS0_MODULE			0
139 #define GBENU_STATS1_MODULE			1
140 #define GBENU_STATS2_MODULE			2
141 #define GBENU_STATS3_MODULE			3
142 #define GBENU_STATS4_MODULE			4
143 #define GBENU_STATS5_MODULE			5
144 #define GBENU_STATS6_MODULE			6
145 #define GBENU_STATS7_MODULE			7
146 #define GBENU_STATS8_MODULE			8
147 
148 #define XGBE_STATS0_MODULE			0
149 #define XGBE_STATS1_MODULE			1
150 #define XGBE_STATS2_MODULE			2
151 
152 /* s: 0-based slave_port */
153 #define SGMII_BASE(d, s) \
154 	(((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
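/* Illustration: with the macro above, slave ports 0 and 1 resolve to the
 * SGMII block mapped at sgmii_port_regs, while ports 2 and 3 resolve to
 * the separate sgmii_port34_regs block (presumably the resource at
 * GBE_SGMII34_REG_INDEX).
 */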
155 
156 #define GBE_TX_QUEUE				648
157 #define	GBE_TXHOOK_ORDER			0
158 #define GBE_DEFAULT_ALE_AGEOUT			30
159 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
160 #define NETCP_LINK_STATE_INVALID		-1
161 
162 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
163 		offsetof(struct gbe##_##rb, rn)
164 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
165 		offsetof(struct gbenu##_##rb, rn)
166 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
167 		offsetof(struct xgbe##_##rb, rn)
168 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
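/* Sketch of how the per-variant offset tables are used (names as in this
 * file):
 *
 *	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
 *
 * records offsetof(struct gbe_switch_regs, stat_port_en) in
 * gbe_dev->switch_regs_ofs.stat_port_en, so that a later
 *
 *	readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
 *
 * computes the ioremapped register address regardless of whether the GBE,
 * GBENU or XGBE layout was selected when the offsets were filled in.
 */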
169 
170 #define HOST_TX_PRI_MAP_DEFAULT			0x00000000
171 
172 struct xgbe_ss_regs {
173 	u32	id_ver;
174 	u32	synce_count;
175 	u32	synce_mux;
176 	u32	control;
177 };
178 
179 struct xgbe_switch_regs {
180 	u32	id_ver;
181 	u32	control;
182 	u32	emcontrol;
183 	u32	stat_port_en;
184 	u32	ptype;
185 	u32	soft_idle;
186 	u32	thru_rate;
187 	u32	gap_thresh;
188 	u32	tx_start_wds;
189 	u32	flow_control;
190 	u32	cppi_thresh;
191 };
192 
193 struct xgbe_port_regs {
194 	u32	blk_cnt;
195 	u32	port_vlan;
196 	u32	tx_pri_map;
197 	u32	sa_lo;
198 	u32	sa_hi;
199 	u32	ts_ctl;
200 	u32	ts_seq_ltype;
201 	u32	ts_vlan;
202 	u32	ts_ctl_ltype2;
203 	u32	ts_ctl2;
204 	u32	control;
205 };
206 
207 struct xgbe_host_port_regs {
208 	u32	blk_cnt;
209 	u32	port_vlan;
210 	u32	tx_pri_map;
211 	u32	src_id;
212 	u32	rx_pri_map;
213 	u32	rx_maxlen;
214 };
215 
216 struct xgbe_emac_regs {
217 	u32	id_ver;
218 	u32	mac_control;
219 	u32	mac_status;
220 	u32	soft_reset;
221 	u32	rx_maxlen;
222 	u32	__reserved_0;
223 	u32	rx_pause;
224 	u32	tx_pause;
225 	u32	em_control;
226 	u32	__reserved_1;
227 	u32	tx_gap;
228 	u32	rsvd[4];
229 };
230 
231 struct xgbe_host_hw_stats {
232 	u32	rx_good_frames;
233 	u32	rx_broadcast_frames;
234 	u32	rx_multicast_frames;
235 	u32	__rsvd_0[3];
236 	u32	rx_oversized_frames;
237 	u32	__rsvd_1;
238 	u32	rx_undersized_frames;
239 	u32	__rsvd_2;
240 	u32	overrun_type4;
241 	u32	overrun_type5;
242 	u32	rx_bytes;
243 	u32	tx_good_frames;
244 	u32	tx_broadcast_frames;
245 	u32	tx_multicast_frames;
246 	u32	__rsvd_3[9];
247 	u32	tx_bytes;
248 	u32	tx_64byte_frames;
249 	u32	tx_65_to_127byte_frames;
250 	u32	tx_128_to_255byte_frames;
251 	u32	tx_256_to_511byte_frames;
252 	u32	tx_512_to_1023byte_frames;
253 	u32	tx_1024byte_frames;
254 	u32	net_bytes;
255 	u32	rx_sof_overruns;
256 	u32	rx_mof_overruns;
257 	u32	rx_dma_overruns;
258 };
259 
260 struct xgbe_hw_stats {
261 	u32	rx_good_frames;
262 	u32	rx_broadcast_frames;
263 	u32	rx_multicast_frames;
264 	u32	rx_pause_frames;
265 	u32	rx_crc_errors;
266 	u32	rx_align_code_errors;
267 	u32	rx_oversized_frames;
268 	u32	rx_jabber_frames;
269 	u32	rx_undersized_frames;
270 	u32	rx_fragments;
271 	u32	overrun_type4;
272 	u32	overrun_type5;
273 	u32	rx_bytes;
274 	u32	tx_good_frames;
275 	u32	tx_broadcast_frames;
276 	u32	tx_multicast_frames;
277 	u32	tx_pause_frames;
278 	u32	tx_deferred_frames;
279 	u32	tx_collision_frames;
280 	u32	tx_single_coll_frames;
281 	u32	tx_mult_coll_frames;
282 	u32	tx_excessive_collisions;
283 	u32	tx_late_collisions;
284 	u32	tx_underrun;
285 	u32	tx_carrier_sense_errors;
286 	u32	tx_bytes;
287 	u32	tx_64byte_frames;
288 	u32	tx_65_to_127byte_frames;
289 	u32	tx_128_to_255byte_frames;
290 	u32	tx_256_to_511byte_frames;
291 	u32	tx_512_to_1023byte_frames;
292 	u32	tx_1024byte_frames;
293 	u32	net_bytes;
294 	u32	rx_sof_overruns;
295 	u32	rx_mof_overruns;
296 	u32	rx_dma_overruns;
297 };
298 
299 struct gbenu_ss_regs {
300 	u32	id_ver;
301 	u32	synce_count;		/* NU */
302 	u32	synce_mux;		/* NU */
303 	u32	control;		/* 2U */
304 	u32	__rsvd_0[2];		/* 2U */
305 	u32	rgmii_status;		/* 2U */
306 	u32	ss_status;		/* 2U */
307 };
308 
309 struct gbenu_switch_regs {
310 	u32	id_ver;
311 	u32	control;
312 	u32	__rsvd_0[2];
313 	u32	emcontrol;
314 	u32	stat_port_en;
315 	u32	ptype;			/* NU */
316 	u32	soft_idle;
317 	u32	thru_rate;		/* NU */
318 	u32	gap_thresh;		/* NU */
319 	u32	tx_start_wds;		/* NU */
320 	u32	eee_prescale;		/* 2U */
321 	u32	tx_g_oflow_thresh_set;	/* NU */
322 	u32	tx_g_oflow_thresh_clr;	/* NU */
323 	u32	tx_g_buf_thresh_set_l;	/* NU */
324 	u32	tx_g_buf_thresh_set_h;	/* NU */
325 	u32	tx_g_buf_thresh_clr_l;	/* NU */
326 	u32	tx_g_buf_thresh_clr_h;	/* NU */
327 };
328 
329 struct gbenu_port_regs {
330 	u32	__rsvd_0;
331 	u32	control;
332 	u32	max_blks;		/* 2U */
333 	u32	mem_align1;
334 	u32	blk_cnt;
335 	u32	port_vlan;
336 	u32	tx_pri_map;		/* NU */
337 	u32	pri_ctl;		/* 2U */
338 	u32	rx_pri_map;
339 	u32	rx_maxlen;
340 	u32	tx_blks_pri;		/* NU */
341 	u32	__rsvd_1;
342 	u32	idle2lpi;		/* 2U */
343 	u32	lpi2idle;		/* 2U */
344 	u32	eee_status;		/* 2U */
345 	u32	__rsvd_2;
346 	u32	__rsvd_3[176];		/* NU: more to add */
347 	u32	__rsvd_4[2];
348 	u32	sa_lo;
349 	u32	sa_hi;
350 	u32	ts_ctl;
351 	u32	ts_seq_ltype;
352 	u32	ts_vlan;
353 	u32	ts_ctl_ltype2;
354 	u32	ts_ctl2;
355 };
356 
357 struct gbenu_host_port_regs {
358 	u32	__rsvd_0;
359 	u32	control;
360 	u32	flow_id_offset;		/* 2U */
361 	u32	__rsvd_1;
362 	u32	blk_cnt;
363 	u32	port_vlan;
364 	u32	tx_pri_map;		/* NU */
365 	u32	pri_ctl;
366 	u32	rx_pri_map;
367 	u32	rx_maxlen;
368 	u32	tx_blks_pri;		/* NU */
369 	u32	__rsvd_2;
370 	u32	idle2lpi;		/* 2U */
371 	u32	lpi2wake;		/* 2U */
372 	u32	eee_status;		/* 2U */
373 	u32	__rsvd_3;
374 	u32	__rsvd_4[184];		/* NU */
375 	u32	host_blks_pri;		/* NU */
376 };
377 
378 struct gbenu_emac_regs {
379 	u32	mac_control;
380 	u32	mac_status;
381 	u32	soft_reset;
382 	u32	boff_test;
383 	u32	rx_pause;
384 	u32	__rsvd_0[11];		/* NU */
385 	u32	tx_pause;
386 	u32	__rsvd_1[11];		/* NU */
387 	u32	em_control;
388 	u32	tx_gap;
389 };
390 
391 /* Some hw stat regs are applicable to the slave ports only;
392  * this is handled by the gbenu_et_stats table.  Likewise, some
393  * counters exist only on SS version NU and some only on 2U.
394  */
395 struct gbenu_hw_stats {
396 	u32	rx_good_frames;
397 	u32	rx_broadcast_frames;
398 	u32	rx_multicast_frames;
399 	u32	rx_pause_frames;		/* slave */
400 	u32	rx_crc_errors;
401 	u32	rx_align_code_errors;		/* slave */
402 	u32	rx_oversized_frames;
403 	u32	rx_jabber_frames;		/* slave */
404 	u32	rx_undersized_frames;
405 	u32	rx_fragments;			/* slave */
406 	u32	ale_drop;
407 	u32	ale_overrun_drop;
408 	u32	rx_bytes;
409 	u32	tx_good_frames;
410 	u32	tx_broadcast_frames;
411 	u32	tx_multicast_frames;
412 	u32	tx_pause_frames;		/* slave */
413 	u32	tx_deferred_frames;		/* slave */
414 	u32	tx_collision_frames;		/* slave */
415 	u32	tx_single_coll_frames;		/* slave */
416 	u32	tx_mult_coll_frames;		/* slave */
417 	u32	tx_excessive_collisions;	/* slave */
418 	u32	tx_late_collisions;		/* slave */
419 	u32	rx_ipg_error;			/* slave 10G only */
420 	u32	tx_carrier_sense_errors;	/* slave */
421 	u32	tx_bytes;
422 	u32	tx_64B_frames;
423 	u32	tx_65_to_127B_frames;
424 	u32	tx_128_to_255B_frames;
425 	u32	tx_256_to_511B_frames;
426 	u32	tx_512_to_1023B_frames;
427 	u32	tx_1024B_frames;
428 	u32	net_bytes;
429 	u32	rx_bottom_fifo_drop;
430 	u32	rx_port_mask_drop;
431 	u32	rx_top_fifo_drop;
432 	u32	ale_rate_limit_drop;
433 	u32	ale_vid_ingress_drop;
434 	u32	ale_da_eq_sa_drop;
435 	u32	__rsvd_0[3];
436 	u32	ale_unknown_ucast;
437 	u32	ale_unknown_ucast_bytes;
438 	u32	ale_unknown_mcast;
439 	u32	ale_unknown_mcast_bytes;
440 	u32	ale_unknown_bcast;
441 	u32	ale_unknown_bcast_bytes;
442 	u32	ale_pol_match;
443 	u32	ale_pol_match_red;		/* NU */
444 	u32	ale_pol_match_yellow;		/* NU */
445 	u32	__rsvd_1[44];
446 	u32	tx_mem_protect_err;
447 	/* following NU only */
448 	u32	tx_pri0;
449 	u32	tx_pri1;
450 	u32	tx_pri2;
451 	u32	tx_pri3;
452 	u32	tx_pri4;
453 	u32	tx_pri5;
454 	u32	tx_pri6;
455 	u32	tx_pri7;
456 	u32	tx_pri0_bcnt;
457 	u32	tx_pri1_bcnt;
458 	u32	tx_pri2_bcnt;
459 	u32	tx_pri3_bcnt;
460 	u32	tx_pri4_bcnt;
461 	u32	tx_pri5_bcnt;
462 	u32	tx_pri6_bcnt;
463 	u32	tx_pri7_bcnt;
464 	u32	tx_pri0_drop;
465 	u32	tx_pri1_drop;
466 	u32	tx_pri2_drop;
467 	u32	tx_pri3_drop;
468 	u32	tx_pri4_drop;
469 	u32	tx_pri5_drop;
470 	u32	tx_pri6_drop;
471 	u32	tx_pri7_drop;
472 	u32	tx_pri0_drop_bcnt;
473 	u32	tx_pri1_drop_bcnt;
474 	u32	tx_pri2_drop_bcnt;
475 	u32	tx_pri3_drop_bcnt;
476 	u32	tx_pri4_drop_bcnt;
477 	u32	tx_pri5_drop_bcnt;
478 	u32	tx_pri6_drop_bcnt;
479 	u32	tx_pri7_drop_bcnt;
480 };
481 
482 #define GBENU_HW_STATS_REG_MAP_SZ	0x200
483 
484 struct gbe_ss_regs {
485 	u32	id_ver;
486 	u32	synce_count;
487 	u32	synce_mux;
488 };
489 
490 struct gbe_ss_regs_ofs {
491 	u16	id_ver;
492 	u16	control;
493 };
494 
495 struct gbe_switch_regs {
496 	u32	id_ver;
497 	u32	control;
498 	u32	soft_reset;
499 	u32	stat_port_en;
500 	u32	ptype;
501 	u32	soft_idle;
502 	u32	thru_rate;
503 	u32	gap_thresh;
504 	u32	tx_start_wds;
505 	u32	flow_control;
506 };
507 
508 struct gbe_switch_regs_ofs {
509 	u16	id_ver;
510 	u16	control;
511 	u16	soft_reset;
512 	u16	emcontrol;
513 	u16	stat_port_en;
514 	u16	ptype;
515 	u16	flow_control;
516 };
517 
518 struct gbe_port_regs {
519 	u32	max_blks;
520 	u32	blk_cnt;
521 	u32	port_vlan;
522 	u32	tx_pri_map;
523 	u32	sa_lo;
524 	u32	sa_hi;
525 	u32	ts_ctl;
526 	u32	ts_seq_ltype;
527 	u32	ts_vlan;
528 	u32	ts_ctl_ltype2;
529 	u32	ts_ctl2;
530 };
531 
532 struct gbe_port_regs_ofs {
533 	u16	port_vlan;
534 	u16	tx_pri_map;
535 	u16	sa_lo;
536 	u16	sa_hi;
537 	u16	ts_ctl;
538 	u16	ts_seq_ltype;
539 	u16	ts_vlan;
540 	u16	ts_ctl_ltype2;
541 	u16	ts_ctl2;
542 	u16	rx_maxlen;	/* 2U, NU */
543 };
544 
545 struct gbe_host_port_regs {
546 	u32	src_id;
547 	u32	port_vlan;
548 	u32	rx_pri_map;
549 	u32	rx_maxlen;
550 };
551 
552 struct gbe_host_port_regs_ofs {
553 	u16	port_vlan;
554 	u16	tx_pri_map;
555 	u16	rx_maxlen;
556 };
557 
558 struct gbe_emac_regs {
559 	u32	id_ver;
560 	u32	mac_control;
561 	u32	mac_status;
562 	u32	soft_reset;
563 	u32	rx_maxlen;
564 	u32	__reserved_0;
565 	u32	rx_pause;
566 	u32	tx_pause;
567 	u32	__reserved_1;
568 	u32	rx_pri_map;
569 	u32	rsvd[6];
570 };
571 
572 struct gbe_emac_regs_ofs {
573 	u16	mac_control;
574 	u16	soft_reset;
575 	u16	rx_maxlen;
576 };
577 
578 struct gbe_hw_stats {
579 	u32	rx_good_frames;
580 	u32	rx_broadcast_frames;
581 	u32	rx_multicast_frames;
582 	u32	rx_pause_frames;
583 	u32	rx_crc_errors;
584 	u32	rx_align_code_errors;
585 	u32	rx_oversized_frames;
586 	u32	rx_jabber_frames;
587 	u32	rx_undersized_frames;
588 	u32	rx_fragments;
589 	u32	__pad_0[2];
590 	u32	rx_bytes;
591 	u32	tx_good_frames;
592 	u32	tx_broadcast_frames;
593 	u32	tx_multicast_frames;
594 	u32	tx_pause_frames;
595 	u32	tx_deferred_frames;
596 	u32	tx_collision_frames;
597 	u32	tx_single_coll_frames;
598 	u32	tx_mult_coll_frames;
599 	u32	tx_excessive_collisions;
600 	u32	tx_late_collisions;
601 	u32	tx_underrun;
602 	u32	tx_carrier_sense_errors;
603 	u32	tx_bytes;
604 	u32	tx_64byte_frames;
605 	u32	tx_65_to_127byte_frames;
606 	u32	tx_128_to_255byte_frames;
607 	u32	tx_256_to_511byte_frames;
608 	u32	tx_512_to_1023byte_frames;
609 	u32	tx_1024byte_frames;
610 	u32	net_bytes;
611 	u32	rx_sof_overruns;
612 	u32	rx_mof_overruns;
613 	u32	rx_dma_overruns;
614 };
615 
616 #define GBE_MAX_HW_STAT_MODS			9
617 #define GBE_HW_STATS_REG_MAP_SZ			0x100
618 
619 struct gbe_slave {
620 	void __iomem			*port_regs;
621 	void __iomem			*emac_regs;
622 	struct gbe_port_regs_ofs	port_regs_ofs;
623 	struct gbe_emac_regs_ofs	emac_regs_ofs;
624 	int				slave_num; /* 0 based logical number */
625 	int				port_num;  /* actual port number */
626 	atomic_t			link_state;
627 	bool				open;
628 	struct phy_device		*phy;
629 	u32				link_interface;
630 	u32				mac_control;
631 	u8				phy_port_t;
632 	struct device_node		*phy_node;
633 	struct list_head		slave_list;
634 };
635 
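/* Per-instance state for one GBE/GBENU/XGBE switch subsystem: register
 * mappings, the ALE handle, slave bookkeeping and the software copies of
 * the hardware statistics counters.
 */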
636 struct gbe_priv {
637 	struct device			*dev;
638 	struct netcp_device		*netcp_device;
639 	struct timer_list		timer;
640 	u32				num_slaves;
641 	u32				ale_entries;
642 	u32				ale_ports;
643 	bool				enable_ale;
644 	u8				max_num_slaves;
645 	u8				max_num_ports; /* max_num_slaves + 1 */
646 	u8				num_stats_mods;
647 	struct netcp_tx_pipe		tx_pipe;
648 
649 	int				host_port;
650 	u32				rx_packet_max;
651 	u32				ss_version;
652 	u32				stats_en_mask;
653 
654 	void __iomem			*ss_regs;
655 	void __iomem			*switch_regs;
656 	void __iomem			*host_port_regs;
657 	void __iomem			*ale_reg;
658 	void __iomem			*sgmii_port_regs;
659 	void __iomem			*sgmii_port34_regs;
660 	void __iomem			*xgbe_serdes_regs;
661 	void __iomem			*hw_stats_regs[GBE_MAX_HW_STAT_MODS];
662 
663 	struct gbe_ss_regs_ofs		ss_regs_ofs;
664 	struct gbe_switch_regs_ofs	switch_regs_ofs;
665 	struct gbe_host_port_regs_ofs	host_port_regs_ofs;
666 
667 	struct cpsw_ale			*ale;
668 	unsigned int			tx_queue_id;
669 	const char			*dma_chan_name;
670 
671 	struct list_head		gbe_intf_head;
672 	struct list_head		secondary_slaves;
673 	struct net_device		*dummy_ndev;
674 
675 	u64				*hw_stats;
676 	u32				*hw_stats_prev;
677 	const struct netcp_ethtool_stat *et_stats;
678 	int				num_et_stats;
679 	/*  Lock for updating the hwstats */
680 	spinlock_t			hw_stats_lock;
681 };
682 
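/* Per-interface glue: one gbe_intf ties a NETCP net_device to a single
 * slave port of the switch instance above.
 */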
683 struct gbe_intf {
684 	struct net_device	*ndev;
685 	struct device		*dev;
686 	struct gbe_priv		*gbe_dev;
687 	struct netcp_tx_pipe	tx_pipe;
688 	struct gbe_slave	*slave;
689 	struct list_head	gbe_intf_list;
690 	unsigned long		active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
691 };
692 
693 static struct netcp_module gbe_module;
694 static struct netcp_module xgbe_module;
695 
696 /* Statistics management */
697 struct netcp_ethtool_stat {
698 	char desc[ETH_GSTRING_LEN];
699 	int type;
700 	u32 size;
701 	int offset;
702 };
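/* Each *_INFO()/*_STATS_*() initializer below fills one of these entries:
 * the ethtool string, the hardware statistics module the counter belongs
 * to (type), the counter width in bytes, and its byte offset within that
 * module's register block.
 */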
703 
704 #define GBE_STATSA_INFO(field)						\
705 {									\
706 	"GBE_A:"#field, GBE_STATSA_MODULE,				\
707 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
708 	offsetof(struct gbe_hw_stats, field)				\
709 }
710 
711 #define GBE_STATSB_INFO(field)						\
712 {									\
713 	"GBE_B:"#field, GBE_STATSB_MODULE,				\
714 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
715 	offsetof(struct gbe_hw_stats, field)				\
716 }
717 
718 #define GBE_STATSC_INFO(field)						\
719 {									\
720 	"GBE_C:"#field, GBE_STATSC_MODULE,				\
721 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
722 	offsetof(struct gbe_hw_stats, field)				\
723 }
724 
725 #define GBE_STATSD_INFO(field)						\
726 {									\
727 	"GBE_D:"#field, GBE_STATSD_MODULE,				\
728 	FIELD_SIZEOF(struct gbe_hw_stats, field),			\
729 	offsetof(struct gbe_hw_stats, field)				\
730 }
731 
732 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
733 	/* GBE module A */
734 	GBE_STATSA_INFO(rx_good_frames),
735 	GBE_STATSA_INFO(rx_broadcast_frames),
736 	GBE_STATSA_INFO(rx_multicast_frames),
737 	GBE_STATSA_INFO(rx_pause_frames),
738 	GBE_STATSA_INFO(rx_crc_errors),
739 	GBE_STATSA_INFO(rx_align_code_errors),
740 	GBE_STATSA_INFO(rx_oversized_frames),
741 	GBE_STATSA_INFO(rx_jabber_frames),
742 	GBE_STATSA_INFO(rx_undersized_frames),
743 	GBE_STATSA_INFO(rx_fragments),
744 	GBE_STATSA_INFO(rx_bytes),
745 	GBE_STATSA_INFO(tx_good_frames),
746 	GBE_STATSA_INFO(tx_broadcast_frames),
747 	GBE_STATSA_INFO(tx_multicast_frames),
748 	GBE_STATSA_INFO(tx_pause_frames),
749 	GBE_STATSA_INFO(tx_deferred_frames),
750 	GBE_STATSA_INFO(tx_collision_frames),
751 	GBE_STATSA_INFO(tx_single_coll_frames),
752 	GBE_STATSA_INFO(tx_mult_coll_frames),
753 	GBE_STATSA_INFO(tx_excessive_collisions),
754 	GBE_STATSA_INFO(tx_late_collisions),
755 	GBE_STATSA_INFO(tx_underrun),
756 	GBE_STATSA_INFO(tx_carrier_sense_errors),
757 	GBE_STATSA_INFO(tx_bytes),
758 	GBE_STATSA_INFO(tx_64byte_frames),
759 	GBE_STATSA_INFO(tx_65_to_127byte_frames),
760 	GBE_STATSA_INFO(tx_128_to_255byte_frames),
761 	GBE_STATSA_INFO(tx_256_to_511byte_frames),
762 	GBE_STATSA_INFO(tx_512_to_1023byte_frames),
763 	GBE_STATSA_INFO(tx_1024byte_frames),
764 	GBE_STATSA_INFO(net_bytes),
765 	GBE_STATSA_INFO(rx_sof_overruns),
766 	GBE_STATSA_INFO(rx_mof_overruns),
767 	GBE_STATSA_INFO(rx_dma_overruns),
768 	/* GBE module B */
769 	GBE_STATSB_INFO(rx_good_frames),
770 	GBE_STATSB_INFO(rx_broadcast_frames),
771 	GBE_STATSB_INFO(rx_multicast_frames),
772 	GBE_STATSB_INFO(rx_pause_frames),
773 	GBE_STATSB_INFO(rx_crc_errors),
774 	GBE_STATSB_INFO(rx_align_code_errors),
775 	GBE_STATSB_INFO(rx_oversized_frames),
776 	GBE_STATSB_INFO(rx_jabber_frames),
777 	GBE_STATSB_INFO(rx_undersized_frames),
778 	GBE_STATSB_INFO(rx_fragments),
779 	GBE_STATSB_INFO(rx_bytes),
780 	GBE_STATSB_INFO(tx_good_frames),
781 	GBE_STATSB_INFO(tx_broadcast_frames),
782 	GBE_STATSB_INFO(tx_multicast_frames),
783 	GBE_STATSB_INFO(tx_pause_frames),
784 	GBE_STATSB_INFO(tx_deferred_frames),
785 	GBE_STATSB_INFO(tx_collision_frames),
786 	GBE_STATSB_INFO(tx_single_coll_frames),
787 	GBE_STATSB_INFO(tx_mult_coll_frames),
788 	GBE_STATSB_INFO(tx_excessive_collisions),
789 	GBE_STATSB_INFO(tx_late_collisions),
790 	GBE_STATSB_INFO(tx_underrun),
791 	GBE_STATSB_INFO(tx_carrier_sense_errors),
792 	GBE_STATSB_INFO(tx_bytes),
793 	GBE_STATSB_INFO(tx_64byte_frames),
794 	GBE_STATSB_INFO(tx_65_to_127byte_frames),
795 	GBE_STATSB_INFO(tx_128_to_255byte_frames),
796 	GBE_STATSB_INFO(tx_256_to_511byte_frames),
797 	GBE_STATSB_INFO(tx_512_to_1023byte_frames),
798 	GBE_STATSB_INFO(tx_1024byte_frames),
799 	GBE_STATSB_INFO(net_bytes),
800 	GBE_STATSB_INFO(rx_sof_overruns),
801 	GBE_STATSB_INFO(rx_mof_overruns),
802 	GBE_STATSB_INFO(rx_dma_overruns),
803 	/* GBE module C */
804 	GBE_STATSC_INFO(rx_good_frames),
805 	GBE_STATSC_INFO(rx_broadcast_frames),
806 	GBE_STATSC_INFO(rx_multicast_frames),
807 	GBE_STATSC_INFO(rx_pause_frames),
808 	GBE_STATSC_INFO(rx_crc_errors),
809 	GBE_STATSC_INFO(rx_align_code_errors),
810 	GBE_STATSC_INFO(rx_oversized_frames),
811 	GBE_STATSC_INFO(rx_jabber_frames),
812 	GBE_STATSC_INFO(rx_undersized_frames),
813 	GBE_STATSC_INFO(rx_fragments),
814 	GBE_STATSC_INFO(rx_bytes),
815 	GBE_STATSC_INFO(tx_good_frames),
816 	GBE_STATSC_INFO(tx_broadcast_frames),
817 	GBE_STATSC_INFO(tx_multicast_frames),
818 	GBE_STATSC_INFO(tx_pause_frames),
819 	GBE_STATSC_INFO(tx_deferred_frames),
820 	GBE_STATSC_INFO(tx_collision_frames),
821 	GBE_STATSC_INFO(tx_single_coll_frames),
822 	GBE_STATSC_INFO(tx_mult_coll_frames),
823 	GBE_STATSC_INFO(tx_excessive_collisions),
824 	GBE_STATSC_INFO(tx_late_collisions),
825 	GBE_STATSC_INFO(tx_underrun),
826 	GBE_STATSC_INFO(tx_carrier_sense_errors),
827 	GBE_STATSC_INFO(tx_bytes),
828 	GBE_STATSC_INFO(tx_64byte_frames),
829 	GBE_STATSC_INFO(tx_65_to_127byte_frames),
830 	GBE_STATSC_INFO(tx_128_to_255byte_frames),
831 	GBE_STATSC_INFO(tx_256_to_511byte_frames),
832 	GBE_STATSC_INFO(tx_512_to_1023byte_frames),
833 	GBE_STATSC_INFO(tx_1024byte_frames),
834 	GBE_STATSC_INFO(net_bytes),
835 	GBE_STATSC_INFO(rx_sof_overruns),
836 	GBE_STATSC_INFO(rx_mof_overruns),
837 	GBE_STATSC_INFO(rx_dma_overruns),
838 	/* GBE module D */
839 	GBE_STATSD_INFO(rx_good_frames),
840 	GBE_STATSD_INFO(rx_broadcast_frames),
841 	GBE_STATSD_INFO(rx_multicast_frames),
842 	GBE_STATSD_INFO(rx_pause_frames),
843 	GBE_STATSD_INFO(rx_crc_errors),
844 	GBE_STATSD_INFO(rx_align_code_errors),
845 	GBE_STATSD_INFO(rx_oversized_frames),
846 	GBE_STATSD_INFO(rx_jabber_frames),
847 	GBE_STATSD_INFO(rx_undersized_frames),
848 	GBE_STATSD_INFO(rx_fragments),
849 	GBE_STATSD_INFO(rx_bytes),
850 	GBE_STATSD_INFO(tx_good_frames),
851 	GBE_STATSD_INFO(tx_broadcast_frames),
852 	GBE_STATSD_INFO(tx_multicast_frames),
853 	GBE_STATSD_INFO(tx_pause_frames),
854 	GBE_STATSD_INFO(tx_deferred_frames),
855 	GBE_STATSD_INFO(tx_collision_frames),
856 	GBE_STATSD_INFO(tx_single_coll_frames),
857 	GBE_STATSD_INFO(tx_mult_coll_frames),
858 	GBE_STATSD_INFO(tx_excessive_collisions),
859 	GBE_STATSD_INFO(tx_late_collisions),
860 	GBE_STATSD_INFO(tx_underrun),
861 	GBE_STATSD_INFO(tx_carrier_sense_errors),
862 	GBE_STATSD_INFO(tx_bytes),
863 	GBE_STATSD_INFO(tx_64byte_frames),
864 	GBE_STATSD_INFO(tx_65_to_127byte_frames),
865 	GBE_STATSD_INFO(tx_128_to_255byte_frames),
866 	GBE_STATSD_INFO(tx_256_to_511byte_frames),
867 	GBE_STATSD_INFO(tx_512_to_1023byte_frames),
868 	GBE_STATSD_INFO(tx_1024byte_frames),
869 	GBE_STATSD_INFO(net_bytes),
870 	GBE_STATSD_INFO(rx_sof_overruns),
871 	GBE_STATSD_INFO(rx_mof_overruns),
872 	GBE_STATSD_INFO(rx_dma_overruns),
873 };
874 
875 /* Number of GBENU_STATS_HOST() entries in the gbenu_et_stats table */
876 #define GBENU_ET_STATS_HOST_SIZE	52
877 
878 #define GBENU_STATS_HOST(field)					\
879 {								\
880 	"GBE_HOST:"#field, GBENU_STATS0_MODULE,			\
881 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
882 	offsetof(struct gbenu_hw_stats, field)			\
883 }
884 
885 /* Number of per-port GBENU_STATS_Px() entries in the gbenu_et_stats table */
886 #define GBENU_ET_STATS_PORT_SIZE	65
887 
888 #define GBENU_STATS_P1(field)					\
889 {								\
890 	"GBE_P1:"#field, GBENU_STATS1_MODULE,			\
891 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
892 	offsetof(struct gbenu_hw_stats, field)			\
893 }
894 
895 #define GBENU_STATS_P2(field)					\
896 {								\
897 	"GBE_P2:"#field, GBENU_STATS2_MODULE,			\
898 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
899 	offsetof(struct gbenu_hw_stats, field)			\
900 }
901 
902 #define GBENU_STATS_P3(field)					\
903 {								\
904 	"GBE_P3:"#field, GBENU_STATS3_MODULE,			\
905 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
906 	offsetof(struct gbenu_hw_stats, field)			\
907 }
908 
909 #define GBENU_STATS_P4(field)					\
910 {								\
911 	"GBE_P4:"#field, GBENU_STATS4_MODULE,			\
912 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
913 	offsetof(struct gbenu_hw_stats, field)			\
914 }
915 
916 #define GBENU_STATS_P5(field)					\
917 {								\
918 	"GBE_P5:"#field, GBENU_STATS5_MODULE,			\
919 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
920 	offsetof(struct gbenu_hw_stats, field)			\
921 }
922 
923 #define GBENU_STATS_P6(field)					\
924 {								\
925 	"GBE_P6:"#field, GBENU_STATS6_MODULE,			\
926 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
927 	offsetof(struct gbenu_hw_stats, field)			\
928 }
929 
930 #define GBENU_STATS_P7(field)					\
931 {								\
932 	"GBE_P7:"#field, GBENU_STATS7_MODULE,			\
933 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
934 	offsetof(struct gbenu_hw_stats, field)			\
935 }
936 
937 #define GBENU_STATS_P8(field)					\
938 {								\
939 	"GBE_P8:"#field, GBENU_STATS8_MODULE,			\
940 	FIELD_SIZEOF(struct gbenu_hw_stats, field),		\
941 	offsetof(struct gbenu_hw_stats, field)			\
942 }
943 
944 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
945 	/* GBENU Host Module */
946 	GBENU_STATS_HOST(rx_good_frames),
947 	GBENU_STATS_HOST(rx_broadcast_frames),
948 	GBENU_STATS_HOST(rx_multicast_frames),
949 	GBENU_STATS_HOST(rx_crc_errors),
950 	GBENU_STATS_HOST(rx_oversized_frames),
951 	GBENU_STATS_HOST(rx_undersized_frames),
952 	GBENU_STATS_HOST(ale_drop),
953 	GBENU_STATS_HOST(ale_overrun_drop),
954 	GBENU_STATS_HOST(rx_bytes),
955 	GBENU_STATS_HOST(tx_good_frames),
956 	GBENU_STATS_HOST(tx_broadcast_frames),
957 	GBENU_STATS_HOST(tx_multicast_frames),
958 	GBENU_STATS_HOST(tx_bytes),
959 	GBENU_STATS_HOST(tx_64B_frames),
960 	GBENU_STATS_HOST(tx_65_to_127B_frames),
961 	GBENU_STATS_HOST(tx_128_to_255B_frames),
962 	GBENU_STATS_HOST(tx_256_to_511B_frames),
963 	GBENU_STATS_HOST(tx_512_to_1023B_frames),
964 	GBENU_STATS_HOST(tx_1024B_frames),
965 	GBENU_STATS_HOST(net_bytes),
966 	GBENU_STATS_HOST(rx_bottom_fifo_drop),
967 	GBENU_STATS_HOST(rx_port_mask_drop),
968 	GBENU_STATS_HOST(rx_top_fifo_drop),
969 	GBENU_STATS_HOST(ale_rate_limit_drop),
970 	GBENU_STATS_HOST(ale_vid_ingress_drop),
971 	GBENU_STATS_HOST(ale_da_eq_sa_drop),
972 	GBENU_STATS_HOST(ale_unknown_ucast),
973 	GBENU_STATS_HOST(ale_unknown_ucast_bytes),
974 	GBENU_STATS_HOST(ale_unknown_mcast),
975 	GBENU_STATS_HOST(ale_unknown_mcast_bytes),
976 	GBENU_STATS_HOST(ale_unknown_bcast),
977 	GBENU_STATS_HOST(ale_unknown_bcast_bytes),
978 	GBENU_STATS_HOST(ale_pol_match),
979 	GBENU_STATS_HOST(ale_pol_match_red),
980 	GBENU_STATS_HOST(ale_pol_match_yellow),
981 	GBENU_STATS_HOST(tx_mem_protect_err),
982 	GBENU_STATS_HOST(tx_pri0_drop),
983 	GBENU_STATS_HOST(tx_pri1_drop),
984 	GBENU_STATS_HOST(tx_pri2_drop),
985 	GBENU_STATS_HOST(tx_pri3_drop),
986 	GBENU_STATS_HOST(tx_pri4_drop),
987 	GBENU_STATS_HOST(tx_pri5_drop),
988 	GBENU_STATS_HOST(tx_pri6_drop),
989 	GBENU_STATS_HOST(tx_pri7_drop),
990 	GBENU_STATS_HOST(tx_pri0_drop_bcnt),
991 	GBENU_STATS_HOST(tx_pri1_drop_bcnt),
992 	GBENU_STATS_HOST(tx_pri2_drop_bcnt),
993 	GBENU_STATS_HOST(tx_pri3_drop_bcnt),
994 	GBENU_STATS_HOST(tx_pri4_drop_bcnt),
995 	GBENU_STATS_HOST(tx_pri5_drop_bcnt),
996 	GBENU_STATS_HOST(tx_pri6_drop_bcnt),
997 	GBENU_STATS_HOST(tx_pri7_drop_bcnt),
998 	/* GBENU Module 1 */
999 	GBENU_STATS_P1(rx_good_frames),
1000 	GBENU_STATS_P1(rx_broadcast_frames),
1001 	GBENU_STATS_P1(rx_multicast_frames),
1002 	GBENU_STATS_P1(rx_pause_frames),
1003 	GBENU_STATS_P1(rx_crc_errors),
1004 	GBENU_STATS_P1(rx_align_code_errors),
1005 	GBENU_STATS_P1(rx_oversized_frames),
1006 	GBENU_STATS_P1(rx_jabber_frames),
1007 	GBENU_STATS_P1(rx_undersized_frames),
1008 	GBENU_STATS_P1(rx_fragments),
1009 	GBENU_STATS_P1(ale_drop),
1010 	GBENU_STATS_P1(ale_overrun_drop),
1011 	GBENU_STATS_P1(rx_bytes),
1012 	GBENU_STATS_P1(tx_good_frames),
1013 	GBENU_STATS_P1(tx_broadcast_frames),
1014 	GBENU_STATS_P1(tx_multicast_frames),
1015 	GBENU_STATS_P1(tx_pause_frames),
1016 	GBENU_STATS_P1(tx_deferred_frames),
1017 	GBENU_STATS_P1(tx_collision_frames),
1018 	GBENU_STATS_P1(tx_single_coll_frames),
1019 	GBENU_STATS_P1(tx_mult_coll_frames),
1020 	GBENU_STATS_P1(tx_excessive_collisions),
1021 	GBENU_STATS_P1(tx_late_collisions),
1022 	GBENU_STATS_P1(rx_ipg_error),
1023 	GBENU_STATS_P1(tx_carrier_sense_errors),
1024 	GBENU_STATS_P1(tx_bytes),
1025 	GBENU_STATS_P1(tx_64B_frames),
1026 	GBENU_STATS_P1(tx_65_to_127B_frames),
1027 	GBENU_STATS_P1(tx_128_to_255B_frames),
1028 	GBENU_STATS_P1(tx_256_to_511B_frames),
1029 	GBENU_STATS_P1(tx_512_to_1023B_frames),
1030 	GBENU_STATS_P1(tx_1024B_frames),
1031 	GBENU_STATS_P1(net_bytes),
1032 	GBENU_STATS_P1(rx_bottom_fifo_drop),
1033 	GBENU_STATS_P1(rx_port_mask_drop),
1034 	GBENU_STATS_P1(rx_top_fifo_drop),
1035 	GBENU_STATS_P1(ale_rate_limit_drop),
1036 	GBENU_STATS_P1(ale_vid_ingress_drop),
1037 	GBENU_STATS_P1(ale_da_eq_sa_drop),
1038 	GBENU_STATS_P1(ale_unknown_ucast),
1039 	GBENU_STATS_P1(ale_unknown_ucast_bytes),
1040 	GBENU_STATS_P1(ale_unknown_mcast),
1041 	GBENU_STATS_P1(ale_unknown_mcast_bytes),
1042 	GBENU_STATS_P1(ale_unknown_bcast),
1043 	GBENU_STATS_P1(ale_unknown_bcast_bytes),
1044 	GBENU_STATS_P1(ale_pol_match),
1045 	GBENU_STATS_P1(ale_pol_match_red),
1046 	GBENU_STATS_P1(ale_pol_match_yellow),
1047 	GBENU_STATS_P1(tx_mem_protect_err),
1048 	GBENU_STATS_P1(tx_pri0_drop),
1049 	GBENU_STATS_P1(tx_pri1_drop),
1050 	GBENU_STATS_P1(tx_pri2_drop),
1051 	GBENU_STATS_P1(tx_pri3_drop),
1052 	GBENU_STATS_P1(tx_pri4_drop),
1053 	GBENU_STATS_P1(tx_pri5_drop),
1054 	GBENU_STATS_P1(tx_pri6_drop),
1055 	GBENU_STATS_P1(tx_pri7_drop),
1056 	GBENU_STATS_P1(tx_pri0_drop_bcnt),
1057 	GBENU_STATS_P1(tx_pri1_drop_bcnt),
1058 	GBENU_STATS_P1(tx_pri2_drop_bcnt),
1059 	GBENU_STATS_P1(tx_pri3_drop_bcnt),
1060 	GBENU_STATS_P1(tx_pri4_drop_bcnt),
1061 	GBENU_STATS_P1(tx_pri5_drop_bcnt),
1062 	GBENU_STATS_P1(tx_pri6_drop_bcnt),
1063 	GBENU_STATS_P1(tx_pri7_drop_bcnt),
1064 	/* GBENU Module 2 */
1065 	GBENU_STATS_P2(rx_good_frames),
1066 	GBENU_STATS_P2(rx_broadcast_frames),
1067 	GBENU_STATS_P2(rx_multicast_frames),
1068 	GBENU_STATS_P2(rx_pause_frames),
1069 	GBENU_STATS_P2(rx_crc_errors),
1070 	GBENU_STATS_P2(rx_align_code_errors),
1071 	GBENU_STATS_P2(rx_oversized_frames),
1072 	GBENU_STATS_P2(rx_jabber_frames),
1073 	GBENU_STATS_P2(rx_undersized_frames),
1074 	GBENU_STATS_P2(rx_fragments),
1075 	GBENU_STATS_P2(ale_drop),
1076 	GBENU_STATS_P2(ale_overrun_drop),
1077 	GBENU_STATS_P2(rx_bytes),
1078 	GBENU_STATS_P2(tx_good_frames),
1079 	GBENU_STATS_P2(tx_broadcast_frames),
1080 	GBENU_STATS_P2(tx_multicast_frames),
1081 	GBENU_STATS_P2(tx_pause_frames),
1082 	GBENU_STATS_P2(tx_deferred_frames),
1083 	GBENU_STATS_P2(tx_collision_frames),
1084 	GBENU_STATS_P2(tx_single_coll_frames),
1085 	GBENU_STATS_P2(tx_mult_coll_frames),
1086 	GBENU_STATS_P2(tx_excessive_collisions),
1087 	GBENU_STATS_P2(tx_late_collisions),
1088 	GBENU_STATS_P2(rx_ipg_error),
1089 	GBENU_STATS_P2(tx_carrier_sense_errors),
1090 	GBENU_STATS_P2(tx_bytes),
1091 	GBENU_STATS_P2(tx_64B_frames),
1092 	GBENU_STATS_P2(tx_65_to_127B_frames),
1093 	GBENU_STATS_P2(tx_128_to_255B_frames),
1094 	GBENU_STATS_P2(tx_256_to_511B_frames),
1095 	GBENU_STATS_P2(tx_512_to_1023B_frames),
1096 	GBENU_STATS_P2(tx_1024B_frames),
1097 	GBENU_STATS_P2(net_bytes),
1098 	GBENU_STATS_P2(rx_bottom_fifo_drop),
1099 	GBENU_STATS_P2(rx_port_mask_drop),
1100 	GBENU_STATS_P2(rx_top_fifo_drop),
1101 	GBENU_STATS_P2(ale_rate_limit_drop),
1102 	GBENU_STATS_P2(ale_vid_ingress_drop),
1103 	GBENU_STATS_P2(ale_da_eq_sa_drop),
1104 	GBENU_STATS_P2(ale_unknown_ucast),
1105 	GBENU_STATS_P2(ale_unknown_ucast_bytes),
1106 	GBENU_STATS_P2(ale_unknown_mcast),
1107 	GBENU_STATS_P2(ale_unknown_mcast_bytes),
1108 	GBENU_STATS_P2(ale_unknown_bcast),
1109 	GBENU_STATS_P2(ale_unknown_bcast_bytes),
1110 	GBENU_STATS_P2(ale_pol_match),
1111 	GBENU_STATS_P2(ale_pol_match_red),
1112 	GBENU_STATS_P2(ale_pol_match_yellow),
1113 	GBENU_STATS_P2(tx_mem_protect_err),
1114 	GBENU_STATS_P2(tx_pri0_drop),
1115 	GBENU_STATS_P2(tx_pri1_drop),
1116 	GBENU_STATS_P2(tx_pri2_drop),
1117 	GBENU_STATS_P2(tx_pri3_drop),
1118 	GBENU_STATS_P2(tx_pri4_drop),
1119 	GBENU_STATS_P2(tx_pri5_drop),
1120 	GBENU_STATS_P2(tx_pri6_drop),
1121 	GBENU_STATS_P2(tx_pri7_drop),
1122 	GBENU_STATS_P2(tx_pri0_drop_bcnt),
1123 	GBENU_STATS_P2(tx_pri1_drop_bcnt),
1124 	GBENU_STATS_P2(tx_pri2_drop_bcnt),
1125 	GBENU_STATS_P2(tx_pri3_drop_bcnt),
1126 	GBENU_STATS_P2(tx_pri4_drop_bcnt),
1127 	GBENU_STATS_P2(tx_pri5_drop_bcnt),
1128 	GBENU_STATS_P2(tx_pri6_drop_bcnt),
1129 	GBENU_STATS_P2(tx_pri7_drop_bcnt),
1130 	/* GBENU Module 3 */
1131 	GBENU_STATS_P3(rx_good_frames),
1132 	GBENU_STATS_P3(rx_broadcast_frames),
1133 	GBENU_STATS_P3(rx_multicast_frames),
1134 	GBENU_STATS_P3(rx_pause_frames),
1135 	GBENU_STATS_P3(rx_crc_errors),
1136 	GBENU_STATS_P3(rx_align_code_errors),
1137 	GBENU_STATS_P3(rx_oversized_frames),
1138 	GBENU_STATS_P3(rx_jabber_frames),
1139 	GBENU_STATS_P3(rx_undersized_frames),
1140 	GBENU_STATS_P3(rx_fragments),
1141 	GBENU_STATS_P3(ale_drop),
1142 	GBENU_STATS_P3(ale_overrun_drop),
1143 	GBENU_STATS_P3(rx_bytes),
1144 	GBENU_STATS_P3(tx_good_frames),
1145 	GBENU_STATS_P3(tx_broadcast_frames),
1146 	GBENU_STATS_P3(tx_multicast_frames),
1147 	GBENU_STATS_P3(tx_pause_frames),
1148 	GBENU_STATS_P3(tx_deferred_frames),
1149 	GBENU_STATS_P3(tx_collision_frames),
1150 	GBENU_STATS_P3(tx_single_coll_frames),
1151 	GBENU_STATS_P3(tx_mult_coll_frames),
1152 	GBENU_STATS_P3(tx_excessive_collisions),
1153 	GBENU_STATS_P3(tx_late_collisions),
1154 	GBENU_STATS_P3(rx_ipg_error),
1155 	GBENU_STATS_P3(tx_carrier_sense_errors),
1156 	GBENU_STATS_P3(tx_bytes),
1157 	GBENU_STATS_P3(tx_64B_frames),
1158 	GBENU_STATS_P3(tx_65_to_127B_frames),
1159 	GBENU_STATS_P3(tx_128_to_255B_frames),
1160 	GBENU_STATS_P3(tx_256_to_511B_frames),
1161 	GBENU_STATS_P3(tx_512_to_1023B_frames),
1162 	GBENU_STATS_P3(tx_1024B_frames),
1163 	GBENU_STATS_P3(net_bytes),
1164 	GBENU_STATS_P3(rx_bottom_fifo_drop),
1165 	GBENU_STATS_P3(rx_port_mask_drop),
1166 	GBENU_STATS_P3(rx_top_fifo_drop),
1167 	GBENU_STATS_P3(ale_rate_limit_drop),
1168 	GBENU_STATS_P3(ale_vid_ingress_drop),
1169 	GBENU_STATS_P3(ale_da_eq_sa_drop),
1170 	GBENU_STATS_P3(ale_unknown_ucast),
1171 	GBENU_STATS_P3(ale_unknown_ucast_bytes),
1172 	GBENU_STATS_P3(ale_unknown_mcast),
1173 	GBENU_STATS_P3(ale_unknown_mcast_bytes),
1174 	GBENU_STATS_P3(ale_unknown_bcast),
1175 	GBENU_STATS_P3(ale_unknown_bcast_bytes),
1176 	GBENU_STATS_P3(ale_pol_match),
1177 	GBENU_STATS_P3(ale_pol_match_red),
1178 	GBENU_STATS_P3(ale_pol_match_yellow),
1179 	GBENU_STATS_P3(tx_mem_protect_err),
1180 	GBENU_STATS_P3(tx_pri0_drop),
1181 	GBENU_STATS_P3(tx_pri1_drop),
1182 	GBENU_STATS_P3(tx_pri2_drop),
1183 	GBENU_STATS_P3(tx_pri3_drop),
1184 	GBENU_STATS_P3(tx_pri4_drop),
1185 	GBENU_STATS_P3(tx_pri5_drop),
1186 	GBENU_STATS_P3(tx_pri6_drop),
1187 	GBENU_STATS_P3(tx_pri7_drop),
1188 	GBENU_STATS_P3(tx_pri0_drop_bcnt),
1189 	GBENU_STATS_P3(tx_pri1_drop_bcnt),
1190 	GBENU_STATS_P3(tx_pri2_drop_bcnt),
1191 	GBENU_STATS_P3(tx_pri3_drop_bcnt),
1192 	GBENU_STATS_P3(tx_pri4_drop_bcnt),
1193 	GBENU_STATS_P3(tx_pri5_drop_bcnt),
1194 	GBENU_STATS_P3(tx_pri6_drop_bcnt),
1195 	GBENU_STATS_P3(tx_pri7_drop_bcnt),
1196 	/* GBENU Module 4 */
1197 	GBENU_STATS_P4(rx_good_frames),
1198 	GBENU_STATS_P4(rx_broadcast_frames),
1199 	GBENU_STATS_P4(rx_multicast_frames),
1200 	GBENU_STATS_P4(rx_pause_frames),
1201 	GBENU_STATS_P4(rx_crc_errors),
1202 	GBENU_STATS_P4(rx_align_code_errors),
1203 	GBENU_STATS_P4(rx_oversized_frames),
1204 	GBENU_STATS_P4(rx_jabber_frames),
1205 	GBENU_STATS_P4(rx_undersized_frames),
1206 	GBENU_STATS_P4(rx_fragments),
1207 	GBENU_STATS_P4(ale_drop),
1208 	GBENU_STATS_P4(ale_overrun_drop),
1209 	GBENU_STATS_P4(rx_bytes),
1210 	GBENU_STATS_P4(tx_good_frames),
1211 	GBENU_STATS_P4(tx_broadcast_frames),
1212 	GBENU_STATS_P4(tx_multicast_frames),
1213 	GBENU_STATS_P4(tx_pause_frames),
1214 	GBENU_STATS_P4(tx_deferred_frames),
1215 	GBENU_STATS_P4(tx_collision_frames),
1216 	GBENU_STATS_P4(tx_single_coll_frames),
1217 	GBENU_STATS_P4(tx_mult_coll_frames),
1218 	GBENU_STATS_P4(tx_excessive_collisions),
1219 	GBENU_STATS_P4(tx_late_collisions),
1220 	GBENU_STATS_P4(rx_ipg_error),
1221 	GBENU_STATS_P4(tx_carrier_sense_errors),
1222 	GBENU_STATS_P4(tx_bytes),
1223 	GBENU_STATS_P4(tx_64B_frames),
1224 	GBENU_STATS_P4(tx_65_to_127B_frames),
1225 	GBENU_STATS_P4(tx_128_to_255B_frames),
1226 	GBENU_STATS_P4(tx_256_to_511B_frames),
1227 	GBENU_STATS_P4(tx_512_to_1023B_frames),
1228 	GBENU_STATS_P4(tx_1024B_frames),
1229 	GBENU_STATS_P4(net_bytes),
1230 	GBENU_STATS_P4(rx_bottom_fifo_drop),
1231 	GBENU_STATS_P4(rx_port_mask_drop),
1232 	GBENU_STATS_P4(rx_top_fifo_drop),
1233 	GBENU_STATS_P4(ale_rate_limit_drop),
1234 	GBENU_STATS_P4(ale_vid_ingress_drop),
1235 	GBENU_STATS_P4(ale_da_eq_sa_drop),
1236 	GBENU_STATS_P4(ale_unknown_ucast),
1237 	GBENU_STATS_P4(ale_unknown_ucast_bytes),
1238 	GBENU_STATS_P4(ale_unknown_mcast),
1239 	GBENU_STATS_P4(ale_unknown_mcast_bytes),
1240 	GBENU_STATS_P4(ale_unknown_bcast),
1241 	GBENU_STATS_P4(ale_unknown_bcast_bytes),
1242 	GBENU_STATS_P4(ale_pol_match),
1243 	GBENU_STATS_P4(ale_pol_match_red),
1244 	GBENU_STATS_P4(ale_pol_match_yellow),
1245 	GBENU_STATS_P4(tx_mem_protect_err),
1246 	GBENU_STATS_P4(tx_pri0_drop),
1247 	GBENU_STATS_P4(tx_pri1_drop),
1248 	GBENU_STATS_P4(tx_pri2_drop),
1249 	GBENU_STATS_P4(tx_pri3_drop),
1250 	GBENU_STATS_P4(tx_pri4_drop),
1251 	GBENU_STATS_P4(tx_pri5_drop),
1252 	GBENU_STATS_P4(tx_pri6_drop),
1253 	GBENU_STATS_P4(tx_pri7_drop),
1254 	GBENU_STATS_P4(tx_pri0_drop_bcnt),
1255 	GBENU_STATS_P4(tx_pri1_drop_bcnt),
1256 	GBENU_STATS_P4(tx_pri2_drop_bcnt),
1257 	GBENU_STATS_P4(tx_pri3_drop_bcnt),
1258 	GBENU_STATS_P4(tx_pri4_drop_bcnt),
1259 	GBENU_STATS_P4(tx_pri5_drop_bcnt),
1260 	GBENU_STATS_P4(tx_pri6_drop_bcnt),
1261 	GBENU_STATS_P4(tx_pri7_drop_bcnt),
1262 	/* GBENU Module 5 */
1263 	GBENU_STATS_P5(rx_good_frames),
1264 	GBENU_STATS_P5(rx_broadcast_frames),
1265 	GBENU_STATS_P5(rx_multicast_frames),
1266 	GBENU_STATS_P5(rx_pause_frames),
1267 	GBENU_STATS_P5(rx_crc_errors),
1268 	GBENU_STATS_P5(rx_align_code_errors),
1269 	GBENU_STATS_P5(rx_oversized_frames),
1270 	GBENU_STATS_P5(rx_jabber_frames),
1271 	GBENU_STATS_P5(rx_undersized_frames),
1272 	GBENU_STATS_P5(rx_fragments),
1273 	GBENU_STATS_P5(ale_drop),
1274 	GBENU_STATS_P5(ale_overrun_drop),
1275 	GBENU_STATS_P5(rx_bytes),
1276 	GBENU_STATS_P5(tx_good_frames),
1277 	GBENU_STATS_P5(tx_broadcast_frames),
1278 	GBENU_STATS_P5(tx_multicast_frames),
1279 	GBENU_STATS_P5(tx_pause_frames),
1280 	GBENU_STATS_P5(tx_deferred_frames),
1281 	GBENU_STATS_P5(tx_collision_frames),
1282 	GBENU_STATS_P5(tx_single_coll_frames),
1283 	GBENU_STATS_P5(tx_mult_coll_frames),
1284 	GBENU_STATS_P5(tx_excessive_collisions),
1285 	GBENU_STATS_P5(tx_late_collisions),
1286 	GBENU_STATS_P5(rx_ipg_error),
1287 	GBENU_STATS_P5(tx_carrier_sense_errors),
1288 	GBENU_STATS_P5(tx_bytes),
1289 	GBENU_STATS_P5(tx_64B_frames),
1290 	GBENU_STATS_P5(tx_65_to_127B_frames),
1291 	GBENU_STATS_P5(tx_128_to_255B_frames),
1292 	GBENU_STATS_P5(tx_256_to_511B_frames),
1293 	GBENU_STATS_P5(tx_512_to_1023B_frames),
1294 	GBENU_STATS_P5(tx_1024B_frames),
1295 	GBENU_STATS_P5(net_bytes),
1296 	GBENU_STATS_P5(rx_bottom_fifo_drop),
1297 	GBENU_STATS_P5(rx_port_mask_drop),
1298 	GBENU_STATS_P5(rx_top_fifo_drop),
1299 	GBENU_STATS_P5(ale_rate_limit_drop),
1300 	GBENU_STATS_P5(ale_vid_ingress_drop),
1301 	GBENU_STATS_P5(ale_da_eq_sa_drop),
1302 	GBENU_STATS_P5(ale_unknown_ucast),
1303 	GBENU_STATS_P5(ale_unknown_ucast_bytes),
1304 	GBENU_STATS_P5(ale_unknown_mcast),
1305 	GBENU_STATS_P5(ale_unknown_mcast_bytes),
1306 	GBENU_STATS_P5(ale_unknown_bcast),
1307 	GBENU_STATS_P5(ale_unknown_bcast_bytes),
1308 	GBENU_STATS_P5(ale_pol_match),
1309 	GBENU_STATS_P5(ale_pol_match_red),
1310 	GBENU_STATS_P5(ale_pol_match_yellow),
1311 	GBENU_STATS_P5(tx_mem_protect_err),
1312 	GBENU_STATS_P5(tx_pri0_drop),
1313 	GBENU_STATS_P5(tx_pri1_drop),
1314 	GBENU_STATS_P5(tx_pri2_drop),
1315 	GBENU_STATS_P5(tx_pri3_drop),
1316 	GBENU_STATS_P5(tx_pri4_drop),
1317 	GBENU_STATS_P5(tx_pri5_drop),
1318 	GBENU_STATS_P5(tx_pri6_drop),
1319 	GBENU_STATS_P5(tx_pri7_drop),
1320 	GBENU_STATS_P5(tx_pri0_drop_bcnt),
1321 	GBENU_STATS_P5(tx_pri1_drop_bcnt),
1322 	GBENU_STATS_P5(tx_pri2_drop_bcnt),
1323 	GBENU_STATS_P5(tx_pri3_drop_bcnt),
1324 	GBENU_STATS_P5(tx_pri4_drop_bcnt),
1325 	GBENU_STATS_P5(tx_pri5_drop_bcnt),
1326 	GBENU_STATS_P5(tx_pri6_drop_bcnt),
1327 	GBENU_STATS_P5(tx_pri7_drop_bcnt),
1328 	/* GBENU Module 6 */
1329 	GBENU_STATS_P6(rx_good_frames),
1330 	GBENU_STATS_P6(rx_broadcast_frames),
1331 	GBENU_STATS_P6(rx_multicast_frames),
1332 	GBENU_STATS_P6(rx_pause_frames),
1333 	GBENU_STATS_P6(rx_crc_errors),
1334 	GBENU_STATS_P6(rx_align_code_errors),
1335 	GBENU_STATS_P6(rx_oversized_frames),
1336 	GBENU_STATS_P6(rx_jabber_frames),
1337 	GBENU_STATS_P6(rx_undersized_frames),
1338 	GBENU_STATS_P6(rx_fragments),
1339 	GBENU_STATS_P6(ale_drop),
1340 	GBENU_STATS_P6(ale_overrun_drop),
1341 	GBENU_STATS_P6(rx_bytes),
1342 	GBENU_STATS_P6(tx_good_frames),
1343 	GBENU_STATS_P6(tx_broadcast_frames),
1344 	GBENU_STATS_P6(tx_multicast_frames),
1345 	GBENU_STATS_P6(tx_pause_frames),
1346 	GBENU_STATS_P6(tx_deferred_frames),
1347 	GBENU_STATS_P6(tx_collision_frames),
1348 	GBENU_STATS_P6(tx_single_coll_frames),
1349 	GBENU_STATS_P6(tx_mult_coll_frames),
1350 	GBENU_STATS_P6(tx_excessive_collisions),
1351 	GBENU_STATS_P6(tx_late_collisions),
1352 	GBENU_STATS_P6(rx_ipg_error),
1353 	GBENU_STATS_P6(tx_carrier_sense_errors),
1354 	GBENU_STATS_P6(tx_bytes),
1355 	GBENU_STATS_P6(tx_64B_frames),
1356 	GBENU_STATS_P6(tx_65_to_127B_frames),
1357 	GBENU_STATS_P6(tx_128_to_255B_frames),
1358 	GBENU_STATS_P6(tx_256_to_511B_frames),
1359 	GBENU_STATS_P6(tx_512_to_1023B_frames),
1360 	GBENU_STATS_P6(tx_1024B_frames),
1361 	GBENU_STATS_P6(net_bytes),
1362 	GBENU_STATS_P6(rx_bottom_fifo_drop),
1363 	GBENU_STATS_P6(rx_port_mask_drop),
1364 	GBENU_STATS_P6(rx_top_fifo_drop),
1365 	GBENU_STATS_P6(ale_rate_limit_drop),
1366 	GBENU_STATS_P6(ale_vid_ingress_drop),
1367 	GBENU_STATS_P6(ale_da_eq_sa_drop),
1368 	GBENU_STATS_P6(ale_unknown_ucast),
1369 	GBENU_STATS_P6(ale_unknown_ucast_bytes),
1370 	GBENU_STATS_P6(ale_unknown_mcast),
1371 	GBENU_STATS_P6(ale_unknown_mcast_bytes),
1372 	GBENU_STATS_P6(ale_unknown_bcast),
1373 	GBENU_STATS_P6(ale_unknown_bcast_bytes),
1374 	GBENU_STATS_P6(ale_pol_match),
1375 	GBENU_STATS_P6(ale_pol_match_red),
1376 	GBENU_STATS_P6(ale_pol_match_yellow),
1377 	GBENU_STATS_P6(tx_mem_protect_err),
1378 	GBENU_STATS_P6(tx_pri0_drop),
1379 	GBENU_STATS_P6(tx_pri1_drop),
1380 	GBENU_STATS_P6(tx_pri2_drop),
1381 	GBENU_STATS_P6(tx_pri3_drop),
1382 	GBENU_STATS_P6(tx_pri4_drop),
1383 	GBENU_STATS_P6(tx_pri5_drop),
1384 	GBENU_STATS_P6(tx_pri6_drop),
1385 	GBENU_STATS_P6(tx_pri7_drop),
1386 	GBENU_STATS_P6(tx_pri0_drop_bcnt),
1387 	GBENU_STATS_P6(tx_pri1_drop_bcnt),
1388 	GBENU_STATS_P6(tx_pri2_drop_bcnt),
1389 	GBENU_STATS_P6(tx_pri3_drop_bcnt),
1390 	GBENU_STATS_P6(tx_pri4_drop_bcnt),
1391 	GBENU_STATS_P6(tx_pri5_drop_bcnt),
1392 	GBENU_STATS_P6(tx_pri6_drop_bcnt),
1393 	GBENU_STATS_P6(tx_pri7_drop_bcnt),
1394 	/* GBENU Module 7 */
1395 	GBENU_STATS_P7(rx_good_frames),
1396 	GBENU_STATS_P7(rx_broadcast_frames),
1397 	GBENU_STATS_P7(rx_multicast_frames),
1398 	GBENU_STATS_P7(rx_pause_frames),
1399 	GBENU_STATS_P7(rx_crc_errors),
1400 	GBENU_STATS_P7(rx_align_code_errors),
1401 	GBENU_STATS_P7(rx_oversized_frames),
1402 	GBENU_STATS_P7(rx_jabber_frames),
1403 	GBENU_STATS_P7(rx_undersized_frames),
1404 	GBENU_STATS_P7(rx_fragments),
1405 	GBENU_STATS_P7(ale_drop),
1406 	GBENU_STATS_P7(ale_overrun_drop),
1407 	GBENU_STATS_P7(rx_bytes),
1408 	GBENU_STATS_P7(tx_good_frames),
1409 	GBENU_STATS_P7(tx_broadcast_frames),
1410 	GBENU_STATS_P7(tx_multicast_frames),
1411 	GBENU_STATS_P7(tx_pause_frames),
1412 	GBENU_STATS_P7(tx_deferred_frames),
1413 	GBENU_STATS_P7(tx_collision_frames),
1414 	GBENU_STATS_P7(tx_single_coll_frames),
1415 	GBENU_STATS_P7(tx_mult_coll_frames),
1416 	GBENU_STATS_P7(tx_excessive_collisions),
1417 	GBENU_STATS_P7(tx_late_collisions),
1418 	GBENU_STATS_P7(rx_ipg_error),
1419 	GBENU_STATS_P7(tx_carrier_sense_errors),
1420 	GBENU_STATS_P7(tx_bytes),
1421 	GBENU_STATS_P7(tx_64B_frames),
1422 	GBENU_STATS_P7(tx_65_to_127B_frames),
1423 	GBENU_STATS_P7(tx_128_to_255B_frames),
1424 	GBENU_STATS_P7(tx_256_to_511B_frames),
1425 	GBENU_STATS_P7(tx_512_to_1023B_frames),
1426 	GBENU_STATS_P7(tx_1024B_frames),
1427 	GBENU_STATS_P7(net_bytes),
1428 	GBENU_STATS_P7(rx_bottom_fifo_drop),
1429 	GBENU_STATS_P7(rx_port_mask_drop),
1430 	GBENU_STATS_P7(rx_top_fifo_drop),
1431 	GBENU_STATS_P7(ale_rate_limit_drop),
1432 	GBENU_STATS_P7(ale_vid_ingress_drop),
1433 	GBENU_STATS_P7(ale_da_eq_sa_drop),
1434 	GBENU_STATS_P7(ale_unknown_ucast),
1435 	GBENU_STATS_P7(ale_unknown_ucast_bytes),
1436 	GBENU_STATS_P7(ale_unknown_mcast),
1437 	GBENU_STATS_P7(ale_unknown_mcast_bytes),
1438 	GBENU_STATS_P7(ale_unknown_bcast),
1439 	GBENU_STATS_P7(ale_unknown_bcast_bytes),
1440 	GBENU_STATS_P7(ale_pol_match),
1441 	GBENU_STATS_P7(ale_pol_match_red),
1442 	GBENU_STATS_P7(ale_pol_match_yellow),
1443 	GBENU_STATS_P7(tx_mem_protect_err),
1444 	GBENU_STATS_P7(tx_pri0_drop),
1445 	GBENU_STATS_P7(tx_pri1_drop),
1446 	GBENU_STATS_P7(tx_pri2_drop),
1447 	GBENU_STATS_P7(tx_pri3_drop),
1448 	GBENU_STATS_P7(tx_pri4_drop),
1449 	GBENU_STATS_P7(tx_pri5_drop),
1450 	GBENU_STATS_P7(tx_pri6_drop),
1451 	GBENU_STATS_P7(tx_pri7_drop),
1452 	GBENU_STATS_P7(tx_pri0_drop_bcnt),
1453 	GBENU_STATS_P7(tx_pri1_drop_bcnt),
1454 	GBENU_STATS_P7(tx_pri2_drop_bcnt),
1455 	GBENU_STATS_P7(tx_pri3_drop_bcnt),
1456 	GBENU_STATS_P7(tx_pri4_drop_bcnt),
1457 	GBENU_STATS_P7(tx_pri5_drop_bcnt),
1458 	GBENU_STATS_P7(tx_pri6_drop_bcnt),
1459 	GBENU_STATS_P7(tx_pri7_drop_bcnt),
1460 	/* GBENU Module 8 */
1461 	GBENU_STATS_P8(rx_good_frames),
1462 	GBENU_STATS_P8(rx_broadcast_frames),
1463 	GBENU_STATS_P8(rx_multicast_frames),
1464 	GBENU_STATS_P8(rx_pause_frames),
1465 	GBENU_STATS_P8(rx_crc_errors),
1466 	GBENU_STATS_P8(rx_align_code_errors),
1467 	GBENU_STATS_P8(rx_oversized_frames),
1468 	GBENU_STATS_P8(rx_jabber_frames),
1469 	GBENU_STATS_P8(rx_undersized_frames),
1470 	GBENU_STATS_P8(rx_fragments),
1471 	GBENU_STATS_P8(ale_drop),
1472 	GBENU_STATS_P8(ale_overrun_drop),
1473 	GBENU_STATS_P8(rx_bytes),
1474 	GBENU_STATS_P8(tx_good_frames),
1475 	GBENU_STATS_P8(tx_broadcast_frames),
1476 	GBENU_STATS_P8(tx_multicast_frames),
1477 	GBENU_STATS_P8(tx_pause_frames),
1478 	GBENU_STATS_P8(tx_deferred_frames),
1479 	GBENU_STATS_P8(tx_collision_frames),
1480 	GBENU_STATS_P8(tx_single_coll_frames),
1481 	GBENU_STATS_P8(tx_mult_coll_frames),
1482 	GBENU_STATS_P8(tx_excessive_collisions),
1483 	GBENU_STATS_P8(tx_late_collisions),
1484 	GBENU_STATS_P8(rx_ipg_error),
1485 	GBENU_STATS_P8(tx_carrier_sense_errors),
1486 	GBENU_STATS_P8(tx_bytes),
1487 	GBENU_STATS_P8(tx_64B_frames),
1488 	GBENU_STATS_P8(tx_65_to_127B_frames),
1489 	GBENU_STATS_P8(tx_128_to_255B_frames),
1490 	GBENU_STATS_P8(tx_256_to_511B_frames),
1491 	GBENU_STATS_P8(tx_512_to_1023B_frames),
1492 	GBENU_STATS_P8(tx_1024B_frames),
1493 	GBENU_STATS_P8(net_bytes),
1494 	GBENU_STATS_P8(rx_bottom_fifo_drop),
1495 	GBENU_STATS_P8(rx_port_mask_drop),
1496 	GBENU_STATS_P8(rx_top_fifo_drop),
1497 	GBENU_STATS_P8(ale_rate_limit_drop),
1498 	GBENU_STATS_P8(ale_vid_ingress_drop),
1499 	GBENU_STATS_P8(ale_da_eq_sa_drop),
1500 	GBENU_STATS_P8(ale_unknown_ucast),
1501 	GBENU_STATS_P8(ale_unknown_ucast_bytes),
1502 	GBENU_STATS_P8(ale_unknown_mcast),
1503 	GBENU_STATS_P8(ale_unknown_mcast_bytes),
1504 	GBENU_STATS_P8(ale_unknown_bcast),
1505 	GBENU_STATS_P8(ale_unknown_bcast_bytes),
1506 	GBENU_STATS_P8(ale_pol_match),
1507 	GBENU_STATS_P8(ale_pol_match_red),
1508 	GBENU_STATS_P8(ale_pol_match_yellow),
1509 	GBENU_STATS_P8(tx_mem_protect_err),
1510 	GBENU_STATS_P8(tx_pri0_drop),
1511 	GBENU_STATS_P8(tx_pri1_drop),
1512 	GBENU_STATS_P8(tx_pri2_drop),
1513 	GBENU_STATS_P8(tx_pri3_drop),
1514 	GBENU_STATS_P8(tx_pri4_drop),
1515 	GBENU_STATS_P8(tx_pri5_drop),
1516 	GBENU_STATS_P8(tx_pri6_drop),
1517 	GBENU_STATS_P8(tx_pri7_drop),
1518 	GBENU_STATS_P8(tx_pri0_drop_bcnt),
1519 	GBENU_STATS_P8(tx_pri1_drop_bcnt),
1520 	GBENU_STATS_P8(tx_pri2_drop_bcnt),
1521 	GBENU_STATS_P8(tx_pri3_drop_bcnt),
1522 	GBENU_STATS_P8(tx_pri4_drop_bcnt),
1523 	GBENU_STATS_P8(tx_pri5_drop_bcnt),
1524 	GBENU_STATS_P8(tx_pri6_drop_bcnt),
1525 	GBENU_STATS_P8(tx_pri7_drop_bcnt),
1526 };
1527 
1528 #define XGBE_STATS0_INFO(field)				\
1529 {							\
1530 	"GBE_0:"#field, XGBE_STATS0_MODULE,		\
1531 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1532 	offsetof(struct xgbe_hw_stats, field)		\
1533 }
1534 
1535 #define XGBE_STATS1_INFO(field)				\
1536 {							\
1537 	"GBE_1:"#field, XGBE_STATS1_MODULE,		\
1538 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1539 	offsetof(struct xgbe_hw_stats, field)		\
1540 }
1541 
1542 #define XGBE_STATS2_INFO(field)				\
1543 {							\
1544 	"GBE_2:"#field, XGBE_STATS2_MODULE,		\
1545 	FIELD_SIZEOF(struct xgbe_hw_stats, field),	\
1546 	offsetof(struct xgbe_hw_stats, field)		\
1547 }
1548 
1549 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1550 	/* GBE module 0 */
1551 	XGBE_STATS0_INFO(rx_good_frames),
1552 	XGBE_STATS0_INFO(rx_broadcast_frames),
1553 	XGBE_STATS0_INFO(rx_multicast_frames),
1554 	XGBE_STATS0_INFO(rx_oversized_frames),
1555 	XGBE_STATS0_INFO(rx_undersized_frames),
1556 	XGBE_STATS0_INFO(overrun_type4),
1557 	XGBE_STATS0_INFO(overrun_type5),
1558 	XGBE_STATS0_INFO(rx_bytes),
1559 	XGBE_STATS0_INFO(tx_good_frames),
1560 	XGBE_STATS0_INFO(tx_broadcast_frames),
1561 	XGBE_STATS0_INFO(tx_multicast_frames),
1562 	XGBE_STATS0_INFO(tx_bytes),
1563 	XGBE_STATS0_INFO(tx_64byte_frames),
1564 	XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1565 	XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1566 	XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1567 	XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1568 	XGBE_STATS0_INFO(tx_1024byte_frames),
1569 	XGBE_STATS0_INFO(net_bytes),
1570 	XGBE_STATS0_INFO(rx_sof_overruns),
1571 	XGBE_STATS0_INFO(rx_mof_overruns),
1572 	XGBE_STATS0_INFO(rx_dma_overruns),
1573 	/* XGBE module 1 */
1574 	XGBE_STATS1_INFO(rx_good_frames),
1575 	XGBE_STATS1_INFO(rx_broadcast_frames),
1576 	XGBE_STATS1_INFO(rx_multicast_frames),
1577 	XGBE_STATS1_INFO(rx_pause_frames),
1578 	XGBE_STATS1_INFO(rx_crc_errors),
1579 	XGBE_STATS1_INFO(rx_align_code_errors),
1580 	XGBE_STATS1_INFO(rx_oversized_frames),
1581 	XGBE_STATS1_INFO(rx_jabber_frames),
1582 	XGBE_STATS1_INFO(rx_undersized_frames),
1583 	XGBE_STATS1_INFO(rx_fragments),
1584 	XGBE_STATS1_INFO(overrun_type4),
1585 	XGBE_STATS1_INFO(overrun_type5),
1586 	XGBE_STATS1_INFO(rx_bytes),
1587 	XGBE_STATS1_INFO(tx_good_frames),
1588 	XGBE_STATS1_INFO(tx_broadcast_frames),
1589 	XGBE_STATS1_INFO(tx_multicast_frames),
1590 	XGBE_STATS1_INFO(tx_pause_frames),
1591 	XGBE_STATS1_INFO(tx_deferred_frames),
1592 	XGBE_STATS1_INFO(tx_collision_frames),
1593 	XGBE_STATS1_INFO(tx_single_coll_frames),
1594 	XGBE_STATS1_INFO(tx_mult_coll_frames),
1595 	XGBE_STATS1_INFO(tx_excessive_collisions),
1596 	XGBE_STATS1_INFO(tx_late_collisions),
1597 	XGBE_STATS1_INFO(tx_underrun),
1598 	XGBE_STATS1_INFO(tx_carrier_sense_errors),
1599 	XGBE_STATS1_INFO(tx_bytes),
1600 	XGBE_STATS1_INFO(tx_64byte_frames),
1601 	XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1602 	XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1603 	XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1604 	XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1605 	XGBE_STATS1_INFO(tx_1024byte_frames),
1606 	XGBE_STATS1_INFO(net_bytes),
1607 	XGBE_STATS1_INFO(rx_sof_overruns),
1608 	XGBE_STATS1_INFO(rx_mof_overruns),
1609 	XGBE_STATS1_INFO(rx_dma_overruns),
1610 	/* XGBE module 2 */
1611 	XGBE_STATS2_INFO(rx_good_frames),
1612 	XGBE_STATS2_INFO(rx_broadcast_frames),
1613 	XGBE_STATS2_INFO(rx_multicast_frames),
1614 	XGBE_STATS2_INFO(rx_pause_frames),
1615 	XGBE_STATS2_INFO(rx_crc_errors),
1616 	XGBE_STATS2_INFO(rx_align_code_errors),
1617 	XGBE_STATS2_INFO(rx_oversized_frames),
1618 	XGBE_STATS2_INFO(rx_jabber_frames),
1619 	XGBE_STATS2_INFO(rx_undersized_frames),
1620 	XGBE_STATS2_INFO(rx_fragments),
1621 	XGBE_STATS2_INFO(overrun_type4),
1622 	XGBE_STATS2_INFO(overrun_type5),
1623 	XGBE_STATS2_INFO(rx_bytes),
1624 	XGBE_STATS2_INFO(tx_good_frames),
1625 	XGBE_STATS2_INFO(tx_broadcast_frames),
1626 	XGBE_STATS2_INFO(tx_multicast_frames),
1627 	XGBE_STATS2_INFO(tx_pause_frames),
1628 	XGBE_STATS2_INFO(tx_deferred_frames),
1629 	XGBE_STATS2_INFO(tx_collision_frames),
1630 	XGBE_STATS2_INFO(tx_single_coll_frames),
1631 	XGBE_STATS2_INFO(tx_mult_coll_frames),
1632 	XGBE_STATS2_INFO(tx_excessive_collisions),
1633 	XGBE_STATS2_INFO(tx_late_collisions),
1634 	XGBE_STATS2_INFO(tx_underrun),
1635 	XGBE_STATS2_INFO(tx_carrier_sense_errors),
1636 	XGBE_STATS2_INFO(tx_bytes),
1637 	XGBE_STATS2_INFO(tx_64byte_frames),
1638 	XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1639 	XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1640 	XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1641 	XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1642 	XGBE_STATS2_INFO(tx_1024byte_frames),
1643 	XGBE_STATS2_INFO(net_bytes),
1644 	XGBE_STATS2_INFO(rx_sof_overruns),
1645 	XGBE_STATS2_INFO(rx_mof_overruns),
1646 	XGBE_STATS2_INFO(rx_dma_overruns),
1647 };
1648 
1649 #define for_each_intf(i, priv) \
1650 	list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1651 
1652 #define for_each_sec_slave(slave, priv) \
1653 	list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1654 
1655 #define first_sec_slave(priv)					\
1656 	list_first_entry(&priv->secondary_slaves, \
1657 			struct gbe_slave, slave_list)
1658 
1659 static void keystone_get_drvinfo(struct net_device *ndev,
1660 				 struct ethtool_drvinfo *info)
1661 {
1662 	strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1663 	strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1664 }
1665 
1666 static u32 keystone_get_msglevel(struct net_device *ndev)
1667 {
1668 	struct netcp_intf *netcp = netdev_priv(ndev);
1669 
1670 	return netcp->msg_enable;
1671 }
1672 
1673 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1674 {
1675 	struct netcp_intf *netcp = netdev_priv(ndev);
1676 
1677 	netcp->msg_enable = value;
1678 }
1679 
1680 static void keystone_get_stat_strings(struct net_device *ndev,
1681 				      uint32_t stringset, uint8_t *data)
1682 {
1683 	struct netcp_intf *netcp = netdev_priv(ndev);
1684 	struct gbe_intf *gbe_intf;
1685 	struct gbe_priv *gbe_dev;
1686 	int i;
1687 
1688 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1689 	if (!gbe_intf)
1690 		return;
1691 	gbe_dev = gbe_intf->gbe_dev;
1692 
1693 	switch (stringset) {
1694 	case ETH_SS_STATS:
1695 		for (i = 0; i < gbe_dev->num_et_stats; i++) {
1696 			memcpy(data, gbe_dev->et_stats[i].desc,
1697 			       ETH_GSTRING_LEN);
1698 			data += ETH_GSTRING_LEN;
1699 		}
1700 		break;
1701 	case ETH_SS_TEST:
1702 		break;
1703 	}
1704 }
1705 
1706 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1707 {
1708 	struct netcp_intf *netcp = netdev_priv(ndev);
1709 	struct gbe_intf *gbe_intf;
1710 	struct gbe_priv *gbe_dev;
1711 
1712 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1713 	if (!gbe_intf)
1714 		return -EINVAL;
1715 	gbe_dev = gbe_intf->gbe_dev;
1716 
1717 	switch (stringset) {
1718 	case ETH_SS_TEST:
1719 		return 0;
1720 	case ETH_SS_STATS:
1721 		return gbe_dev->num_et_stats;
1722 	default:
1723 		return -EINVAL;
1724 	}
1725 }
1726 
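/* Reset the accumulated ethtool stats for one hw stats module: zero the
 * 64-bit software counters and resync the previous-value snapshots from
 * the current hardware registers.
 */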
1727 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1728 {
1729 	void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1730 	u32  __iomem *p_stats_entry;
1731 	int i;
1732 
1733 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1734 		if (gbe_dev->et_stats[i].type == stats_mod) {
1735 			p_stats_entry = base + gbe_dev->et_stats[i].offset;
1736 			gbe_dev->hw_stats[i] = 0;
1737 			gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1738 		}
1739 	}
1740 }
1741 
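/* Fold the current hardware counter value into the 64-bit software
 * counter. Accumulating the u32 delta keeps the totals correct across
 * rollover of the 32-bit hardware counters.
 */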
1742 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1743 					     int et_stats_entry)
1744 {
1745 	void __iomem *base = NULL;
1746 	u32  __iomem *p_stats_entry;
1747 	u32 curr, delta;
1748 
1749 	/* The hw_stats_regs pointers are already
1750 	 * properly set to point to the right base:
1751 	 */
1752 	base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1753 	p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1754 	curr = readl(p_stats_entry);
1755 	delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1756 	gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1757 	gbe_dev->hw_stats[et_stats_entry] += delta;
1758 }
1759 
1760 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1761 {
1762 	int i;
1763 
1764 	for (i = 0; i < gbe_dev->num_et_stats; i++) {
1765 		gbe_update_hw_stats_entry(gbe_dev, i);
1766 
1767 		if (data)
1768 			data[i] = gbe_dev->hw_stats[i];
1769 	}
1770 }
1771 
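/* On version 1.4 hardware only two stats modules are visible at a time;
 * the GBE_STATS_CD_SEL bit in stat_port_en selects whether the A/B or
 * the C/D pair is currently mapped.
 */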
1772 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1773 					       int stats_mod)
1774 {
1775 	u32 val;
1776 
1777 	val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1778 
1779 	switch (stats_mod) {
1780 	case GBE_STATSA_MODULE:
1781 	case GBE_STATSB_MODULE:
1782 		val &= ~GBE_STATS_CD_SEL;
1783 		break;
1784 	case GBE_STATSC_MODULE:
1785 	case GBE_STATSD_MODULE:
1786 		val |= GBE_STATS_CD_SEL;
1787 		break;
1788 	default:
1789 		return;
1790 	}
1791 
1792 	/* make the stat module visible */
1793 	writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1794 }
1795 
1796 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1797 {
1798 	gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1799 	gbe_reset_mod_stats(gbe_dev, stats_mod);
1800 }
1801 
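/* Version 1.4 variant of gbe_update_stats(): the stats are walked in
 * two halves, switching the visible module pair before each half.
 */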
1802 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1803 {
1804 	u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1805 	int et_entry, j, pair;
1806 
1807 	for (pair = 0; pair < 2; pair++) {
1808 		gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1809 						      GBE_STATSC_MODULE :
1810 						      GBE_STATSA_MODULE));
1811 
1812 		for (j = 0; j < half_num_et_stats; j++) {
1813 			et_entry = pair * half_num_et_stats + j;
1814 			gbe_update_hw_stats_entry(gbe_dev, et_entry);
1815 
1816 			if (data)
1817 				data[et_entry] = gbe_dev->hw_stats[et_entry];
1818 		}
1819 	}
1820 }
1821 
1822 static void keystone_get_ethtool_stats(struct net_device *ndev,
1823 				       struct ethtool_stats *stats,
1824 				       uint64_t *data)
1825 {
1826 	struct netcp_intf *netcp = netdev_priv(ndev);
1827 	struct gbe_intf *gbe_intf;
1828 	struct gbe_priv *gbe_dev;
1829 
1830 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1831 	if (!gbe_intf)
1832 		return;
1833 
1834 	gbe_dev = gbe_intf->gbe_dev;
1835 	spin_lock_bh(&gbe_dev->hw_stats_lock);
1836 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1837 		gbe_update_stats_ver14(gbe_dev, data);
1838 	else
1839 		gbe_update_stats(gbe_dev, data);
1840 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
1841 }
1842 
1843 static int keystone_get_settings(struct net_device *ndev,
1844 				 struct ethtool_cmd *cmd)
1845 {
1846 	struct netcp_intf *netcp = netdev_priv(ndev);
1847 	struct phy_device *phy = ndev->phydev;
1848 	struct gbe_intf *gbe_intf;
1849 	int ret;
1850 
1851 	if (!phy)
1852 		return -EINVAL;
1853 
1854 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1855 	if (!gbe_intf)
1856 		return -EINVAL;
1857 
1858 	if (!gbe_intf->slave)
1859 		return -EINVAL;
1860 
1861 	ret = phy_ethtool_gset(phy, cmd);
1862 	if (!ret)
1863 		cmd->port = gbe_intf->slave->phy_port_t;
1864 
1865 	return ret;
1866 }
1867 
1868 static int keystone_set_settings(struct net_device *ndev,
1869 				 struct ethtool_cmd *cmd)
1870 {
1871 	struct netcp_intf *netcp = netdev_priv(ndev);
1872 	struct phy_device *phy = ndev->phydev;
1873 	struct gbe_intf *gbe_intf;
1874 	u32 features = cmd->advertising & cmd->supported;
1875 
1876 	if (!phy)
1877 		return -EINVAL;
1878 
1879 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1880 	if (!gbe_intf)
1881 		return -EINVAL;
1882 
1883 	if (!gbe_intf->slave)
1884 		return -EINVAL;
1885 
1886 	if (cmd->port != gbe_intf->slave->phy_port_t) {
1887 		if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
1888 			return -EINVAL;
1889 
1890 		if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
1891 			return -EINVAL;
1892 
1893 		if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
1894 			return -EINVAL;
1895 
1896 		if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
1897 			return -EINVAL;
1898 
1899 		if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1900 			return -EINVAL;
1901 	}
1902 
1903 	gbe_intf->slave->phy_port_t = cmd->port;
1904 	return phy_ethtool_sset(phy, cmd);
1905 }
1906 
1907 static const struct ethtool_ops keystone_ethtool_ops = {
1908 	.get_drvinfo		= keystone_get_drvinfo,
1909 	.get_link		= ethtool_op_get_link,
1910 	.get_msglevel		= keystone_get_msglevel,
1911 	.set_msglevel		= keystone_set_msglevel,
1912 	.get_strings		= keystone_get_stat_strings,
1913 	.get_sset_count		= keystone_get_sset_count,
1914 	.get_ethtool_stats	= keystone_get_ethtool_stats,
1915 	.get_settings		= keystone_get_settings,
1916 	.set_settings		= keystone_set_settings,
1917 };
1918 
1919 #define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
1920 			 ((mac)[2] << 16) | ((mac)[3] << 24))
1921 #define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))
1922 
1923 static void gbe_set_slave_mac(struct gbe_slave *slave,
1924 			      struct gbe_intf *gbe_intf)
1925 {
1926 	struct net_device *ndev = gbe_intf->ndev;
1927 
1928 	writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1929 	writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1930 }
1931 
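/* Map a slave number to its switch port number; when the host occupies
 * port 0, slave ports are numbered from 1.
 */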
1932 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1933 {
1934 	if (priv->host_port == 0)
1935 		return slave_num + 1;
1936 
1937 	return slave_num;
1938 }
1939 
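/* Apply a link up/down transition to a slave port: program mac_control
 * for the negotiated speed, set the ALE port state and, for links that
 * are not MAC-PHY, update the netdev carrier directly.
 */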
1940 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1941 					  struct net_device *ndev,
1942 					  struct gbe_slave *slave,
1943 					  int up)
1944 {
1945 	struct phy_device *phy = slave->phy;
1946 	u32 mac_control = 0;
1947 
1948 	if (up) {
1949 		mac_control = slave->mac_control;
1950 		if (phy && (phy->speed == SPEED_1000)) {
1951 			mac_control |= MACSL_GIG_MODE;
1952 			mac_control &= ~MACSL_XGIG_MODE;
1953 		} else if (phy && (phy->speed == SPEED_10000)) {
1954 			mac_control |= MACSL_XGIG_MODE;
1955 			mac_control &= ~MACSL_GIG_MODE;
1956 		}
1957 
1958 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1959 						 mac_control));
1960 
1961 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1962 				     ALE_PORT_STATE,
1963 				     ALE_PORT_STATE_FORWARD);
1964 
1965 		if (ndev && slave->open &&
1966 		    slave->link_interface != SGMII_LINK_MAC_PHY &&
1967 		    slave->link_interface != XGMII_LINK_MAC_PHY)
1968 			netif_carrier_on(ndev);
1969 	} else {
1970 		writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1971 						 mac_control));
1972 		cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1973 				     ALE_PORT_STATE,
1974 				     ALE_PORT_STATE_DISABLE);
1975 		if (ndev &&
1976 		    slave->link_interface != SGMII_LINK_MAC_PHY &&
1977 		    slave->link_interface != XGMII_LINK_MAC_PHY)
1978 			netif_carrier_off(ndev);
1979 	}
1980 
1981 	if (phy)
1982 		phy_print_status(phy);
1983 }
1984 
1985 static bool gbe_phy_link_status(struct gbe_slave *slave)
1986 {
1987 	return !slave->phy || slave->phy->link;
1988 }
1989 
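/* Combine the SGMII link status (for non-XGMII links) with the PHY link
 * status and only act when the result differs from the last recorded
 * state.
 */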
1990 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1991 					  struct gbe_slave *slave,
1992 					  struct net_device *ndev)
1993 {
1994 	int sp = slave->slave_num;
1995 	int phy_link_state, sgmii_link_state = 1, link_state;
1996 
1997 	if (!slave->open)
1998 		return;
1999 
2000 	if (!SLAVE_LINK_IS_XGMII(slave)) {
2001 		sgmii_link_state =
2002 			netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2003 	}
2004 
2005 	phy_link_state = gbe_phy_link_status(slave);
2006 	link_state = phy_link_state & sgmii_link_state;
2007 
2008 	if (atomic_xchg(&slave->link_state, link_state) != link_state)
2009 		netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2010 					      link_state);
2011 }
2012 
2013 static void xgbe_adjust_link(struct net_device *ndev)
2014 {
2015 	struct netcp_intf *netcp = netdev_priv(ndev);
2016 	struct gbe_intf *gbe_intf;
2017 
2018 	gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2019 	if (!gbe_intf)
2020 		return;
2021 
2022 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2023 				      ndev);
2024 }
2025 
2026 static void gbe_adjust_link(struct net_device *ndev)
2027 {
2028 	struct netcp_intf *netcp = netdev_priv(ndev);
2029 	struct gbe_intf *gbe_intf;
2030 
2031 	gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2032 	if (!gbe_intf)
2033 		return;
2034 
2035 	netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2036 				      ndev);
2037 }
2038 
2039 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2040 {
	/* The dummy netdev's private area holds a pointer to the gbe_priv
	 * (see init_secondary_ports()), so dereference it here.
	 */
	struct gbe_priv *gbe_dev = *(struct gbe_priv **)netdev_priv(ndev);
2042 	struct gbe_slave *slave;
2043 
2044 	for_each_sec_slave(slave, gbe_dev)
2045 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2046 }
2047 
2048 /* Reset EMAC
2049  * Soft reset is set and polled until clear, or until a timeout occurs
2050  */
2051 static int gbe_port_reset(struct gbe_slave *slave)
2052 {
2053 	u32 i, v;
2054 
2055 	/* Set the soft reset bit */
2056 	writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2057 
2058 	/* Wait for the bit to clear */
2059 	for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2060 		v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2061 		if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2062 			return 0;
2063 	}
2064 
2065 	/* Timeout on the reset */
2066 	return GMACSL_RET_WARN_RESET_INCOMPLETE;
2067 }
2068 
2069 /* Configure EMAC */
2070 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2071 			    int max_rx_len)
2072 {
2073 	void __iomem *rx_maxlen_reg;
2074 	u32 xgmii_mode;
2075 
2076 	if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2077 		max_rx_len = NETCP_MAX_FRAME_SIZE;
2078 
2079 	/* Enable correct MII mode at SS level */
2080 	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
2081 	    (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2082 		xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2083 		xgmii_mode |= (1 << slave->slave_num);
2084 		writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2085 	}
2086 
2087 	if (IS_SS_ID_MU(gbe_dev))
2088 		rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2089 	else
2090 		rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2091 
2092 	writel(max_rx_len, rx_maxlen_reg);
2093 	writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2094 }
2095 
2096 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2097 			      struct gbe_slave *slave, bool set)
2098 {
2099 	if (SLAVE_LINK_IS_XGMII(slave))
2100 		return;
2101 
2102 	netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2103 			    slave->slave_num, set);
2104 }
2105 
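/* Quiesce a slave port: reset the SGMII and EMAC blocks, disable ALE
 * forwarding, remove the broadcast entry and detach any attached PHY.
 */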
2106 static void gbe_slave_stop(struct gbe_intf *intf)
2107 {
2108 	struct gbe_priv *gbe_dev = intf->gbe_dev;
2109 	struct gbe_slave *slave = intf->slave;
2110 
2111 	gbe_sgmii_rtreset(gbe_dev, slave, true);
2112 	gbe_port_reset(slave);
2113 	/* Disable forwarding */
2114 	cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2115 			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2116 	cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2117 			   1 << slave->port_num, 0, 0);
2118 
2119 	if (!slave->phy)
2120 		return;
2121 
2122 	phy_stop(slave->phy);
2123 	phy_disconnect(slave->phy);
2124 	slave->phy = NULL;
2125 }
2126 
2127 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2128 {
2129 	if (SLAVE_LINK_IS_XGMII(slave))
2130 		return;
2131 
2132 	netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2133 	netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2134 			   slave->link_interface);
2135 }
2136 
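/* Bring up a slave port: configure SGMII, reset and configure the EMAC,
 * program the MAC address, enable ALE forwarding and, for MAC-PHY link
 * interfaces, connect and start the PHY.
 */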
2137 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2138 {
2139 	struct gbe_priv *priv = gbe_intf->gbe_dev;
2140 	struct gbe_slave *slave = gbe_intf->slave;
2141 	phy_interface_t phy_mode;
2142 	bool has_phy = false;
2143 
2144 	void (*hndlr)(struct net_device *) = gbe_adjust_link;
2145 
2146 	gbe_sgmii_config(priv, slave);
2147 	gbe_port_reset(slave);
2148 	gbe_sgmii_rtreset(priv, slave, false);
2149 	gbe_port_config(priv, slave, priv->rx_packet_max);
2150 	gbe_set_slave_mac(slave, gbe_intf);
2151 	/* enable forwarding */
2152 	cpsw_ale_control_set(priv->ale, slave->port_num,
2153 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2154 	cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2155 			   1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2156 
2157 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2158 		has_phy = true;
2159 		phy_mode = PHY_INTERFACE_MODE_SGMII;
2160 		slave->phy_port_t = PORT_MII;
2161 	} else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2162 		has_phy = true;
2163 		phy_mode = PHY_INTERFACE_MODE_NA;
2164 		slave->phy_port_t = PORT_FIBRE;
2165 	}
2166 
2167 	if (has_phy) {
2168 		if (priv->ss_version == XGBE_SS_VERSION_10)
2169 			hndlr = xgbe_adjust_link;
2170 
2171 		slave->phy = of_phy_connect(gbe_intf->ndev,
2172 					    slave->phy_node,
2173 					    hndlr, 0,
2174 					    phy_mode);
2175 		if (!slave->phy) {
2176 			dev_err(priv->dev, "phy not found on slave %d\n",
2177 				slave->slave_num);
2178 			return -ENODEV;
2179 		}
2180 		dev_dbg(priv->dev, "phy found: %s\n",
2181 			phydev_name(slave->phy));
2182 		phy_start(slave->phy);
2183 		phy_read_status(slave->phy);
2184 	}
2185 	return 0;
2186 }
2187 
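/* One-time host port and ALE setup. ALE bypass is used unless the
 * "enable-ale" DT property was present; the remaining calls set the
 * default VLAN/multicast flooding masks and put the host port in
 * forwarding state.
 */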
2188 static void gbe_init_host_port(struct gbe_priv *priv)
2189 {
2190 	int bypass_en = 1;
2191 
2192 	/* Host Tx Pri */
2193 	if (IS_SS_ID_NU(priv))
2194 		writel(HOST_TX_PRI_MAP_DEFAULT,
2195 		       GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2196 
2197 	/* Max length register */
2198 	writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2199 						  rx_maxlen));
2200 
2201 	cpsw_ale_start(priv->ale);
2202 
2203 	if (priv->enable_ale)
2204 		bypass_en = 0;
2205 
2206 	cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2207 
2208 	cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2209 
2210 	cpsw_ale_control_set(priv->ale, priv->host_port,
2211 			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2212 
2213 	cpsw_ale_control_set(priv->ale, 0,
2214 			     ALE_PORT_UNKNOWN_VLAN_MEMBER,
2215 			     GBE_PORT_MASK(priv->ale_ports));
2216 
2217 	cpsw_ale_control_set(priv->ale, 0,
2218 			     ALE_PORT_UNKNOWN_MCAST_FLOOD,
2219 			     GBE_PORT_MASK(priv->ale_ports - 1));
2220 
2221 	cpsw_ale_control_set(priv->ale, 0,
2222 			     ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2223 			     GBE_PORT_MASK(priv->ale_ports));
2224 
2225 	cpsw_ale_control_set(priv->ale, 0,
2226 			     ALE_PORT_UNTAGGED_EGRESS,
2227 			     GBE_PORT_MASK(priv->ale_ports));
2228 }
2229 
2230 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2231 {
2232 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2233 	u16 vlan_id;
2234 
2235 	cpsw_ale_add_mcast(gbe_dev->ale, addr,
2236 			   GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2237 			   ALE_MCAST_FWD_2);
2238 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2239 		cpsw_ale_add_mcast(gbe_dev->ale, addr,
2240 				   GBE_PORT_MASK(gbe_dev->ale_ports),
2241 				   ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2242 	}
2243 }
2244 
2245 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2246 {
2247 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2248 	u16 vlan_id;
2249 
2250 	cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2251 
2252 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2253 		cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2254 				   ALE_VLAN, vlan_id);
2255 }
2256 
2257 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2258 {
2259 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2260 	u16 vlan_id;
2261 
2262 	cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2263 
2264 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2265 		cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2266 	}
2267 }
2268 
2269 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2270 {
2271 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2272 	u16 vlan_id;
2273 
2274 	cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2275 
2276 	for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2277 		cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2278 				   ALE_VLAN, vlan_id);
2279 	}
2280 }
2281 
2282 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2283 {
2284 	struct gbe_intf *gbe_intf = intf_priv;
2285 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2286 
2287 	dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2288 		naddr->addr, naddr->type);
2289 
2290 	switch (naddr->type) {
2291 	case ADDR_MCAST:
2292 	case ADDR_BCAST:
2293 		gbe_add_mcast_addr(gbe_intf, naddr->addr);
2294 		break;
2295 	case ADDR_UCAST:
2296 	case ADDR_DEV:
2297 		gbe_add_ucast_addr(gbe_intf, naddr->addr);
2298 		break;
2299 	case ADDR_ANY:
2300 		/* nothing to do for promiscuous */
2301 	default:
2302 		break;
2303 	}
2304 
2305 	return 0;
2306 }
2307 
2308 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2309 {
2310 	struct gbe_intf *gbe_intf = intf_priv;
2311 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2312 
2313 	dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2314 		naddr->addr, naddr->type);
2315 
2316 	switch (naddr->type) {
2317 	case ADDR_MCAST:
2318 	case ADDR_BCAST:
2319 		gbe_del_mcast_addr(gbe_intf, naddr->addr);
2320 		break;
2321 	case ADDR_UCAST:
2322 	case ADDR_DEV:
2323 		gbe_del_ucast_addr(gbe_intf, naddr->addr);
2324 		break;
2325 	case ADDR_ANY:
2326 		/* nothing to do for promiscuous */
2327 	default:
2328 		break;
2329 	}
2330 
2331 	return 0;
2332 }
2333 
2334 static int gbe_add_vid(void *intf_priv, int vid)
2335 {
2336 	struct gbe_intf *gbe_intf = intf_priv;
2337 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2338 
2339 	set_bit(vid, gbe_intf->active_vlans);
2340 
2341 	cpsw_ale_add_vlan(gbe_dev->ale, vid,
2342 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2343 			  GBE_MASK_NO_PORTS,
2344 			  GBE_PORT_MASK(gbe_dev->ale_ports),
2345 			  GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2346 
2347 	return 0;
2348 }
2349 
2350 static int gbe_del_vid(void *intf_priv, int vid)
2351 {
2352 	struct gbe_intf *gbe_intf = intf_priv;
2353 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2354 
2355 	cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2356 	clear_bit(vid, gbe_intf->active_vlans);
2357 	return 0;
2358 }
2359 
2360 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2361 {
2362 	struct gbe_intf *gbe_intf = intf_priv;
2363 	struct phy_device *phy = gbe_intf->slave->phy;
2364 	int ret = -EOPNOTSUPP;
2365 
2366 	if (phy)
2367 		ret = phy_mii_ioctl(phy, req, cmd);
2368 
2369 	return ret;
2370 }
2371 
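/* Periodic housekeeping: poll link state for the primary interfaces and
 * the secondary slaves, then refresh the hardware statistics so the
 * 32-bit counters are accumulated before they can wrap.
 */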
2372 static void netcp_ethss_timer(unsigned long arg)
2373 {
2374 	struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
2375 	struct gbe_intf *gbe_intf;
2376 	struct gbe_slave *slave;
2377 
2378 	/* Check & update SGMII link state of interfaces */
2379 	for_each_intf(gbe_intf, gbe_dev) {
2380 		if (!gbe_intf->slave->open)
2381 			continue;
2382 		netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2383 					      gbe_intf->ndev);
2384 	}
2385 
2386 	/* Check & update SGMII link state of secondary ports */
2387 	for_each_sec_slave(slave, gbe_dev) {
2388 		netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2389 	}
2390 
2391 	/* The timer callback runs in BH context, no need to block BHs */
2392 	spin_lock(&gbe_dev->hw_stats_lock);
2393 
2394 	if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2395 		gbe_update_stats_ver14(gbe_dev, NULL);
2396 	else
2397 		gbe_update_stats(gbe_dev, NULL);
2398 
2399 	spin_unlock(&gbe_dev->hw_stats_lock);
2400 
2401 	gbe_dev->timer.expires	= jiffies + GBE_TIMER_INTERVAL;
2402 	add_timer(&gbe_dev->timer);
2403 }
2404 
2405 static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
2406 {
2407 	struct gbe_intf *gbe_intf = data;
2408 
2409 	p_info->tx_pipe = &gbe_intf->tx_pipe;
2410 	return 0;
2411 }
2412 
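/* Open callback for an interface: program the switch-level control and
 * statistics registers, bring up the slave port and register the TX
 * hook that steers packets into this interface's TX pipe.
 */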
2413 static int gbe_open(void *intf_priv, struct net_device *ndev)
2414 {
2415 	struct gbe_intf *gbe_intf = intf_priv;
2416 	struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2417 	struct netcp_intf *netcp = netdev_priv(ndev);
2418 	struct gbe_slave *slave = gbe_intf->slave;
2419 	int port_num = slave->port_num;
2420 	u32 reg;
2421 	int ret;
2422 
2423 	reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2424 	dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2425 		GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2426 		GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2427 
2428 	/* For 10G and on NetCP 1.5, use directed to port */
2429 	if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
2430 		gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2431 
2432 	if (gbe_dev->enable_ale)
2433 		gbe_intf->tx_pipe.switch_to_port = 0;
2434 	else
2435 		gbe_intf->tx_pipe.switch_to_port = port_num;
2436 
2437 	dev_dbg(gbe_dev->dev,
2438 		"opened TX channel %s: %p, switch-to-port %d, flags %d\n",
2439 		gbe_intf->tx_pipe.dma_chan_name,
2440 		gbe_intf->tx_pipe.dma_channel,
2441 		gbe_intf->tx_pipe.switch_to_port,
2442 		gbe_intf->tx_pipe.flags);
2443 
2444 	gbe_slave_stop(gbe_intf);
2445 
2446 	/* disable priority elevation and enable statistics on all ports */
2447 	writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2448 
2449 	/* Control register */
2450 	writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2451 
2452 	/* All statistics enabled and STAT AB visible by default */
2453 	writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2454 						    stat_port_en));
2455 
2456 	ret = gbe_slave_open(gbe_intf);
2457 	if (ret)
2458 		goto fail;
2459 
2460 	netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2461 			      gbe_intf);
2462 
2463 	slave->open = true;
2464 	netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2465 	return 0;
2466 
2467 fail:
2468 	gbe_slave_stop(gbe_intf);
2469 	return ret;
2470 }
2471 
2472 static int gbe_close(void *intf_priv, struct net_device *ndev)
2473 {
2474 	struct gbe_intf *gbe_intf = intf_priv;
2475 	struct netcp_intf *netcp = netdev_priv(ndev);
2476 
2477 	gbe_slave_stop(gbe_intf);
2478 	netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2479 				gbe_intf);
2480 
2481 	gbe_intf->slave->open = false;
2482 	atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2483 	return 0;
2484 }
2485 
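/* Parse one slave DT node and derive its port and EMAC register blocks
 * plus the per-subsystem register offsets.
 */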
2486 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2487 		      struct device_node *node)
2488 {
2489 	int port_reg_num;
2490 	u32 port_reg_ofs, emac_reg_ofs;
2491 	u32 port_reg_blk_sz, emac_reg_blk_sz;
2492 
2493 	if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2494 		dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2495 		return -EINVAL;
2496 	}
2497 
2498 	if (of_property_read_u32(node, "link-interface",
2499 				 &slave->link_interface)) {
2500 		dev_warn(gbe_dev->dev,
2501 			 "missing link-interface value, defaulting to 1G mac-phy link\n");
2502 		slave->link_interface = SGMII_LINK_MAC_PHY;
2503 	}
2504 
2505 	slave->open = false;
2506 	slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
2507 	slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2508 
2509 	if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2510 		slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2511 	else
2512 		slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
2513 
2514 	/* Emac regs memmap are contiguous but port regs are not */
2515 	port_reg_num = slave->slave_num;
2516 	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2517 		if (slave->slave_num > 1) {
2518 			port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2519 			port_reg_num -= 2;
2520 		} else {
2521 			port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2522 		}
2523 		emac_reg_ofs = GBE13_EMAC_OFFSET;
2524 		port_reg_blk_sz = 0x30;
2525 		emac_reg_blk_sz = 0x40;
2526 	} else if (IS_SS_ID_MU(gbe_dev)) {
2527 		port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2528 		emac_reg_ofs = GBENU_EMAC_OFFSET;
2529 		port_reg_blk_sz = 0x1000;
2530 		emac_reg_blk_sz = 0x1000;
2531 	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2532 		port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
2533 		emac_reg_ofs = XGBE10_EMAC_OFFSET;
2534 		port_reg_blk_sz = 0x30;
2535 		emac_reg_blk_sz = 0x40;
2536 	} else {
2537 		dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2538 			gbe_dev->ss_version);
2539 		return -EINVAL;
2540 	}
2541 
2542 	slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
2543 				(port_reg_blk_sz * port_reg_num);
2544 	slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
2545 				(emac_reg_blk_sz * slave->slave_num);
2546 
2547 	if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2548 		/* Initialize slave port register offsets */
2549 		GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2550 		GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2551 		GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2552 		GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2553 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2554 		GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2555 		GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2556 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2557 		GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2558 
2559 		/* Initialize EMAC register offsets */
2560 		GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2561 		GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2562 		GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2563 
2564 	} else if (IS_SS_ID_MU(gbe_dev)) {
2565 		/* Initialize slave port register offsets */
2566 		GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
2567 		GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
2568 		GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
2569 		GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
2570 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
2571 		GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2572 		GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
2573 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2574 		GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
2575 		GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
2576 
2577 		/* Initialize EMAC register offsets */
2578 		GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
2579 		GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
2580 
2581 	} else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2582 		/* Initialize slave port register offsets */
2583 		XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
2584 		XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2585 		XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
2586 		XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
2587 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2588 		XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2589 		XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2590 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2591 		XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2592 
2593 		/* Initialize EMAC register offsets */
2594 		XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
2595 		XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2596 		XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2597 	}
2598 
2599 	atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
2600 	return 0;
2601 }
2602 
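/* Secondary slave ports are not exposed as network interfaces. They are
 * configured once here, and MAC-PHY links are attached to a dummy netdev
 * whose private area holds a pointer back to gbe_priv so that
 * of_phy_connect() can be used.
 */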
2603 static void init_secondary_ports(struct gbe_priv *gbe_dev,
2604 				 struct device_node *node)
2605 {
2606 	struct device *dev = gbe_dev->dev;
2607 	phy_interface_t phy_mode;
2608 	struct gbe_priv **priv;
2609 	struct device_node *port;
2610 	struct gbe_slave *slave;
2611 	bool mac_phy_link = false;
2612 
2613 	for_each_child_of_node(node, port) {
2614 		slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
2615 		if (!slave) {
2616 			dev_err(dev,
2617 				"memory alloc failed for secondary port(%s), skipping...\n",
2618 				port->name);
2619 			continue;
2620 		}
2621 
2622 		if (init_slave(gbe_dev, slave, port)) {
2623 			dev_err(dev,
2624 				"Failed to initialize secondary port(%s), skipping...\n",
2625 				port->name);
2626 			devm_kfree(dev, slave);
2627 			continue;
2628 		}
2629 
2630 		gbe_sgmii_config(gbe_dev, slave);
2631 		gbe_port_reset(slave);
2632 		gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
2633 		list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
2634 		gbe_dev->num_slaves++;
2635 		if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
2636 		    (slave->link_interface == XGMII_LINK_MAC_PHY))
2637 			mac_phy_link = true;
2638 
2639 		slave->open = true;
2640 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
2641 			of_node_put(port);
2642 			break;
2643 		}
2644 	}
2645 
2646 	/* of_phy_connect() is needed only for MAC-PHY interface */
2647 	if (!mac_phy_link)
2648 		return;
2649 
2650 	/* Allocate dummy netdev device for attaching to phy device */
2651 	gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
2652 					NET_NAME_UNKNOWN, ether_setup);
2653 	if (!gbe_dev->dummy_ndev) {
2654 		dev_err(dev,
2655 			"Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
2656 		return;
2657 	}
2658 	priv = netdev_priv(gbe_dev->dummy_ndev);
2659 	*priv = gbe_dev;
2660 
2661 	if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2662 		phy_mode = PHY_INTERFACE_MODE_SGMII;
2663 		slave->phy_port_t = PORT_MII;
2664 	} else {
2665 		phy_mode = PHY_INTERFACE_MODE_NA;
2666 		slave->phy_port_t = PORT_FIBRE;
2667 	}
2668 
2669 	for_each_sec_slave(slave, gbe_dev) {
2670 		if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2671 		    (slave->link_interface != XGMII_LINK_MAC_PHY))
2672 			continue;
2673 		slave->phy =
2674 			of_phy_connect(gbe_dev->dummy_ndev,
2675 				       slave->phy_node,
2676 				       gbe_adjust_link_sec_slaves,
2677 				       0, phy_mode);
2678 		if (!slave->phy) {
2679 			dev_err(dev, "phy not found for slave %d\n",
2680 				slave->slave_num);
2681 			slave->phy = NULL;
2682 		} else {
2683 			dev_dbg(dev, "phy found: %s\n",
2684 				phydev_name(slave->phy));
2685 			phy_start(slave->phy);
2686 			phy_read_status(slave->phy);
2687 		}
2688 	}
2689 }
2690 
2691 static void free_secondary_ports(struct gbe_priv *gbe_dev)
2692 {
2693 	struct gbe_slave *slave;
2694 
2695 	while (!list_empty(&gbe_dev->secondary_slaves)) {
2696 		slave = first_sec_slave(gbe_dev);
2697 
2698 		if (slave->phy)
2699 			phy_disconnect(slave->phy);
2700 		list_del(&slave->slave_list);
2701 	}
2702 	if (gbe_dev->dummy_ndev)
2703 		free_netdev(gbe_dev->dummy_ndev);
2704 }
2705 
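/* Map the 10G (XGBE) subsystem, switch module and serdes register
 * spaces and fill in the XGBE-specific stats tables, offsets and ALE
 * parameters.
 */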
2706 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
2707 				 struct device_node *node)
2708 {
2709 	struct resource res;
2710 	void __iomem *regs;
2711 	int ret, i;
2712 
2713 	ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
2714 	if (ret) {
2715 		dev_err(gbe_dev->dev,
2716 			"Can't translate xgbe node(%s) ss address at index %d\n",
2717 			node->name, XGBE_SS_REG_INDEX);
2718 		return ret;
2719 	}
2720 
2721 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2722 	if (IS_ERR(regs)) {
2723 		dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
2724 		return PTR_ERR(regs);
2725 	}
2726 	gbe_dev->ss_regs = regs;
2727 
2728 	ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
2729 	if (ret) {
2730 		dev_err(gbe_dev->dev,
2731 			"Can't translate xgbe node(%s) sm address at index %d\n",
2732 			node->name, XGBE_SM_REG_INDEX);
2733 		return ret;
2734 	}
2735 
2736 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2737 	if (IS_ERR(regs)) {
2738 		dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
2739 		return PTR_ERR(regs);
2740 	}
2741 	gbe_dev->switch_regs = regs;
2742 
2743 	ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
2744 	if (ret) {
2745 		dev_err(gbe_dev->dev,
2746 			"Can't translate xgbe node(%s) serdes address at index %d\n",
2747 			node->name, XGBE_SERDES_REG_INDEX);
2748 		return ret;
2749 	}
2750 
2751 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2752 	if (IS_ERR(regs)) {
2753 		dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
2754 		return PTR_ERR(regs);
2755 	}
2756 	gbe_dev->xgbe_serdes_regs = regs;
2757 
2758 	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
2759 	gbe_dev->et_stats = xgbe10_et_stats;
2760 	gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
2761 
2762 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2763 					 gbe_dev->num_et_stats * sizeof(u64),
2764 					 GFP_KERNEL);
2765 	if (!gbe_dev->hw_stats) {
2766 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2767 		return -ENOMEM;
2768 	}
2769 
2770 	gbe_dev->hw_stats_prev =
2771 		devm_kzalloc(gbe_dev->dev,
2772 			     gbe_dev->num_et_stats * sizeof(u32),
2773 			     GFP_KERNEL);
2774 	if (!gbe_dev->hw_stats_prev) {
2775 		dev_err(gbe_dev->dev,
2776 			"hw_stats_prev memory allocation failed\n");
2777 		return -ENOMEM;
2778 	}
2779 
2780 	gbe_dev->ss_version = XGBE_SS_VERSION_10;
2781 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
2782 					XGBE10_SGMII_MODULE_OFFSET;
2783 	gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
2784 
2785 	for (i = 0; i < gbe_dev->max_num_ports; i++)
2786 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2787 			XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
2788 
2789 	gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
2790 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2791 	gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
2792 	gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
2793 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
2794 
2795 	/* Subsystem registers */
2796 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2797 	XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
2798 
2799 	/* Switch module registers */
2800 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2801 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2802 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2803 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2804 	XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2805 
2806 	/* Host port registers */
2807 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2808 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2809 	XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2810 	return 0;
2811 }
2812 
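/* Map the 1G subsystem base registers and read the version register
 * that selects between the GBE register layouts.
 */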
2813 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
2814 				    struct device_node *node)
2815 {
2816 	struct resource res;
2817 	void __iomem *regs;
2818 	int ret;
2819 
2820 	ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
2821 	if (ret) {
2822 		dev_err(gbe_dev->dev,
2823 			"Can't translate gbe node(%s) ss address at index %d\n",
2824 			node->name, GBE_SS_REG_INDEX);
2825 		return ret;
2826 	}
2827 
2828 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2829 	if (IS_ERR(regs)) {
2830 		dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
2831 		return PTR_ERR(regs);
2832 	}
2833 	gbe_dev->ss_regs = regs;
2834 	gbe_dev->ss_version = readl(gbe_dev->ss_regs);
2835 	return 0;
2836 }
2837 
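/* Register setup for version 1.4 (GBE13) subsystems: map the SGMII
 * port 3/4 and switch module spaces and fill in the GBE13 offsets.
 */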
2838 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
2839 				struct device_node *node)
2840 {
2841 	struct resource res;
2842 	void __iomem *regs;
2843 	int i, ret;
2844 
2845 	ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
2846 	if (ret) {
2847 		dev_err(gbe_dev->dev,
2848 			"Can't translate gbe node(%s) address at index %d\n",
2849 			node->name, GBE_SGMII34_REG_INDEX);
2850 		return ret;
2851 	}
2852 
2853 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2854 	if (IS_ERR(regs)) {
2855 		dev_err(gbe_dev->dev,
2856 			"Failed to map gbe sgmii port34 register base\n");
2857 		return PTR_ERR(regs);
2858 	}
2859 	gbe_dev->sgmii_port34_regs = regs;
2860 
2861 	ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
2862 	if (ret) {
2863 		dev_err(gbe_dev->dev,
2864 			"Can't translate gbe node(%s) address at index %d\n",
2865 			node->name, GBE_SM_REG_INDEX);
2866 		return ret;
2867 	}
2868 
2869 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2870 	if (IS_ERR(regs)) {
2871 		dev_err(gbe_dev->dev,
2872 			"Failed to map gbe switch module register base\n");
2873 		return PTR_ERR(regs);
2874 	}
2875 	gbe_dev->switch_regs = regs;
2876 
2877 	gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
2878 	gbe_dev->et_stats = gbe13_et_stats;
2879 	gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
2880 
2881 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2882 					 gbe_dev->num_et_stats * sizeof(u64),
2883 					 GFP_KERNEL);
2884 	if (!gbe_dev->hw_stats) {
2885 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2886 		return -ENOMEM;
2887 	}
2888 
2889 	gbe_dev->hw_stats_prev =
2890 		devm_kzalloc(gbe_dev->dev,
2891 			     gbe_dev->num_et_stats * sizeof(u32),
2892 			     GFP_KERNEL);
2893 	if (!gbe_dev->hw_stats_prev) {
2894 		dev_err(gbe_dev->dev,
2895 			"hw_stats_prev memory allocation failed\n");
2896 		return -ENOMEM;
2897 	}
2898 
2899 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
2900 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
2901 
2902 	/* K2HK has only 2 hw stats modules visible at a time, so
2903 	 * modules 0 & 2 point to one base and
2904 	 * modules 1 & 3 point to the other base
2905 	 */
2906 	for (i = 0; i < gbe_dev->max_num_slaves; i++) {
2907 		gbe_dev->hw_stats_regs[i] =
2908 			gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
2909 			(GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
2910 	}
2911 
2912 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
2913 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
2914 	gbe_dev->host_port = GBE13_HOST_PORT_NUM;
2915 	gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2916 	gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
2917 
2918 	/* Subsystem registers */
2919 	GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2920 
2921 	/* Switch module registers */
2922 	GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2923 	GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2924 	GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
2925 	GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2926 	GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2927 	GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2928 
2929 	/* Host port registers */
2930 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2931 	GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2932 	return 0;
2933 }
2934 
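/* Register setup for NU/2U (GBENU) subsystems: only the switch module
 * region needs mapping here; the SGMII block is addressed relative to
 * the subsystem base mapped earlier.
 */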
2935 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2936 				struct device_node *node)
2937 {
2938 	struct resource res;
2939 	void __iomem *regs;
2940 	int i, ret;
2941 
2942 	gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
2943 	gbe_dev->et_stats = gbenu_et_stats;
2944 
2945 	if (IS_SS_ID_NU(gbe_dev))
2946 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2947 			(gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
2948 	else
2949 		gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2950 					GBENU_ET_STATS_PORT_SIZE;
2951 
2952 	gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2953 					 gbe_dev->num_et_stats * sizeof(u64),
2954 					 GFP_KERNEL);
2955 	if (!gbe_dev->hw_stats) {
2956 		dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2957 		return -ENOMEM;
2958 	}
2959 
2960 	gbe_dev->hw_stats_prev =
2961 		devm_kzalloc(gbe_dev->dev,
2962 			     gbe_dev->num_et_stats * sizeof(u32),
2963 			     GFP_KERNEL);
2964 	if (!gbe_dev->hw_stats_prev) {
2965 		dev_err(gbe_dev->dev,
2966 			"hw_stats_prev memory allocation failed\n");
2967 		return -ENOMEM;
2968 	}
2969 
2970 	ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
2971 	if (ret) {
2972 		dev_err(gbe_dev->dev,
2973 			"Can't translate gbenu node(%s) address at index %d\n",
2974 			node->name, GBENU_SM_REG_INDEX);
2975 		return ret;
2976 	}
2977 
2978 	regs = devm_ioremap_resource(gbe_dev->dev, &res);
2979 	if (IS_ERR(regs)) {
2980 		dev_err(gbe_dev->dev,
2981 			"Failed to map gbenu switch module register base\n");
2982 		return PTR_ERR(regs);
2983 	}
2984 	gbe_dev->switch_regs = regs;
2985 
2986 	gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2987 
2988 	/* Although the SGMII modules are memory mapped to one contiguous
2989 	 * region on GBENU devices, setting sgmii_port34_regs keeps the
2990 	 * SGMII accessor code consistent with the other devices
2991 	 */
2992 	gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
2993 				     (2 * GBENU_SGMII_MODULE_SIZE);
2994 
2995 	gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
2996 
2997 	for (i = 0; i < (gbe_dev->max_num_ports); i++)
2998 		gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2999 			GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3000 
3001 	gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3002 	gbe_dev->ale_ports = gbe_dev->max_num_ports;
3003 	gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3004 	gbe_dev->ale_entries = GBENU_NUM_ALE_ENTRIES;
3005 	gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3006 
3007 	/* Subsystem registers */
3008 	GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3009 
3010 	/* Switch module registers */
3011 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3012 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3013 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3014 	GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3015 
3016 	/* Host port registers */
3017 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3018 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3019 
3020 	/* For NU only.  2U does not need tx_pri_map.
3021 	 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
3022 	 * while 2U has only 1 such thread
3023 	 */
3024 	GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3025 	return 0;
3026 }
3027 
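/* Probe a "gbe" or "xgbe" DT node: identify the subsystem, map its
 * registers, set up the TX pipe, interfaces and ALE, and start the
 * periodic link/statistics timer.
 */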
3028 static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3029 		     struct device_node *node, void **inst_priv)
3030 {
3031 	struct device_node *interfaces, *interface;
3032 	struct device_node *secondary_ports;
3033 	struct cpsw_ale_params ale_params;
3034 	struct gbe_priv *gbe_dev;
3035 	u32 slave_num;
3036 	int i, ret = 0;
3037 
3038 	if (!node) {
3039 		dev_err(dev, "device tree info unavailable\n");
3040 		return -ENODEV;
3041 	}
3042 
3043 	gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
3044 	if (!gbe_dev)
3045 		return -ENOMEM;
3046 
3047 	if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
3048 	    of_device_is_compatible(node, "ti,netcp-gbe")) {
3049 		gbe_dev->max_num_slaves = 4;
3050 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
3051 		gbe_dev->max_num_slaves = 8;
3052 	} else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
3053 		gbe_dev->max_num_slaves = 1;
3054 	} else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
3055 		gbe_dev->max_num_slaves = 2;
3056 	} else {
3057 		dev_err(dev, "device tree node for unknown device\n");
3058 		return -EINVAL;
3059 	}
3060 	gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
3061 
3062 	gbe_dev->dev = dev;
3063 	gbe_dev->netcp_device = netcp_device;
3064 	gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
3065 
3066 	/* init the hw stats lock */
3067 	spin_lock_init(&gbe_dev->hw_stats_lock);
3068 
3069 	if (of_find_property(node, "enable-ale", NULL)) {
3070 		gbe_dev->enable_ale = true;
3071 		dev_info(dev, "ALE enabled\n");
3072 	} else {
3073 		gbe_dev->enable_ale = false;
3074 		dev_dbg(dev, "ALE bypass enabled\n");
3075 	}
3076 
3077 	ret = of_property_read_u32(node, "tx-queue",
3078 				   &gbe_dev->tx_queue_id);
3079 	if (ret < 0) {
3080 		dev_err(dev, "missing tx-queue parameter\n");
3081 		gbe_dev->tx_queue_id = GBE_TX_QUEUE;
3082 	}
3083 
3084 	ret = of_property_read_string(node, "tx-channel",
3085 				      &gbe_dev->dma_chan_name);
3086 	if (ret < 0) {
3087 		dev_err(dev, "missing \"tx-channel\" parameter\n");
3088 		return -EINVAL;
3089 	}
3090 
3091 	if (!strcmp(node->name, "gbe")) {
3092 		ret = get_gbe_resource_version(gbe_dev, node);
3093 		if (ret)
3094 			return ret;
3095 
3096 		dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
3097 
3098 		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3099 			ret = set_gbe_ethss14_priv(gbe_dev, node);
3100 		else if (IS_SS_ID_MU(gbe_dev))
3101 			ret = set_gbenu_ethss_priv(gbe_dev, node);
3102 		else
3103 			ret = -ENODEV;
3104 
3105 	} else if (!strcmp(node->name, "xgbe")) {
3106 		ret = set_xgbe_ethss10_priv(gbe_dev, node);
3107 		if (ret)
3108 			return ret;
3109 		ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
3110 					     gbe_dev->ss_regs);
3111 	} else {
3112 		dev_err(dev, "unknown GBE node(%s)\n", node->name);
3113 		ret = -ENODEV;
3114 	}
3115 
3116 	if (ret)
3117 		return ret;
3118 
3119 	interfaces = of_get_child_by_name(node, "interfaces");
3120 	if (!interfaces)
3121 		dev_err(dev, "could not find interfaces\n");
3122 
3123 	ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3124 				gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3125 	if (ret)
3126 		return ret;
3127 
3128 	ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3129 	if (ret)
3130 		return ret;
3131 
3132 	/* Create network interfaces */
3133 	INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
3134 	for_each_child_of_node(interfaces, interface) {
3135 		ret = of_property_read_u32(interface, "slave-port", &slave_num);
3136 		if (ret) {
3137 			dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
3138 				interface->name);
3139 			continue;
3140 		}
3141 		gbe_dev->num_slaves++;
3142 		if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3143 			of_node_put(interface);
3144 			break;
3145 		}
3146 	}
3147 	of_node_put(interfaces);
3148 
3149 	if (!gbe_dev->num_slaves)
3150 		dev_warn(dev, "No network interface configured\n");
3151 
3152 	/* Initialize Secondary slave ports */
3153 	secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
3154 	INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
3155 	if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
3156 		init_secondary_ports(gbe_dev, secondary_ports);
3157 	of_node_put(secondary_ports);
3158 
3159 	if (!gbe_dev->num_slaves) {
3160 		dev_err(dev,
3161 			"No network interface or secondary ports configured\n");
3162 		ret = -ENODEV;
3163 		goto free_sec_ports;
3164 	}
3165 
3166 	memset(&ale_params, 0, sizeof(ale_params));
3167 	ale_params.dev		= gbe_dev->dev;
3168 	ale_params.ale_regs	= gbe_dev->ale_reg;
3169 	ale_params.ale_ageout	= GBE_DEFAULT_ALE_AGEOUT;
3170 	ale_params.ale_entries	= gbe_dev->ale_entries;
3171 	ale_params.ale_ports	= gbe_dev->ale_ports;
3172 
3173 	gbe_dev->ale = cpsw_ale_create(&ale_params);
3174 	if (!gbe_dev->ale) {
3175 		dev_err(gbe_dev->dev, "error initializing ale engine\n");
3176 		ret = -ENODEV;
3177 		goto free_sec_ports;
3178 	} else {
3179 		dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
3180 	}
3181 
3182 	/* initialize host port */
3183 	gbe_init_host_port(gbe_dev);
3184 
3185 	spin_lock_bh(&gbe_dev->hw_stats_lock);
3186 	for (i = 0; i < gbe_dev->num_stats_mods; i++) {
3187 		if (gbe_dev->ss_version == GBE_SS_VERSION_14)
3188 			gbe_reset_mod_stats_ver14(gbe_dev, i);
3189 		else
3190 			gbe_reset_mod_stats(gbe_dev, i);
3191 	}
3192 	spin_unlock_bh(&gbe_dev->hw_stats_lock);
3193 
3194 	init_timer(&gbe_dev->timer);
3195 	gbe_dev->timer.data	 = (unsigned long)gbe_dev;
3196 	gbe_dev->timer.function = netcp_ethss_timer;
3197 	gbe_dev->timer.expires	 = jiffies + GBE_TIMER_INTERVAL;
3198 	add_timer(&gbe_dev->timer);
3199 	*inst_priv = gbe_dev;
3200 	return 0;
3201 
3202 free_sec_ports:
3203 	free_secondary_ports(gbe_dev);
3204 	return ret;
3205 }
3206 
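/* Attach a netcp interface to this module: allocate the per-interface
 * state, parse its slave DT node, copy the TX pipe and hook up the
 * keystone ethtool ops.
 */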
3207 static int gbe_attach(void *inst_priv, struct net_device *ndev,
3208 		      struct device_node *node, void **intf_priv)
3209 {
3210 	struct gbe_priv *gbe_dev = inst_priv;
3211 	struct gbe_intf *gbe_intf;
3212 	int ret;
3213 
3214 	if (!node) {
3215 		dev_err(gbe_dev->dev, "interface node not available\n");
3216 		return -ENODEV;
3217 	}
3218 
3219 	gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
3220 	if (!gbe_intf)
3221 		return -ENOMEM;
3222 
3223 	gbe_intf->ndev = ndev;
3224 	gbe_intf->dev = gbe_dev->dev;
3225 	gbe_intf->gbe_dev = gbe_dev;
3226 
3227 	gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
3228 					sizeof(*gbe_intf->slave),
3229 					GFP_KERNEL);
3230 	if (!gbe_intf->slave) {
3231 		ret = -ENOMEM;
3232 		goto fail;
3233 	}
3234 
3235 	if (init_slave(gbe_dev, gbe_intf->slave, node)) {
3236 		ret = -ENODEV;
3237 		goto fail;
3238 	}
3239 
3240 	gbe_intf->tx_pipe = gbe_dev->tx_pipe;
3241 	ndev->ethtool_ops = &keystone_ethtool_ops;
3242 	list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
3243 	*intf_priv = gbe_intf;
3244 	return 0;
3245 
3246 fail:
3247 	if (gbe_intf->slave)
3248 		devm_kfree(gbe_dev->dev, gbe_intf->slave);
3249 	if (gbe_intf)
3250 		devm_kfree(gbe_dev->dev, gbe_intf);
3251 	return ret;
3252 }
3253 
3254 static int gbe_release(void *intf_priv)
3255 {
3256 	struct gbe_intf *gbe_intf = intf_priv;
3257 
3258 	gbe_intf->ndev->ethtool_ops = NULL;
3259 	list_del(&gbe_intf->gbe_intf_list);
3260 	devm_kfree(gbe_intf->dev, gbe_intf->slave);
3261 	devm_kfree(gbe_intf->dev, gbe_intf);
3262 	return 0;
3263 }
3264 
3265 static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3266 {
3267 	struct gbe_priv *gbe_dev = inst_priv;
3268 
3269 	del_timer_sync(&gbe_dev->timer);
3270 	cpsw_ale_stop(gbe_dev->ale);
3271 	cpsw_ale_destroy(gbe_dev->ale);
3272 	netcp_txpipe_close(&gbe_dev->tx_pipe);
3273 	free_secondary_ports(gbe_dev);
3274 
3275 	if (!list_empty(&gbe_dev->gbe_intf_head))
3276 		dev_alert(gbe_dev->dev,
3277 			  "unreleased ethss interfaces present\n");
3278 
3279 	return 0;
3280 }
3281 
3282 static struct netcp_module gbe_module = {
3283 	.name		= GBE_MODULE_NAME,
3284 	.owner		= THIS_MODULE,
3285 	.primary	= true,
3286 	.probe		= gbe_probe,
3287 	.open		= gbe_open,
3288 	.close		= gbe_close,
3289 	.remove		= gbe_remove,
3290 	.attach		= gbe_attach,
3291 	.release	= gbe_release,
3292 	.add_addr	= gbe_add_addr,
3293 	.del_addr	= gbe_del_addr,
3294 	.add_vid	= gbe_add_vid,
3295 	.del_vid	= gbe_del_vid,
3296 	.ioctl		= gbe_ioctl,
3297 };
3298 
3299 static struct netcp_module xgbe_module = {
3300 	.name		= XGBE_MODULE_NAME,
3301 	.owner		= THIS_MODULE,
3302 	.primary	= true,
3303 	.probe		= gbe_probe,
3304 	.open		= gbe_open,
3305 	.close		= gbe_close,
3306 	.remove		= gbe_remove,
3307 	.attach		= gbe_attach,
3308 	.release	= gbe_release,
3309 	.add_addr	= gbe_add_addr,
3310 	.del_addr	= gbe_del_addr,
3311 	.add_vid	= gbe_add_vid,
3312 	.del_vid	= gbe_del_vid,
3313 	.ioctl		= gbe_ioctl,
3314 };
3315 
3316 static int __init keystone_gbe_init(void)
3317 {
3318 	int ret;
3319 
3320 	ret = netcp_register_module(&gbe_module);
3321 	if (ret)
3322 		return ret;
3323 
	ret = netcp_register_module(&xgbe_module);
	if (ret) {
		netcp_unregister_module(&gbe_module);
		return ret;
	}
3327 
3328 	return 0;
3329 }
3330 module_init(keystone_gbe_init);
3331 
3332 static void __exit keystone_gbe_exit(void)
3333 {
3334 	netcp_unregister_module(&gbe_module);
3335 	netcp_unregister_module(&xgbe_module);
3336 }
3337 module_exit(keystone_gbe_exit);
3338 
3339 MODULE_LICENSE("GPL v2");
3340 MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3341 MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
3342