xref: /linux/drivers/net/dsa/microchip/ksz9477.c (revision bf5802238dc181b1f7375d358af1d01cd72d1c11)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Microchip KSZ9477 switch driver main logic
4  *
5  * Copyright (C) 2017-2019 Microchip Technology Inc.
6  */
7 
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/iopoll.h>
11 #include <linux/platform_data/microchip-ksz.h>
12 #include <linux/phy.h>
13 #include <linux/if_bridge.h>
14 #include <linux/if_vlan.h>
15 #include <net/dsa.h>
16 #include <net/switchdev.h>
17 
18 #include "ksz9477_reg.h"
19 #include "ksz_common.h"
20 #include "ksz9477.h"
21 
22 static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
23 {
24 	regmap_update_bits(ksz_regmap_8(dev), addr, bits, set ? bits : 0);
25 }
26 
27 static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
28 			 bool set)
29 {
30 	regmap_update_bits(ksz_regmap_8(dev), PORT_CTRL_ADDR(port, offset),
31 			   bits, set ? bits : 0);
32 }
33 
34 static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
35 {
36 	regmap_update_bits(ksz_regmap_32(dev), addr, bits, set ? bits : 0);
37 }
38 
39 static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
40 			       u32 bits, bool set)
41 {
42 	regmap_update_bits(ksz_regmap_32(dev), PORT_CTRL_ADDR(port, offset),
43 			   bits, set ? bits : 0);
44 }
45 
46 int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu)
47 {
48 	u16 frame_size;
49 
50 	if (!dsa_is_cpu_port(dev->ds, port))
51 		return 0;
52 
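	/* REG_SW_MTU__2 is a switch-wide register, so it is only programmed
	 * through the CPU port. The programmed value is the full frame size,
	 * i.e. the MTU plus a VLAN-tagged Ethernet header and the FCS.
	 */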
53 	frame_size = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
54 
55 	return regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2,
56 				  REG_SW_MTU_MASK, frame_size);
57 }
58 
59 /**
60  * ksz9477_handle_wake_reason - Handle wake reason on a specified port.
61  * @dev: The device structure.
62  * @port: The port number.
63  *
64  * This function reads the PME (Power Management Event) status register of a
65  * specified port to determine the wake reason. If there is no wake event, it
66  * returns early. Otherwise, it logs the wake reason which could be due to a
67  * "Magic Packet", "Link Up", or "Energy Detect" event. The PME status register
68  * is then cleared to acknowledge the handling of the wake event.
69  *
70  * Return: 0 on success, or an error code on failure.
71  */
72 static int ksz9477_handle_wake_reason(struct ksz_device *dev, int port)
73 {
74 	u8 pme_status;
75 	int ret;
76 
77 	ret = ksz_pread8(dev, port, REG_PORT_PME_STATUS, &pme_status);
78 	if (ret)
79 		return ret;
80 
81 	if (!pme_status)
82 		return 0;
83 
84 	dev_dbg(dev->dev, "Wake event on port %d due to:%s%s%s\n", port,
85 		pme_status & PME_WOL_MAGICPKT ? " \"Magic Packet\"" : "",
86 		pme_status & PME_WOL_LINKUP ? " \"Link Up\"" : "",
87 		pme_status & PME_WOL_ENERGY ? " \"Energy detect\"" : "");
88 
89 	return ksz_pwrite8(dev, port, REG_PORT_PME_STATUS, pme_status);
90 }
91 
92 /**
93  * ksz9477_get_wol - Get Wake-on-LAN settings for a specified port.
94  * @dev: The device structure.
95  * @port: The port number.
96  * @wol: Pointer to ethtool Wake-on-LAN settings structure.
97  *
98  * This function checks the PME Pin Control Register to see if PME Pin Output
99  * Enable is set, indicating PME is enabled. If enabled, it sets the supported
100  * and active WoL flags.
101  */
102 void ksz9477_get_wol(struct ksz_device *dev, int port,
103 		     struct ethtool_wolinfo *wol)
104 {
105 	u8 pme_ctrl;
106 	int ret;
107 
108 	if (!dev->wakeup_source)
109 		return;
110 
111 	wol->supported = WAKE_PHY;
112 
113 	/* Check if the current MAC address on this port can be set
114 	 * as global for WAKE_MAGIC support. The result may vary
115 	 * dynamically based on other ports' configurations.
116 	 */
117 	if (ksz_is_port_mac_global_usable(dev->ds, port))
118 		wol->supported |= WAKE_MAGIC;
119 
120 	ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl);
121 	if (ret)
122 		return;
123 
124 	if (pme_ctrl & PME_WOL_MAGICPKT)
125 		wol->wolopts |= WAKE_MAGIC;
126 	if (pme_ctrl & (PME_WOL_LINKUP | PME_WOL_ENERGY))
127 		wol->wolopts |= WAKE_PHY;
128 }
129 
130 /**
131  * ksz9477_set_wol - Set Wake-on-LAN settings for a specified port.
132  * @dev: The device structure.
133  * @port: The port number.
134  * @wol: Pointer to ethtool Wake-on-LAN settings structure.
135  *
136  * This function configures Wake-on-LAN (WoL) settings for a specified port.
137  * It validates the provided WoL options, checks if PME is enabled via the
138  * switch's PME Pin Control Register, clears any previous wake reasons,
139  * and sets the Magic Packet flag in the port's PME control register if
140  * specified.
141  *
142  * Return: 0 on success, or other error codes on failure.
143  */
144 int ksz9477_set_wol(struct ksz_device *dev, int port,
145 		    struct ethtool_wolinfo *wol)
146 {
147 	u8 pme_ctrl = 0, pme_ctrl_old = 0;
148 	bool magic_switched_off;
149 	bool magic_switched_on;
150 	int ret;
151 
152 	if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
153 		return -EINVAL;
154 
155 	if (!dev->wakeup_source)
156 		return -EOPNOTSUPP;
157 
158 	ret = ksz9477_handle_wake_reason(dev, port);
159 	if (ret)
160 		return ret;
161 
162 	if (wol->wolopts & WAKE_MAGIC)
163 		pme_ctrl |= PME_WOL_MAGICPKT;
164 	if (wol->wolopts & WAKE_PHY)
165 		pme_ctrl |= PME_WOL_LINKUP | PME_WOL_ENERGY;
166 
167 	ret = ksz_pread8(dev, port, REG_PORT_PME_CTRL, &pme_ctrl_old);
168 	if (ret)
169 		return ret;
170 
171 	if (pme_ctrl_old == pme_ctrl)
172 		return 0;
173 
174 	magic_switched_off = (pme_ctrl_old & PME_WOL_MAGICPKT) &&
175 			    !(pme_ctrl & PME_WOL_MAGICPKT);
176 	magic_switched_on = !(pme_ctrl_old & PME_WOL_MAGICPKT) &&
177 			    (pme_ctrl & PME_WOL_MAGICPKT);
178 
179 	/* To keep the MAC address reference count balanced, do this
180 	 * operation only when the WoL settings actually change.
181 	 */
182 	if (magic_switched_on) {
183 		ret = ksz_switch_macaddr_get(dev->ds, port, NULL);
184 		if (ret)
185 			return ret;
186 	} else if (magic_switched_off) {
187 		ksz_switch_macaddr_put(dev->ds);
188 	}
189 
190 	ret = ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, pme_ctrl);
191 	if (ret) {
192 		if (magic_switched_on)
193 			ksz_switch_macaddr_put(dev->ds);
194 		return ret;
195 	}
196 
197 	return 0;
198 }
199 
200 /**
201  * ksz9477_wol_pre_shutdown - Prepares the switch device for shutdown while
202  *                            considering Wake-on-LAN (WoL) settings.
203  * @dev: The switch device structure.
204  * @wol_enabled: Pointer to a boolean which will be set to true if WoL is
205  *               enabled on any port.
206  *
207  * This function prepares the switch device for a safe shutdown while taking
208  * into account the Wake-on-LAN (WoL) settings on the user ports. It updates
209  * the wol_enabled flag accordingly to reflect whether WoL is active on any
210  * port.
211  */
212 void ksz9477_wol_pre_shutdown(struct ksz_device *dev, bool *wol_enabled)
213 {
214 	struct dsa_port *dp;
215 	int ret;
216 
217 	*wol_enabled = false;
218 
219 	if (!dev->wakeup_source)
220 		return;
221 
222 	dsa_switch_for_each_user_port(dp, dev->ds) {
223 		u8 pme_ctrl = 0;
224 
225 		ret = ksz_pread8(dev, dp->index, REG_PORT_PME_CTRL, &pme_ctrl);
226 		if (!ret && pme_ctrl)
227 			*wol_enabled = true;
228 
229 		/* make sure there are no pending wake events which would
230 		 * prevent the device from going to sleep/shutdown.
231 		 */
232 		ksz9477_handle_wake_reason(dev, dp->index);
233 	}
234 
235 	/* Now we are safe to enable the PME pin. */
236 	if (*wol_enabled)
237 		ksz_write8(dev, REG_SW_PME_CTRL, PME_ENABLE);
238 }
239 
240 static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev)
241 {
242 	unsigned int val;
243 
244 	return regmap_read_poll_timeout(ksz_regmap_8(dev), REG_SW_VLAN_CTRL,
245 					val, !(val & VLAN_START), 10, 1000);
246 }
247 
248 static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
249 				  u32 *vlan_table)
250 {
251 	int ret;
252 
253 	mutex_lock(&dev->vlan_mutex);
254 
255 	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
256 	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
257 
258 	/* wait to be cleared */
259 	ret = ksz9477_wait_vlan_ctrl_ready(dev);
260 	if (ret) {
261 		dev_dbg(dev->dev, "Failed to read vlan table\n");
262 		goto exit;
263 	}
264 
265 	ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
266 	ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
267 	ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
268 
269 	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
270 
271 exit:
272 	mutex_unlock(&dev->vlan_mutex);
273 
274 	return ret;
275 }
276 
277 static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
278 				  u32 *vlan_table)
279 {
280 	int ret;
281 
282 	mutex_lock(&dev->vlan_mutex);
283 
284 	ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
285 	ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
286 	ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
287 
288 	ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
289 	ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
290 
291 	/* wait to be cleared */
292 	ret = ksz9477_wait_vlan_ctrl_ready(dev);
293 	if (ret) {
294 		dev_dbg(dev->dev, "Failed to write vlan table\n");
295 		goto exit;
296 	}
297 
298 	ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
299 
300 	/* update vlan cache table */
301 	dev->vlan_cache[vid].table[0] = vlan_table[0];
302 	dev->vlan_cache[vid].table[1] = vlan_table[1];
303 	dev->vlan_cache[vid].table[2] = vlan_table[2];
304 
305 exit:
306 	mutex_unlock(&dev->vlan_mutex);
307 
308 	return ret;
309 }
310 
311 static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
312 {
313 	ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
314 	ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
315 	ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
316 	ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
317 }
318 
319 static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
320 {
321 	ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
322 	ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
323 	ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
324 	ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
325 }
326 
327 static int ksz9477_wait_alu_ready(struct ksz_device *dev)
328 {
329 	unsigned int val;
330 
331 	return regmap_read_poll_timeout(ksz_regmap_32(dev), REG_SW_ALU_CTRL__4,
332 					val, !(val & ALU_START), 10, 1000);
333 }
334 
335 static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev)
336 {
337 	unsigned int val;
338 
339 	return regmap_read_poll_timeout(ksz_regmap_32(dev),
340 					REG_SW_ALU_STAT_CTRL__4,
341 					val, !(val & ALU_STAT_START),
342 					10, 1000);
343 }
344 
345 int ksz9477_reset_switch(struct ksz_device *dev)
346 {
347 	u8 data8;
348 	u32 data32;
349 
350 	/* reset switch */
351 	ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
352 
353 	/* turn off SPI DO Edge select */
354 	regmap_update_bits(ksz_regmap_8(dev), REG_SW_GLOBAL_SERIAL_CTRL_0,
355 			   SPI_AUTO_EDGE_DETECTION, 0);
356 
357 	/* default configuration */
358 	ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
359 	data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
360 	      SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
361 	ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
362 
363 	/* disable interrupts */
364 	ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
365 	ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
366 	ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
367 
368 	/* KSZ9893 compatible chips do not support refclk configuration */
369 	if (dev->chip_id == KSZ9893_CHIP_ID ||
370 	    dev->chip_id == KSZ8563_CHIP_ID ||
371 	    dev->chip_id == KSZ9563_CHIP_ID)
372 		return 0;
373 
374 	data8 = SW_ENABLE_REFCLKO;
375 	if (dev->synclko_disable)
376 		data8 = 0;
377 	else if (dev->synclko_125)
378 		data8 = SW_ENABLE_REFCLKO | SW_REFCLKO_IS_125MHZ;
379 	ksz_write8(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, data8);
380 
381 	return 0;
382 }
383 
384 void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
385 {
386 	struct ksz_port *p = &dev->ports[port];
387 	unsigned int val;
388 	u32 data;
389 	int ret;
390 
391 	/* retain the flush/freeze bit */
392 	data = p->freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
393 	data |= MIB_COUNTER_READ;
394 	data |= (addr << MIB_COUNTER_INDEX_S);
395 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
396 
397 	ret = regmap_read_poll_timeout(ksz_regmap_32(dev),
398 			PORT_CTRL_ADDR(port, REG_PORT_MIB_CTRL_STAT__4),
399 			val, !(val & MIB_COUNTER_READ), 10, 1000);
400 	/* failed to read the MIB counter */
401 	if (ret) {
402 		dev_dbg(dev->dev, "Failed to get MIB\n");
403 		return;
404 	}
405 
406 	/* count resets upon read */
407 	ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
408 	*cnt += data;
409 }
410 
411 void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
412 		       u64 *dropped, u64 *cnt)
413 {
414 	addr = dev->info->mib_names[addr].index;
415 	ksz9477_r_mib_cnt(dev, port, addr, cnt);
416 }
417 
418 void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
419 {
420 	u32 val = freeze ? MIB_COUNTER_FLUSH_FREEZE : 0;
421 	struct ksz_port *p = &dev->ports[port];
422 
423 	/* enable/disable the port for flush/freeze function */
424 	mutex_lock(&p->mib.cnt_mutex);
425 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, val);
426 
427 	/* used by MIB counter reading code to know freeze is enabled */
428 	p->freeze = freeze;
429 	mutex_unlock(&p->mib.cnt_mutex);
430 }
431 
432 void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
433 {
434 	struct ksz_port_mib *mib = &dev->ports[port].mib;
435 
436 	/* flush all enabled port MIB counters */
437 	mutex_lock(&mib->cnt_mutex);
438 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
439 		     MIB_COUNTER_FLUSH_FREEZE);
440 	ksz_write8(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FLUSH);
441 	ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, 0);
442 	mutex_unlock(&mib->cnt_mutex);
443 }
444 
445 static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
446 				 u16 *data)
447 {
448 	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
449 	 * and BMSR_ERCAP bits are set anyway, so clear them.
450 	 */
451 	if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
452 		*data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
453 }
454 
455 int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
456 {
457 	u16 val = 0xffff;
458 	int ret;
459 
460 	/* No real PHY after this. Simulate the PHY.
461 	 * A fixed PHY can be set up in the device tree, but this function is
462 	 * still called for that port during initialization.
463 	 * For an RGMII PHY there is no way to access it, so a fixed PHY should
464 	 * be used. For an SGMII PHY the supporting code will be added later.
465 	 */
466 	if (!dev->info->internal_phy[addr]) {
467 		struct ksz_port *p = &dev->ports[addr];
468 
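		/* Emulated register values: report an autoneg-complete, link-up
		 * PHY advertising 10/100/1000 and pause, with a Micrel/Microchip
		 * PHY ID (0x0022/0x1631).
		 */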
469 		switch (reg) {
470 		case MII_BMCR:
471 			val = 0x1140;
472 			break;
473 		case MII_BMSR:
474 			val = 0x796d;
475 			break;
476 		case MII_PHYSID1:
477 			val = 0x0022;
478 			break;
479 		case MII_PHYSID2:
480 			val = 0x1631;
481 			break;
482 		case MII_ADVERTISE:
483 			val = 0x05e1;
484 			break;
485 		case MII_LPA:
486 			val = 0xc5e1;
487 			break;
488 		case MII_CTRL1000:
489 			val = 0x0700;
490 			break;
491 		case MII_STAT1000:
492 			if (p->phydev.speed == SPEED_1000)
493 				val = 0x3800;
494 			else
495 				val = 0;
496 			break;
497 		}
498 	} else {
499 		ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
500 		if (ret)
501 			return ret;
502 
503 		ksz9477_r_phy_quirks(dev, addr, reg, &val);
504 	}
505 
506 	*data = val;
507 
508 	return 0;
509 }
510 
511 int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
512 {
513 	u32 mask, val32;
514 
515 	/* No real PHY after this. */
516 	if (!dev->info->internal_phy[addr])
517 		return 0;
518 
519 	if (reg < 0x10)
520 		return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
521 
522 	/* Errata: When using SPI, I2C, or in-band register access,
523 	 * writes to certain PHY registers should be performed as
524 	 * 32-bit writes instead of 16-bit writes.
525 	 */
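	/* The 16-bit PHY registers are paired into one 32-bit access: an even
	 * register maps to the upper half-word and an odd one to the lower
	 * half-word, so only the addressed half is modified here.
	 */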
526 	val32 = val;
527 	mask = 0xffff;
528 	if ((reg & 1) == 0) {
529 		val32 <<= 16;
530 		mask <<= 16;
531 	}
532 	reg &= ~1;
533 	return ksz_prmw32(dev, addr, 0x100 + (reg << 1), mask, val32);
534 }
535 
536 void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
537 {
538 	ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
539 }
540 
541 void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
542 {
543 	const u16 *regs = dev->info->regs;
544 	u8 data;
545 
546 	regmap_update_bits(ksz_regmap_8(dev), REG_SW_LUE_CTRL_2,
547 			   SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S,
548 			   SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
549 
550 	if (port < dev->info->port_cnt) {
551 		/* flush individual port */
552 		ksz_pread8(dev, port, regs[P_STP_CTRL], &data);
553 		if (!(data & PORT_LEARN_DISABLE))
554 			ksz_pwrite8(dev, port, regs[P_STP_CTRL],
555 				    data | PORT_LEARN_DISABLE);
556 		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
557 		ksz_pwrite8(dev, port, regs[P_STP_CTRL], data);
558 	} else {
559 		/* flush all */
560 		ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
561 	}
562 }
563 
564 int ksz9477_port_vlan_filtering(struct ksz_device *dev, int port,
565 				bool flag, struct netlink_ext_ack *extack)
566 {
567 	if (flag) {
568 		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
569 			     PORT_VLAN_LOOKUP_VID_0, true);
570 		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
571 	} else {
572 		ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
573 		ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
574 			     PORT_VLAN_LOOKUP_VID_0, false);
575 	}
576 
577 	return 0;
578 }
579 
580 int ksz9477_port_vlan_add(struct ksz_device *dev, int port,
581 			  const struct switchdev_obj_port_vlan *vlan,
582 			  struct netlink_ext_ack *extack)
583 {
584 	u32 vlan_table[3];
585 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
586 	int err;
587 
588 	err = ksz9477_get_vlan_table(dev, vlan->vid, vlan_table);
589 	if (err) {
590 		NL_SET_ERR_MSG_MOD(extack, "Failed to get vlan table");
591 		return err;
592 	}
593 
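	/* vlan_table[0] carries the valid bit and FID, vlan_table[1] the
	 * untagged-port map and vlan_table[2] the port membership map; the
	 * CPU port is kept as a tagged member.
	 */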
594 	vlan_table[0] = VLAN_VALID | (vlan->vid & VLAN_FID_M);
595 	if (untagged)
596 		vlan_table[1] |= BIT(port);
597 	else
598 		vlan_table[1] &= ~BIT(port);
599 	vlan_table[1] &= ~(BIT(dev->cpu_port));
600 
601 	vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
602 
603 	err = ksz9477_set_vlan_table(dev, vlan->vid, vlan_table);
604 	if (err) {
605 		NL_SET_ERR_MSG_MOD(extack, "Failed to set vlan table");
606 		return err;
607 	}
608 
609 	/* change PVID */
610 	if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
611 		ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vlan->vid);
612 
613 	return 0;
614 }
615 
616 int ksz9477_port_vlan_del(struct ksz_device *dev, int port,
617 			  const struct switchdev_obj_port_vlan *vlan)
618 {
619 	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
620 	u32 vlan_table[3];
621 	u16 pvid;
622 
623 	ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
624 	pvid = pvid & 0xFFF;
625 
626 	if (ksz9477_get_vlan_table(dev, vlan->vid, vlan_table)) {
627 		dev_dbg(dev->dev, "Failed to get vlan table\n");
628 		return -ETIMEDOUT;
629 	}
630 
631 	vlan_table[2] &= ~BIT(port);
632 
633 	if (pvid == vlan->vid)
634 		pvid = 1;
635 
636 	if (untagged)
637 		vlan_table[1] &= ~BIT(port);
638 
639 	if (ksz9477_set_vlan_table(dev, vlan->vid, vlan_table)) {
640 		dev_dbg(dev->dev, "Failed to set vlan table\n");
641 		return -ETIMEDOUT;
642 	}
643 
644 	ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
645 
646 	return 0;
647 }
648 
649 int ksz9477_fdb_add(struct ksz_device *dev, int port,
650 		    const unsigned char *addr, u16 vid, struct dsa_db db)
651 {
652 	u32 alu_table[4];
653 	u32 data;
654 	int ret = 0;
655 
656 	mutex_lock(&dev->alu_mutex);
657 
658 	/* find any entry with mac & vid */
659 	data = vid << ALU_FID_INDEX_S;
660 	data |= ((addr[0] << 8) | addr[1]);
661 	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
662 
663 	data = ((addr[2] << 24) | (addr[3] << 16));
664 	data |= ((addr[4] << 8) | addr[5]);
665 	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
666 
667 	/* start read operation */
668 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
669 
670 	/* wait to be finished */
671 	ret = ksz9477_wait_alu_ready(dev);
672 	if (ret) {
673 		dev_dbg(dev->dev, "Failed to read ALU\n");
674 		goto exit;
675 	}
676 
677 	/* read ALU entry */
678 	ksz9477_read_table(dev, alu_table);
679 
680 	/* update ALU entry */
681 	alu_table[0] = ALU_V_STATIC_VALID;
682 	alu_table[1] |= BIT(port);
683 	if (vid)
684 		alu_table[1] |= ALU_V_USE_FID;
685 	alu_table[2] = (vid << ALU_V_FID_S);
686 	alu_table[2] |= ((addr[0] << 8) | addr[1]);
687 	alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
688 	alu_table[3] |= ((addr[4] << 8) | addr[5]);
689 
690 	ksz9477_write_table(dev, alu_table);
691 
692 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
693 
694 	/* wait to be finished */
695 	ret = ksz9477_wait_alu_ready(dev);
696 	if (ret)
697 		dev_dbg(dev->dev, "Failed to write ALU\n");
698 
699 exit:
700 	mutex_unlock(&dev->alu_mutex);
701 
702 	return ret;
703 }
704 
705 int ksz9477_fdb_del(struct ksz_device *dev, int port,
706 		    const unsigned char *addr, u16 vid, struct dsa_db db)
707 {
708 	u32 alu_table[4];
709 	u32 data;
710 	int ret = 0;
711 
712 	mutex_lock(&dev->alu_mutex);
713 
714 	/* read any entry with mac & vid */
715 	data = vid << ALU_FID_INDEX_S;
716 	data |= ((addr[0] << 8) | addr[1]);
717 	ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
718 
719 	data = ((addr[2] << 24) | (addr[3] << 16));
720 	data |= ((addr[4] << 8) | addr[5]);
721 	ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
722 
723 	/* start read operation */
724 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
725 
726 	/* wait to be finished */
727 	ret = ksz9477_wait_alu_ready(dev);
728 	if (ret) {
729 		dev_dbg(dev->dev, "Failed to read ALU\n");
730 		goto exit;
731 	}
732 
733 	ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
734 	if (alu_table[0] & ALU_V_STATIC_VALID) {
735 		ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
736 		ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
737 		ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
738 
739 		/* clear forwarding port */
740 		alu_table[1] &= ~BIT(port);
741 
742 		/* if there is no port to forward, clear table */
743 		if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
744 			alu_table[0] = 0;
745 			alu_table[1] = 0;
746 			alu_table[2] = 0;
747 			alu_table[3] = 0;
748 		}
749 	} else {
750 		alu_table[0] = 0;
751 		alu_table[1] = 0;
752 		alu_table[2] = 0;
753 		alu_table[3] = 0;
754 	}
755 
756 	ksz9477_write_table(dev, alu_table);
757 
758 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
759 
760 	/* wait to be finished */
761 	ret = ksz9477_wait_alu_ready(dev);
762 	if (ret)
763 		dev_dbg(dev->dev, "Failed to write ALU\n");
764 
765 exit:
766 	mutex_unlock(&dev->alu_mutex);
767 
768 	return ret;
769 }
770 
771 static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
772 {
773 	alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
774 	alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
775 	alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
776 	alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
777 			ALU_V_PRIO_AGE_CNT_M;
778 	alu->mstp = alu_table[0] & ALU_V_MSTP_M;
779 
780 	alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
781 	alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
782 	alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
783 
784 	alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
785 
786 	alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
787 	alu->mac[1] = alu_table[2] & 0xFF;
788 	alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
789 	alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
790 	alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
791 	alu->mac[5] = alu_table[3] & 0xFF;
792 }
793 
794 int ksz9477_fdb_dump(struct ksz_device *dev, int port,
795 		     dsa_fdb_dump_cb_t *cb, void *data)
796 {
797 	int ret = 0;
798 	u32 ksz_data;
799 	u32 alu_table[4];
800 	struct alu_struct alu;
801 	int timeout;
802 
803 	mutex_lock(&dev->alu_mutex);
804 
805 	/* start ALU search */
806 	ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
807 
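	/* Each valid entry found by the search sets ALU_VALID; ALU_START
	 * self-clears once the whole table has been walked.
	 */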
808 	do {
809 		timeout = 1000;
810 		do {
811 			ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
812 			if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
813 				break;
814 			usleep_range(1, 10);
815 		} while (timeout-- > 0);
816 
817 		if (!timeout) {
818 			dev_dbg(dev->dev, "Failed to search ALU\n");
819 			ret = -ETIMEDOUT;
820 			goto exit;
821 		}
822 
823 		if (!(ksz_data & ALU_VALID))
824 			continue;
825 
826 		/* read ALU table */
827 		ksz9477_read_table(dev, alu_table);
828 
829 		ksz9477_convert_alu(&alu, alu_table);
830 
831 		if (alu.port_forward & BIT(port)) {
832 			ret = cb(alu.mac, alu.fid, alu.is_static, data);
833 			if (ret)
834 				goto exit;
835 		}
836 	} while (ksz_data & ALU_START);
837 
838 exit:
839 
840 	/* stop ALU search */
841 	ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
842 
843 	mutex_unlock(&dev->alu_mutex);
844 
845 	return ret;
846 }
847 
848 int ksz9477_mdb_add(struct ksz_device *dev, int port,
849 		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
850 {
851 	u32 static_table[4];
852 	const u8 *shifts;
853 	const u32 *masks;
854 	u32 data;
855 	int index;
856 	u32 mac_hi, mac_lo;
857 	int err = 0;
858 
859 	shifts = dev->info->shifts;
860 	masks = dev->info->masks;
861 
862 	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
863 	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
864 	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
865 
866 	mutex_lock(&dev->alu_mutex);
867 
868 	for (index = 0; index < dev->info->num_statics; index++) {
869 		/* find empty slot first */
870 		data = (index << shifts[ALU_STAT_INDEX]) |
871 			masks[ALU_STAT_READ] | ALU_STAT_START;
872 		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
873 
874 		/* wait to be finished */
875 		err = ksz9477_wait_alu_sta_ready(dev);
876 		if (err) {
877 			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
878 			goto exit;
879 		}
880 
881 		/* read ALU static table */
882 		ksz9477_read_table(dev, static_table);
883 
884 		if (static_table[0] & ALU_V_STATIC_VALID) {
885 			/* check this has same vid & mac address */
886 			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
887 			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
888 			    static_table[3] == mac_lo) {
889 				/* found matching one */
890 				break;
891 			}
892 		} else {
893 			/* found empty one */
894 			break;
895 		}
896 	}
897 
898 	/* no available entry */
899 	if (index == dev->info->num_statics) {
900 		err = -ENOSPC;
901 		goto exit;
902 	}
903 
904 	/* add entry */
905 	static_table[0] = ALU_V_STATIC_VALID;
906 	static_table[1] |= BIT(port);
907 	if (mdb->vid)
908 		static_table[1] |= ALU_V_USE_FID;
909 	static_table[2] = (mdb->vid << ALU_V_FID_S);
910 	static_table[2] |= mac_hi;
911 	static_table[3] = mac_lo;
912 
913 	ksz9477_write_table(dev, static_table);
914 
915 	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
916 	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
917 
918 	/* wait to be finished */
919 	if (ksz9477_wait_alu_sta_ready(dev))
920 		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
921 
922 exit:
923 	mutex_unlock(&dev->alu_mutex);
924 	return err;
925 }
926 
927 int ksz9477_mdb_del(struct ksz_device *dev, int port,
928 		    const struct switchdev_obj_port_mdb *mdb, struct dsa_db db)
929 {
930 	u32 static_table[4];
931 	const u8 *shifts;
932 	const u32 *masks;
933 	u32 data;
934 	int index;
935 	int ret = 0;
936 	u32 mac_hi, mac_lo;
937 
938 	shifts = dev->info->shifts;
939 	masks = dev->info->masks;
940 
941 	mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
942 	mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
943 	mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
944 
945 	mutex_lock(&dev->alu_mutex);
946 
947 	for (index = 0; index < dev->info->num_statics; index++) {
948 		/* look for the matching entry */
949 		data = (index << shifts[ALU_STAT_INDEX]) |
950 			masks[ALU_STAT_READ] | ALU_STAT_START;
951 		ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
952 
953 		/* wait to be finished */
954 		ret = ksz9477_wait_alu_sta_ready(dev);
955 		if (ret) {
956 			dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
957 			goto exit;
958 		}
959 
960 		/* read ALU static table */
961 		ksz9477_read_table(dev, static_table);
962 
963 		if (static_table[0] & ALU_V_STATIC_VALID) {
964 			/* check this has same vid & mac address */
965 
966 			if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
967 			    ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
968 			    static_table[3] == mac_lo) {
969 				/* found matching one */
970 				break;
971 			}
972 		}
973 	}
974 
975 	/* no matching entry found */
976 	if (index == dev->info->num_statics)
977 		goto exit;
978 
979 	/* clear port */
980 	static_table[1] &= ~BIT(port);
981 
982 	if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
983 		/* delete entry */
984 		static_table[0] = 0;
985 		static_table[1] = 0;
986 		static_table[2] = 0;
987 		static_table[3] = 0;
988 	}
989 
990 	ksz9477_write_table(dev, static_table);
991 
992 	data = (index << shifts[ALU_STAT_INDEX]) | ALU_STAT_START;
993 	ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
994 
995 	/* wait to be finished */
996 	ret = ksz9477_wait_alu_sta_ready(dev);
997 	if (ret)
998 		dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
999 
1000 exit:
1001 	mutex_unlock(&dev->alu_mutex);
1002 
1003 	return ret;
1004 }
1005 
1006 int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
1007 			    struct dsa_mall_mirror_tc_entry *mirror,
1008 			    bool ingress, struct netlink_ext_ack *extack)
1009 {
1010 	u8 data;
1011 	int p;
1012 
1013 	/* Limit to one sniffer port
1014 	 * Check if any of the ports is already set for sniffing.
1015 	 * If yes, instruct the user to remove the previous entry and exit.
1016 	 */
1017 	for (p = 0; p < dev->info->port_cnt; p++) {
1018 		/* Skip the current sniffing port */
1019 		if (p == mirror->to_local_port)
1020 			continue;
1021 
1022 		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
1023 
1024 		if (data & PORT_MIRROR_SNIFFER) {
1025 			NL_SET_ERR_MSG_MOD(extack,
1026 					   "Sniffer port is already configured, delete existing rules & retry");
1027 			return -EBUSY;
1028 		}
1029 	}
1030 
1031 	if (ingress)
1032 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
1033 	else
1034 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
1035 
1036 	/* configure mirror port */
1037 	ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
1038 		     PORT_MIRROR_SNIFFER, true);
1039 
1040 	ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
1041 
1042 	return 0;
1043 }
1044 
1045 void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
1046 			     struct dsa_mall_mirror_tc_entry *mirror)
1047 {
1048 	bool in_use = false;
1049 	u8 data;
1050 	int p;
1051 
1052 	if (mirror->ingress)
1053 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
1054 	else
1055 		ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
1056 
1057 
1058 	/* Check if any of the ports is still referring to the sniffer port */
1059 	for (p = 0; p < dev->info->port_cnt; p++) {
1060 		ksz_pread8(dev, p, P_MIRROR_CTRL, &data);
1061 
1062 		if ((data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) {
1063 			in_use = true;
1064 			break;
1065 		}
1066 	}
1067 
1068 	/* delete sniffing if there are no other mirroring rules */
1069 	if (!in_use)
1070 		ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
1071 			     PORT_MIRROR_SNIFFER, false);
1072 }
1073 
1074 static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
1075 {
1076 	phy_interface_t interface;
1077 	bool gbit;
1078 
1079 	if (dev->info->internal_phy[port])
1080 		return PHY_INTERFACE_MODE_NA;
1081 
1082 	gbit = ksz_get_gbit(dev, port);
1083 
1084 	interface = ksz_get_xmii(dev, port, gbit);
1085 
1086 	return interface;
1087 }
1088 
1089 void ksz9477_get_caps(struct ksz_device *dev, int port,
1090 		      struct phylink_config *config)
1091 {
1092 	config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
1093 				   MAC_SYM_PAUSE;
1094 
1095 	if (dev->info->gbit_capable[port])
1096 		config->mac_capabilities |= MAC_1000FD;
1097 }
1098 
1099 int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
1100 {
1101 	u32 secs = msecs / 1000;
1102 	u8 value;
1103 	u8 data;
1104 	int ret;
1105 
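	/* The aging period is split across two registers: bits 7:0 go into
	 * REG_SW_LUE_CTRL_3 and bits 10:8 into the SW_AGE_CNT field of
	 * REG_SW_LUE_CTRL_0.
	 */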
1106 	value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
1107 
1108 	ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
1109 	if (ret < 0)
1110 		return ret;
1111 
1112 	data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
1113 
1114 	ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
1115 	if (ret < 0)
1116 		return ret;
1117 
1118 	value &= ~SW_AGE_CNT_M;
1119 	value |= FIELD_PREP(SW_AGE_CNT_M, data);
1120 
1121 	return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
1122 }
1123 
1124 void ksz9477_port_queue_split(struct ksz_device *dev, int port)
1125 {
1126 	u8 data;
1127 
1128 	if (dev->info->num_tx_queues == 8)
1129 		data = PORT_EIGHT_QUEUE;
1130 	else if (dev->info->num_tx_queues == 4)
1131 		data = PORT_FOUR_QUEUE;
1132 	else if (dev->info->num_tx_queues == 2)
1133 		data = PORT_TWO_QUEUE;
1134 	else
1135 		data = PORT_SINGLE_QUEUE;
1136 
1137 	ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
1138 }
1139 
1140 void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
1141 {
1142 	struct dsa_switch *ds = dev->ds;
1143 	u16 data16;
1144 	u8 member;
1145 
1146 	/* enable tail tag for the host port */
1147 	if (cpu_port)
1148 		ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
1149 			     true);
1150 
1151 	ksz9477_port_queue_split(dev, port);
1152 
1153 	ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
1154 
1155 	/* set back pressure */
1156 	ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
1157 
1158 	/* enable broadcast storm limit */
1159 	ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
1160 
1161 	/* disable DiffServ priority */
1162 	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
1163 
1164 	/* replace priority */
1165 	ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
1166 		     false);
1167 	ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
1168 			   MTI_PVID_REPLACE, false);
1169 
1170 	/* enable 802.1p priority */
1171 	ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
1172 
1173 	/* force flow control for non-PHY ports only */
1174 	ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
1175 		     PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
1176 		     !dev->info->internal_phy[port]);
1177 
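	/* Default port-based VLAN membership: the CPU port is a member of all
	 * user ports, while a user port initially reaches only its upstream
	 * (CPU) port.
	 */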
1178 	if (cpu_port)
1179 		member = dsa_user_ports(ds);
1180 	else
1181 		member = BIT(dsa_upstream_port(ds, port));
1182 
1183 	ksz9477_cfg_port_member(dev, port, member);
1184 
1185 	/* clear pending interrupts */
1186 	if (dev->info->internal_phy[port])
1187 		ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
1188 
1189 	ksz9477_port_acl_init(dev, port);
1190 
1191 	/* clear pending wake flags */
1192 	ksz9477_handle_wake_reason(dev, port);
1193 
1194 	/* Disable all WoL options by default. Otherwise
1195 	 * ksz_switch_macaddr_get/put logic will not work properly.
1196 	 */
1197 	ksz_pwrite8(dev, port, REG_PORT_PME_CTRL, 0);
1198 }
1199 
1200 void ksz9477_config_cpu_port(struct dsa_switch *ds)
1201 {
1202 	struct ksz_device *dev = ds->priv;
1203 	struct ksz_port *p;
1204 	int i;
1205 
1206 	for (i = 0; i < dev->info->port_cnt; i++) {
1207 		if (dsa_is_cpu_port(ds, i) &&
1208 		    (dev->info->cpu_ports & (1 << i))) {
1209 			phy_interface_t interface;
1210 			const char *prev_msg;
1211 			const char *prev_mode;
1212 
1213 			dev->cpu_port = i;
1214 			p = &dev->ports[i];
1215 
1216 			/* Read from XMII register to determine host port
1217 			 * interface. If it is set specifically in the device
1218 			 * tree, note the difference to help debugging.
1219 			 */
1220 			interface = ksz9477_get_interface(dev, i);
1221 			if (!p->interface) {
1222 				if (dev->compat_interface) {
1223 					dev_warn(dev->dev,
1224 						 "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. "
1225 						 "Please update your device tree.\n",
1226 						 i);
1227 					p->interface = dev->compat_interface;
1228 				} else {
1229 					p->interface = interface;
1230 				}
1231 			}
1232 			if (interface && interface != p->interface) {
1233 				prev_msg = " instead of ";
1234 				prev_mode = phy_modes(interface);
1235 			} else {
1236 				prev_msg = "";
1237 				prev_mode = "";
1238 			}
1239 			dev_info(dev->dev,
1240 				 "Port%d: using phy mode %s%s%s\n",
1241 				 i,
1242 				 phy_modes(p->interface),
1243 				 prev_msg,
1244 				 prev_mode);
1245 
1246 			/* enable cpu port */
1247 			ksz9477_port_setup(dev, i, true);
1248 		}
1249 	}
1250 
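	/* Start all non-CPU ports in the disabled STP state. */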
1251 	for (i = 0; i < dev->info->port_cnt; i++) {
1252 		if (i == dev->cpu_port)
1253 			continue;
1254 		ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
1255 	}
1256 }
1257 
1258 int ksz9477_enable_stp_addr(struct ksz_device *dev)
1259 {
1260 	const u32 *masks;
1261 	u32 data;
1262 	int ret;
1263 
1264 	masks = dev->info->masks;
1265 
1266 	/* Enable Reserved multicast table */
1267 	ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_RESV_MCAST_ENABLE, true);
1268 
1269 	/* Set the Override bit for forwarding BPDU packets to the CPU */
1270 	ret = ksz_write32(dev, REG_SW_ALU_VAL_B,
1271 			  ALU_V_OVERRIDE | BIT(dev->cpu_port));
1272 	if (ret < 0)
1273 		return ret;
1274 
1275 	data = ALU_STAT_START | ALU_RESV_MCAST_ADDR | masks[ALU_STAT_WRITE];
1276 
1277 	ret = ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
1278 	if (ret < 0)
1279 		return ret;
1280 
1281 	/* wait to be finished */
1282 	ret = ksz9477_wait_alu_sta_ready(dev);
1283 	if (ret < 0) {
1284 		dev_err(dev->dev, "Failed to update Reserved Multicast table\n");
1285 		return ret;
1286 	}
1287 
1288 	return 0;
1289 }
1290 
1291 int ksz9477_setup(struct dsa_switch *ds)
1292 {
1293 	struct ksz_device *dev = ds->priv;
1294 	int ret = 0;
1295 
1296 	ds->mtu_enforcement_ingress = true;
1297 
1298 	/* Required for port partitioning. */
1299 	ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
1300 		      true);
1301 
1302 	/* Frame length checking does not work correctly with tail tagging. */
1303 	ksz_cfg(dev, REG_SW_MAC_CTRL_0, SW_CHECK_LENGTH, false);
1304 
1305 	/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
1306 	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
1307 
1308 	/* Now we can configure default MTU value */
1309 	ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
1310 				 VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
1311 	if (ret)
1312 		return ret;
1313 
1314 	/* queue based egress rate limit */
1315 	ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
1316 
1317 	/* enable global MIB counter freeze function */
1318 	ksz_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
1319 
1320 	/* Make sure PME (WoL) is not enabled. If requested, it will be
1321 	 * enabled by ksz9477_wol_pre_shutdown(). Otherwise, some PMICs do not
1322 	 * like PME event changes before shutdown.
1323 	 */
1324 	ksz_write8(dev, REG_SW_PME_CTRL, 0);
1325 
1326 	return 0;
1327 }
1328 
1329 u32 ksz9477_get_port_addr(int port, int offset)
1330 {
1331 	return PORT_CTRL_ADDR(port, offset);
1332 }
1333 
1334 int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
1335 {
1336 	val = val >> 8;
1337 
1338 	return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
1339 }
1340 
1341 /* The KSZ9477 provides the following HW features to accelerate
1342  * HSR frame handling:
1343  *
1344  * 1. TX PACKET DUPLICATION FROM HOST TO SWITCH
1345  * 2. RX PACKET DUPLICATION DISCARDING
1346  * 3. PREVENTING PACKET LOOP IN THE RING BY SELF-ADDRESS FILTERING
1347  *
1348  * Only the feature from point 1 has a NETIF_F_* flag available.
1349  *
1350  * The features from points 2 and 3 are "best effort" - i.e. they will
1351  * work correctly most of the time, but it may happen that some
1352  * frames will not be caught - to be more specific, there is a race
1353  * condition in hardware such that, when duplicate packets are received
1354  * on member ports very close in time to each other, the hardware fails
1355  * to detect that they are duplicates.
1356  *
1357  * Hence, the SW needs to handle those special cases. However, the
1358  * speedup is considerable when the above features are used.
1359  *
1360  * Moreover, the NETIF_F_HW_HSR_FWD feature is also enabled, as HSR frames
1361  * can be forwarded in the switch fabric between HSR ports.
1362  */
1363 #define KSZ9477_SUPPORTED_HSR_FEATURES (NETIF_F_HW_HSR_DUP | NETIF_F_HW_HSR_FWD)
1364 
1365 void ksz9477_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr)
1366 {
1367 	struct ksz_device *dev = ds->priv;
1368 	struct net_device *user;
1369 	struct dsa_port *hsr_dp;
1370 	u8 data, hsr_ports = 0;
1371 
1372 	/* Program which port(s) shall support HSR */
1373 	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), BIT(port));
1374 
1375 	/* Forward frames between HSR ports (i.e. bridge together HSR ports) */
1376 	if (dev->hsr_ports) {
1377 		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
1378 			hsr_ports |= BIT(hsr_dp->index);
1379 
1380 		hsr_ports |= BIT(dsa_upstream_port(ds, port));
1381 		dsa_hsr_foreach_port(hsr_dp, ds, hsr)
1382 			ksz9477_cfg_port_member(dev, hsr_dp->index, hsr_ports);
1383 	}
1384 
1385 	if (!dev->hsr_ports) {
1386 		/* Enable discarding of duplicate received HSR frames */
1387 		ksz_read8(dev, REG_HSR_ALU_CTRL_0__1, &data);
1388 		data |= HSR_DUPLICATE_DISCARD;
1389 		data &= ~HSR_NODE_UNICAST;
1390 		ksz_write8(dev, REG_HSR_ALU_CTRL_0__1, data);
1391 	}
1392 
1393 	/* Enable per port self-address filtering.
1394 	 * The global self-address filtering has already been enabled in the
1395 	 * ksz9477_reset_switch() function.
1396 	 */
1397 	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, true);
1398 
1399 	/* Set up HW-supported features for LAN HSR ports */
1400 	user = dsa_to_port(ds, port)->user;
1401 	user->features |= KSZ9477_SUPPORTED_HSR_FEATURES;
1402 }
1403 
1404 void ksz9477_hsr_leave(struct dsa_switch *ds, int port, struct net_device *hsr)
1405 {
1406 	struct ksz_device *dev = ds->priv;
1407 
1408 	/* Clear port HSR support */
1409 	ksz_rmw32(dev, REG_HSR_PORT_MAP__4, BIT(port), 0);
1410 
1411 	/* Disable forwarding frames between HSR ports */
1412 	ksz9477_cfg_port_member(dev, port, BIT(dsa_upstream_port(ds, port)));
1413 
1414 	/* Disable per port self-address filtering */
1415 	ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, PORT_SRC_ADDR_FILTER, false);
1416 }
1417 
1418 int ksz9477_switch_init(struct ksz_device *dev)
1419 {
1420 	u8 data8;
1421 	int ret;
1422 
1423 	dev->port_mask = (1 << dev->info->port_cnt) - 1;
1424 
1425 	/* turn off SPI DO Edge select */
1426 	ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
1427 	if (ret)
1428 		return ret;
1429 
1430 	data8 &= ~SPI_AUTO_EDGE_DETECTION;
1431 	ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
1432 	if (ret)
1433 		return ret;
1434 
1435 	return 0;
1436 }
1437 
1438 void ksz9477_switch_exit(struct ksz_device *dev)
1439 {
1440 	ksz9477_reset_switch(dev);
1441 }
1442 
1443 MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
1444 MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
1445 MODULE_LICENSE("GPL");
1446