xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c (revision 307797159ac25fe5a2048bf5c6a5718298edca57)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 
19 #include "spectrum_span.h"
20 #include "spectrum_router.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25 
26 struct mlxsw_sp_bridge_ops;
27 
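/* Driver-wide bridge state: the FDB notification polling work, the
 * configured ageing time, the list of offloaded bridge devices and the
 * bitmap used to allocate multicast (MID) indexes. Only a single
 * VLAN-aware bridge is supported, which vlan_enabled_exists tracks.
 */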
28 struct mlxsw_sp_bridge {
29 	struct mlxsw_sp *mlxsw_sp;
30 	struct {
31 		struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 		unsigned int interval; /* ms */
34 	} fdb_notify;
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
38 	u32 ageing_time;
39 	bool vlan_enabled_exists;
40 	struct list_head bridges_list;
41 	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
44 };
45 
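/* A bridge device (e.g. br0) enslaves bridge ports, each bridge port
 * keeps the VLANs configured on it, and each bridge VLAN links the
 * {port, VLAN} entries (mlxsw_sp_port_vlan) of its member ports.
 */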
46 struct mlxsw_sp_bridge_device {
47 	struct net_device *dev;
48 	struct list_head list;
49 	struct list_head ports_list;
50 	struct list_head mids_list;
51 	u8 vlan_enabled:1,
52 	   multicast_enabled:1,
53 	   mrouter:1;
54 	const struct mlxsw_sp_bridge_ops *ops;
55 };
56 
57 struct mlxsw_sp_bridge_port {
58 	struct net_device *dev;
59 	struct mlxsw_sp_bridge_device *bridge_device;
60 	struct list_head list;
61 	struct list_head vlans_list;
62 	unsigned int ref_count;
63 	u8 stp_state;
64 	unsigned long flags;
65 	bool mrouter;
66 	bool lagged;
67 	union {
68 		u16 lag_id;
69 		u16 system_port;
70 	};
71 };
72 
73 struct mlxsw_sp_bridge_vlan {
74 	struct list_head list;
75 	struct list_head port_vlan_list;
76 	u16 vid;
77 };
78 
79 struct mlxsw_sp_bridge_ops {
80 	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
81 			 struct mlxsw_sp_bridge_port *bridge_port,
82 			 struct mlxsw_sp_port *mlxsw_sp_port,
83 			 struct netlink_ext_ack *extack);
84 	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
85 			   struct mlxsw_sp_bridge_port *bridge_port,
86 			   struct mlxsw_sp_port *mlxsw_sp_port);
87 	struct mlxsw_sp_fid *
88 		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
89 			   u16 vid);
90 };
91 
92 static int
93 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
94 			       struct mlxsw_sp_bridge_port *bridge_port,
95 			       u16 fid_index);
96 
97 static void
98 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
99 			       struct mlxsw_sp_bridge_port *bridge_port);
100 
101 static void
102 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
103 				   struct mlxsw_sp_bridge_device
104 				   *bridge_device);
105 
106 static void
107 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
108 				 struct mlxsw_sp_bridge_port *bridge_port,
109 				 bool add);
110 
111 static struct mlxsw_sp_bridge_device *
112 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
113 			    const struct net_device *br_dev)
114 {
115 	struct mlxsw_sp_bridge_device *bridge_device;
116 
117 	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
118 		if (bridge_device->dev == br_dev)
119 			return bridge_device;
120 
121 	return NULL;
122 }
123 
124 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
125 					 const struct net_device *br_dev)
126 {
127 	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
128 }
129 
130 static struct mlxsw_sp_bridge_device *
131 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
132 			      struct net_device *br_dev)
133 {
134 	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
135 	struct mlxsw_sp_bridge_device *bridge_device;
136 	bool vlan_enabled = br_vlan_enabled(br_dev);
137 
138 	if (vlan_enabled && bridge->vlan_enabled_exists) {
139 		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
140 		return ERR_PTR(-EINVAL);
141 	}
142 
143 	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
144 	if (!bridge_device)
145 		return ERR_PTR(-ENOMEM);
146 
147 	bridge_device->dev = br_dev;
148 	bridge_device->vlan_enabled = vlan_enabled;
149 	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
150 	bridge_device->mrouter = br_multicast_router(br_dev);
151 	INIT_LIST_HEAD(&bridge_device->ports_list);
152 	if (vlan_enabled) {
153 		bridge->vlan_enabled_exists = true;
154 		bridge_device->ops = bridge->bridge_8021q_ops;
155 	} else {
156 		bridge_device->ops = bridge->bridge_8021d_ops;
157 	}
158 	INIT_LIST_HEAD(&bridge_device->mids_list);
159 	list_add(&bridge_device->list, &bridge->bridges_list);
160 
161 	return bridge_device;
162 }
163 
164 static void
165 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
166 			       struct mlxsw_sp_bridge_device *bridge_device)
167 {
168 	list_del(&bridge_device->list);
169 	if (bridge_device->vlan_enabled)
170 		bridge->vlan_enabled_exists = false;
171 	WARN_ON(!list_empty(&bridge_device->ports_list));
172 	WARN_ON(!list_empty(&bridge_device->mids_list));
173 	kfree(bridge_device);
174 }
175 
176 static struct mlxsw_sp_bridge_device *
177 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
178 			   struct net_device *br_dev)
179 {
180 	struct mlxsw_sp_bridge_device *bridge_device;
181 
182 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
183 	if (bridge_device)
184 		return bridge_device;
185 
186 	return mlxsw_sp_bridge_device_create(bridge, br_dev);
187 }
188 
189 static void
190 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
191 			   struct mlxsw_sp_bridge_device *bridge_device)
192 {
193 	if (list_empty(&bridge_device->ports_list))
194 		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
195 }
196 
197 static struct mlxsw_sp_bridge_port *
198 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
199 			    const struct net_device *brport_dev)
200 {
201 	struct mlxsw_sp_bridge_port *bridge_port;
202 
203 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
204 		if (bridge_port->dev == brport_dev)
205 			return bridge_port;
206 	}
207 
208 	return NULL;
209 }
210 
211 struct mlxsw_sp_bridge_port *
212 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
213 			  struct net_device *brport_dev)
214 {
215 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
216 	struct mlxsw_sp_bridge_device *bridge_device;
217 
218 	if (!br_dev)
219 		return NULL;
220 
221 	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
222 	if (!bridge_device)
223 		return NULL;
224 
225 	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
226 }
227 
228 static struct mlxsw_sp_bridge_port *
229 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
230 			    struct net_device *brport_dev)
231 {
232 	struct mlxsw_sp_bridge_port *bridge_port;
233 	struct mlxsw_sp_port *mlxsw_sp_port;
234 
235 	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
236 	if (!bridge_port)
237 		return NULL;
238 
239 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
240 	bridge_port->lagged = mlxsw_sp_port->lagged;
241 	if (bridge_port->lagged)
242 		bridge_port->lag_id = mlxsw_sp_port->lag_id;
243 	else
244 		bridge_port->system_port = mlxsw_sp_port->local_port;
245 	bridge_port->dev = brport_dev;
246 	bridge_port->bridge_device = bridge_device;
247 	bridge_port->stp_state = BR_STATE_DISABLED;
248 	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
249 			     BR_MCAST_FLOOD;
250 	INIT_LIST_HEAD(&bridge_port->vlans_list);
251 	list_add(&bridge_port->list, &bridge_device->ports_list);
252 	bridge_port->ref_count = 1;
253 
254 	return bridge_port;
255 }
256 
257 static void
258 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
259 {
260 	list_del(&bridge_port->list);
261 	WARN_ON(!list_empty(&bridge_port->vlans_list));
262 	kfree(bridge_port);
263 }
264 
265 static bool
266 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
267 				    bridge_port)
268 {
269 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
270 
271 	/* In case ports were pulled out of a bridged LAG, it is
272 	 * possible the reference count isn't zero, yet the bridge
273 	 * port should be destroyed, as it's no longer an upper of ours.
274 	 */
275 	if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
276 		return true;
277 	else if (bridge_port->ref_count == 0)
278 		return true;
279 	else
280 		return false;
281 }
282 
283 static struct mlxsw_sp_bridge_port *
284 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
285 			 struct net_device *brport_dev)
286 {
287 	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
288 	struct mlxsw_sp_bridge_device *bridge_device;
289 	struct mlxsw_sp_bridge_port *bridge_port;
290 	int err;
291 
292 	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
293 	if (bridge_port) {
294 		bridge_port->ref_count++;
295 		return bridge_port;
296 	}
297 
298 	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
299 	if (IS_ERR(bridge_device))
300 		return ERR_CAST(bridge_device);
301 
302 	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
303 	if (!bridge_port) {
304 		err = -ENOMEM;
305 		goto err_bridge_port_create;
306 	}
307 
308 	return bridge_port;
309 
310 err_bridge_port_create:
311 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
312 	return ERR_PTR(err);
313 }
314 
315 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
316 				     struct mlxsw_sp_bridge_port *bridge_port)
317 {
318 	struct mlxsw_sp_bridge_device *bridge_device;
319 
320 	bridge_port->ref_count--;
321 	if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
322 		return;
323 	bridge_device = bridge_port->bridge_device;
324 	mlxsw_sp_bridge_port_destroy(bridge_port);
325 	mlxsw_sp_bridge_device_put(bridge, bridge_device);
326 }
327 
328 static struct mlxsw_sp_port_vlan *
329 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
330 				  const struct mlxsw_sp_bridge_device *
331 				  bridge_device,
332 				  u16 vid)
333 {
334 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
335 
336 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
337 			    list) {
338 		if (!mlxsw_sp_port_vlan->bridge_port)
339 			continue;
340 		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
341 		    bridge_device)
342 			continue;
343 		if (bridge_device->vlan_enabled &&
344 		    mlxsw_sp_port_vlan->vid != vid)
345 			continue;
346 		return mlxsw_sp_port_vlan;
347 	}
348 
349 	return NULL;
350 }
351 
352 static struct mlxsw_sp_port_vlan *
353 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
354 			       u16 fid_index)
355 {
356 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
357 
358 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
359 			    list) {
360 		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
361 
362 		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
363 			return mlxsw_sp_port_vlan;
364 	}
365 
366 	return NULL;
367 }
368 
369 static struct mlxsw_sp_bridge_vlan *
370 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
371 			  u16 vid)
372 {
373 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
374 
375 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
376 		if (bridge_vlan->vid == vid)
377 			return bridge_vlan;
378 	}
379 
380 	return NULL;
381 }
382 
383 static struct mlxsw_sp_bridge_vlan *
384 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
385 {
386 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
387 
388 	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
389 	if (!bridge_vlan)
390 		return NULL;
391 
392 	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
393 	bridge_vlan->vid = vid;
394 	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
395 
396 	return bridge_vlan;
397 }
398 
399 static void
400 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
401 {
402 	list_del(&bridge_vlan->list);
403 	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
404 	kfree(bridge_vlan);
405 }
406 
407 static struct mlxsw_sp_bridge_vlan *
408 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
409 {
410 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
411 
412 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
413 	if (bridge_vlan)
414 		return bridge_vlan;
415 
416 	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
417 }
418 
419 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
420 {
421 	if (list_empty(&bridge_vlan->port_vlan_list))
422 		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
423 }
424 
425 static void mlxsw_sp_port_bridge_flags_get(struct mlxsw_sp_bridge *bridge,
426 					   struct net_device *dev,
427 					   unsigned long *brport_flags)
428 {
429 	struct mlxsw_sp_bridge_port *bridge_port;
430 
431 	bridge_port = mlxsw_sp_bridge_port_find(bridge, dev);
432 	if (WARN_ON(!bridge_port))
433 		return;
434 
435 	memcpy(brport_flags, &bridge_port->flags, sizeof(*brport_flags));
436 }
437 
438 static int mlxsw_sp_port_attr_get(struct net_device *dev,
439 				  struct switchdev_attr *attr)
440 {
441 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
442 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
443 
444 	switch (attr->id) {
445 	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
446 		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
447 		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
448 		       attr->u.ppid.id_len);
449 		break;
450 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
451 		mlxsw_sp_port_bridge_flags_get(mlxsw_sp->bridge, attr->orig_dev,
452 					       &attr->u.brport_flags);
453 		break;
454 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT:
455 		attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD |
456 					       BR_MCAST_FLOOD;
457 		break;
458 	default:
459 		return -EOPNOTSUPP;
460 	}
461 
462 	return 0;
463 }
464 
465 static int
466 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
467 				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
468 				  u8 state)
469 {
470 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
471 
472 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
473 			    bridge_vlan_node) {
474 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
475 			continue;
476 		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
477 						 bridge_vlan->vid, state);
478 	}
479 
480 	return 0;
481 }
482 
483 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
484 					    struct switchdev_trans *trans,
485 					    struct net_device *orig_dev,
486 					    u8 state)
487 {
488 	struct mlxsw_sp_bridge_port *bridge_port;
489 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
490 	int err;
491 
492 	if (switchdev_trans_ph_prepare(trans))
493 		return 0;
494 
495 	/* It's possible we failed to enslave the port, yet this
496 	 * operation is executed due to it being deferred.
497 	 */
498 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
499 						orig_dev);
500 	if (!bridge_port)
501 		return 0;
502 
503 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
504 		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
505 							bridge_vlan, state);
506 		if (err)
507 			goto err_port_bridge_vlan_stp_set;
508 	}
509 
510 	bridge_port->stp_state = state;
511 
512 	return 0;
513 
514 err_port_bridge_vlan_stp_set:
515 	list_for_each_entry_continue_reverse(bridge_vlan,
516 					     &bridge_port->vlans_list, list)
517 		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
518 						  bridge_port->stp_state);
519 	return err;
520 }
521 
522 static int
523 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
524 				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
525 				    enum mlxsw_sp_flood_type packet_type,
526 				    bool member)
527 {
528 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
529 
530 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
531 			    bridge_vlan_node) {
532 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
533 			continue;
534 		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
535 					      packet_type,
536 					      mlxsw_sp_port->local_port,
537 					      member);
538 	}
539 
540 	return 0;
541 }
542 
543 static int
544 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
545 				     struct mlxsw_sp_bridge_port *bridge_port,
546 				     enum mlxsw_sp_flood_type packet_type,
547 				     bool member)
548 {
549 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
550 	int err;
551 
552 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
553 		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
554 							  bridge_vlan,
555 							  packet_type,
556 							  member);
557 		if (err)
558 			goto err_port_bridge_vlan_flood_set;
559 	}
560 
561 	return 0;
562 
563 err_port_bridge_vlan_flood_set:
564 	list_for_each_entry_continue_reverse(bridge_vlan,
565 					     &bridge_port->vlans_list, list)
566 		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
567 						    packet_type, !member);
568 	return err;
569 }
570 
571 static int
572 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
573 				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
574 				       bool set)
575 {
576 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
577 	u16 vid = bridge_vlan->vid;
578 
579 	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
580 			    bridge_vlan_node) {
581 		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
582 			continue;
583 		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
584 	}
585 
586 	return 0;
587 }
588 
589 static int
590 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
591 				  struct mlxsw_sp_bridge_port *bridge_port,
592 				  bool set)
593 {
594 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
595 	int err;
596 
597 	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
598 		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
599 							     bridge_vlan, set);
600 		if (err)
601 			goto err_port_bridge_vlan_learning_set;
602 	}
603 
604 	return 0;
605 
606 err_port_bridge_vlan_learning_set:
607 	list_for_each_entry_continue_reverse(bridge_vlan,
608 					     &bridge_port->vlans_list, list)
609 		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
610 						       bridge_vlan, !set);
611 	return err;
612 }
613 
614 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
615 					   struct switchdev_trans *trans,
616 					   struct net_device *orig_dev,
617 					   unsigned long brport_flags)
618 {
619 	struct mlxsw_sp_bridge_port *bridge_port;
620 	int err;
621 
622 	if (switchdev_trans_ph_prepare(trans))
623 		return 0;
624 
625 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
626 						orig_dev);
627 	if (!bridge_port)
628 		return 0;
629 
630 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
631 						   MLXSW_SP_FLOOD_TYPE_UC,
632 						   brport_flags & BR_FLOOD);
633 	if (err)
634 		return err;
635 
636 	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
637 						brport_flags & BR_LEARNING);
638 	if (err)
639 		return err;
640 
641 	if (bridge_port->bridge_device->multicast_enabled)
642 		goto out;
643 
644 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
645 						   MLXSW_SP_FLOOD_TYPE_MC,
646 						   brport_flags &
647 						   BR_MCAST_FLOOD);
648 	if (err)
649 		return err;
650 
651 out:
652 	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
653 	return 0;
654 }
655 
656 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
657 {
658 	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
659 	int err;
660 
661 	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
662 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
663 	if (err)
664 		return err;
665 	mlxsw_sp->bridge->ageing_time = ageing_time;
666 	return 0;
667 }
668 
669 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
670 					    struct switchdev_trans *trans,
671 					    unsigned long ageing_clock_t)
672 {
673 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
674 	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
675 	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
676 
677 	if (switchdev_trans_ph_prepare(trans)) {
678 		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
679 		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
680 			return -ERANGE;
681 		else
682 			return 0;
683 	}
684 
685 	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
686 }
687 
688 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
689 					  struct switchdev_trans *trans,
690 					  struct net_device *orig_dev,
691 					  bool vlan_enabled)
692 {
693 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
694 	struct mlxsw_sp_bridge_device *bridge_device;
695 
696 	if (!switchdev_trans_ph_prepare(trans))
697 		return 0;
698 
699 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
700 	if (WARN_ON(!bridge_device))
701 		return -EINVAL;
702 
703 	if (bridge_device->vlan_enabled == vlan_enabled)
704 		return 0;
705 
706 	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
707 	return -EINVAL;
708 }
709 
710 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
711 					  struct switchdev_trans *trans,
712 					  struct net_device *orig_dev,
713 					  bool is_port_mrouter)
714 {
715 	struct mlxsw_sp_bridge_port *bridge_port;
716 	int err;
717 
718 	if (switchdev_trans_ph_prepare(trans))
719 		return 0;
720 
721 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
722 						orig_dev);
723 	if (!bridge_port)
724 		return 0;
725 
726 	if (!bridge_port->bridge_device->multicast_enabled)
727 		goto out;
728 
729 	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
730 						   MLXSW_SP_FLOOD_TYPE_MC,
731 						   is_port_mrouter);
732 	if (err)
733 		return err;
734 
735 	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
736 					 is_port_mrouter);
737 out:
738 	bridge_port->mrouter = is_port_mrouter;
739 	return 0;
740 }
741 
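/* Decide whether the bridge port should be a member of the multicast
 * flood table: with multicast snooping enabled only mrouter ports are
 * flooded to, otherwise the per-port BR_MCAST_FLOOD flag decides.
 */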
742 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
743 {
744 	const struct mlxsw_sp_bridge_device *bridge_device;
745 
746 	bridge_device = bridge_port->bridge_device;
747 	return bridge_device->multicast_enabled ? bridge_port->mrouter :
748 					bridge_port->flags & BR_MCAST_FLOOD;
749 }
750 
751 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
752 					 struct switchdev_trans *trans,
753 					 struct net_device *orig_dev,
754 					 bool mc_disabled)
755 {
756 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
757 	struct mlxsw_sp_bridge_device *bridge_device;
758 	struct mlxsw_sp_bridge_port *bridge_port;
759 	int err;
760 
761 	if (switchdev_trans_ph_prepare(trans))
762 		return 0;
763 
764 	/* It's possible we failed to enslave the port, yet this
765 	 * operation is executed due to it being deferred.
766 	 */
767 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
768 	if (!bridge_device)
769 		return 0;
770 
771 	if (bridge_device->multicast_enabled != !mc_disabled) {
772 		bridge_device->multicast_enabled = !mc_disabled;
773 		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
774 						   bridge_device);
775 	}
776 
777 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
778 		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
779 		bool member = mlxsw_sp_mc_flood(bridge_port);
780 
781 		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
782 							   bridge_port,
783 							   packet_type, member);
784 		if (err)
785 			return err;
786 	}
787 
788 	bridge_device->multicast_enabled = !mc_disabled;
789 
790 	return 0;
791 }
792 
793 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
794 					 u16 mid_idx, bool add)
795 {
796 	char *smid_pl;
797 	int err;
798 
799 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
800 	if (!smid_pl)
801 		return -ENOMEM;
802 
803 	mlxsw_reg_smid_pack(smid_pl, mid_idx,
804 			    mlxsw_sp_router_port(mlxsw_sp), add);
805 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
806 	kfree(smid_pl);
807 	return err;
808 }
809 
810 static void
811 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
812 				   struct mlxsw_sp_bridge_device *bridge_device,
813 				   bool add)
814 {
815 	struct mlxsw_sp_mid *mid;
816 
817 	list_for_each_entry(mid, &bridge_device->mids_list, list)
818 		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
819 }
820 
821 static int
822 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
823 				  struct switchdev_trans *trans,
824 				  struct net_device *orig_dev,
825 				  bool is_mrouter)
826 {
827 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
828 	struct mlxsw_sp_bridge_device *bridge_device;
829 
830 	if (switchdev_trans_ph_prepare(trans))
831 		return 0;
832 
833 	/* It's possible we failed to enslave the port, yet this
834 	 * operation is executed due to it being deferred.
835 	 */
836 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
837 	if (!bridge_device)
838 		return 0;
839 
840 	if (bridge_device->mrouter != is_mrouter)
841 		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
842 						   is_mrouter);
843 	bridge_device->mrouter = is_mrouter;
844 	return 0;
845 }
846 
847 static int mlxsw_sp_port_attr_set(struct net_device *dev,
848 				  const struct switchdev_attr *attr,
849 				  struct switchdev_trans *trans)
850 {
851 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
852 	int err;
853 
854 	switch (attr->id) {
855 	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
856 		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
857 						       attr->orig_dev,
858 						       attr->u.stp_state);
859 		break;
860 	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
861 		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
862 						      attr->orig_dev,
863 						      attr->u.brport_flags);
864 		break;
865 	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
866 		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
867 						       attr->u.ageing_time);
868 		break;
869 	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
870 		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
871 						     attr->orig_dev,
872 						     attr->u.vlan_filtering);
873 		break;
874 	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
875 		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
876 						     attr->orig_dev,
877 						     attr->u.mrouter);
878 		break;
879 	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
880 		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
881 						    attr->orig_dev,
882 						    attr->u.mc_disabled);
883 		break;
884 	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
885 		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
886 							attr->orig_dev,
887 							attr->u.mrouter);
888 		break;
889 	default:
890 		err = -EOPNOTSUPP;
891 		break;
892 	}
893 
894 	if (switchdev_trans_ph_commit(trans))
895 		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
896 
897 	return err;
898 }
899 
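/* Bind the {port, VLAN} to the FID returned by the bridge's fid_get()
 * operation: configure unicast, multicast and broadcast flooding for
 * the local port and map the {port, VLAN} to the FID.
 */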
900 static int
901 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
902 			    struct mlxsw_sp_bridge_port *bridge_port)
903 {
904 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
905 	struct mlxsw_sp_bridge_device *bridge_device;
906 	u8 local_port = mlxsw_sp_port->local_port;
907 	u16 vid = mlxsw_sp_port_vlan->vid;
908 	struct mlxsw_sp_fid *fid;
909 	int err;
910 
911 	bridge_device = bridge_port->bridge_device;
912 	fid = bridge_device->ops->fid_get(bridge_device, vid);
913 	if (IS_ERR(fid))
914 		return PTR_ERR(fid);
915 
916 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
917 				     bridge_port->flags & BR_FLOOD);
918 	if (err)
919 		goto err_fid_uc_flood_set;
920 
921 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
922 				     mlxsw_sp_mc_flood(bridge_port));
923 	if (err)
924 		goto err_fid_mc_flood_set;
925 
926 	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
927 				     true);
928 	if (err)
929 		goto err_fid_bc_flood_set;
930 
931 	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
932 	if (err)
933 		goto err_fid_port_vid_map;
934 
935 	mlxsw_sp_port_vlan->fid = fid;
936 
937 	return 0;
938 
939 err_fid_port_vid_map:
940 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
941 err_fid_bc_flood_set:
942 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
943 err_fid_mc_flood_set:
944 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
945 err_fid_uc_flood_set:
946 	mlxsw_sp_fid_put(fid);
947 	return err;
948 }
949 
950 static void
951 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
952 {
953 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
954 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
955 	u8 local_port = mlxsw_sp_port->local_port;
956 	u16 vid = mlxsw_sp_port_vlan->vid;
957 
958 	mlxsw_sp_port_vlan->fid = NULL;
959 	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
960 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
961 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
962 	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
963 	mlxsw_sp_fid_put(fid);
964 }
965 
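/* Determine the new PVID: the added VLAN itself if it is flagged as
 * PVID, zero (untagged traffic disallowed) if the current PVID is
 * re-added without the PVID flag, or the current PVID otherwise.
 */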
966 static u16
967 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
968 			     u16 vid, bool is_pvid)
969 {
970 	if (is_pvid)
971 		return vid;
972 	else if (mlxsw_sp_port->pvid == vid)
973 		return 0;	/* Disallow untagged packets */
974 	else
975 		return mlxsw_sp_port->pvid;
976 }
977 
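/* Attach the {port, VLAN} to its bridge port: join the FID, apply the
 * bridge port's learning and STP settings to the VLAN and link it into
 * the corresponding bridge VLAN, taking a reference on the bridge port.
 */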
978 static int
979 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
980 			       struct mlxsw_sp_bridge_port *bridge_port)
981 {
982 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
983 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
984 	u16 vid = mlxsw_sp_port_vlan->vid;
985 	int err;
986 
987 	/* No need to continue if only VLAN flags were changed */
988 	if (mlxsw_sp_port_vlan->bridge_port) {
989 		mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
990 		return 0;
991 	}
992 
993 	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
994 	if (err)
995 		return err;
996 
997 	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
998 					     bridge_port->flags & BR_LEARNING);
999 	if (err)
1000 		goto err_port_vid_learning_set;
1001 
1002 	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
1003 					bridge_port->stp_state);
1004 	if (err)
1005 		goto err_port_vid_stp_set;
1006 
1007 	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
1008 	if (!bridge_vlan) {
1009 		err = -ENOMEM;
1010 		goto err_bridge_vlan_get;
1011 	}
1012 
1013 	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1014 		 &bridge_vlan->port_vlan_list);
1015 
1016 	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1017 				 bridge_port->dev);
1018 	mlxsw_sp_port_vlan->bridge_port = bridge_port;
1019 
1020 	return 0;
1021 
1022 err_bridge_vlan_get:
1023 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1024 err_port_vid_stp_set:
1025 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1026 err_port_vid_learning_set:
1027 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1028 	return err;
1029 }
1030 
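/* Reverse of mlxsw_sp_port_vlan_bridge_join(): unlink from the bridge
 * VLAN, disable learning and STP for the VLAN, flush the FDB and MDB
 * when the last user goes away, leave the FID and drop the bridge port
 * reference.
 */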
1031 void
1032 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1033 {
1034 	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1035 	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1036 	struct mlxsw_sp_bridge_vlan *bridge_vlan;
1037 	struct mlxsw_sp_bridge_port *bridge_port;
1038 	u16 vid = mlxsw_sp_port_vlan->vid;
1039 	bool last_port, last_vlan;
1040 
1041 	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1042 		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1043 		return;
1044 
1045 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
1046 	last_vlan = list_is_singular(&bridge_port->vlans_list);
1047 	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1048 	last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1049 
1050 	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1051 	mlxsw_sp_bridge_vlan_put(bridge_vlan);
1052 	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1053 	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1054 	if (last_port)
1055 		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1056 					       bridge_port,
1057 					       mlxsw_sp_fid_index(fid));
1058 	if (last_vlan)
1059 		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1060 
1061 	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1062 
1063 	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1064 	mlxsw_sp_port_vlan->bridge_port = NULL;
1065 }
1066 
1067 static int
1068 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1069 			      struct mlxsw_sp_bridge_port *bridge_port,
1070 			      u16 vid, bool is_untagged, bool is_pvid)
1071 {
1072 	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1073 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1074 	u16 old_pvid = mlxsw_sp_port->pvid;
1075 	int err;
1076 
1077 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
1078 	if (IS_ERR(mlxsw_sp_port_vlan))
1079 		return PTR_ERR(mlxsw_sp_port_vlan);
1080 
1081 	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1082 				     is_untagged);
1083 	if (err)
1084 		goto err_port_vlan_set;
1085 
1086 	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1087 	if (err)
1088 		goto err_port_pvid_set;
1089 
1090 	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1091 	if (err)
1092 		goto err_port_vlan_bridge_join;
1093 
1094 	return 0;
1095 
1096 err_port_vlan_bridge_join:
1097 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1098 err_port_pvid_set:
1099 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1100 err_port_vlan_set:
1101 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1102 	return err;
1103 }
1104 
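/* If the bridge has a router interface (RIF), the RIF is bound to the
 * FID of the bridge's PVID. Veto any VLAN change that would modify or
 * remove that PVID.
 */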
1105 static int
1106 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1107 				const struct net_device *br_dev,
1108 				const struct switchdev_obj_port_vlan *vlan)
1109 {
1110 	struct mlxsw_sp_rif *rif;
1111 	struct mlxsw_sp_fid *fid;
1112 	u16 pvid;
1113 	u16 vid;
1114 
1115 	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1116 	if (!rif)
1117 		return 0;
1118 	fid = mlxsw_sp_rif_fid(rif);
1119 	pvid = mlxsw_sp_fid_8021q_vid(fid);
1120 
1121 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1122 		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1123 			if (vid != pvid) {
1124 				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1125 				return -EBUSY;
1126 			}
1127 		} else {
1128 			if (vid == pvid) {
1129 				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1130 				return -EBUSY;
1131 			}
1132 		}
1133 	}
1134 
1135 	return 0;
1136 }
1137 
1138 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1139 				   const struct switchdev_obj_port_vlan *vlan,
1140 				   struct switchdev_trans *trans)
1141 {
1142 	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1143 	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1144 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1145 	struct net_device *orig_dev = vlan->obj.orig_dev;
1146 	struct mlxsw_sp_bridge_port *bridge_port;
1147 	u16 vid;
1148 
1149 	if (netif_is_bridge_master(orig_dev)) {
1150 		int err = 0;
1151 
1152 		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1153 		    br_vlan_enabled(orig_dev) &&
1154 		    switchdev_trans_ph_prepare(trans))
1155 			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1156 							      orig_dev, vlan);
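		/* VLANs on the bridge device itself are not offloaded, so
		 * return -EOPNOTSUPP unless the PVID check above already
		 * vetoed the operation.
		 */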
1157 		if (!err)
1158 			err = -EOPNOTSUPP;
1159 		return err;
1160 	}
1161 
1162 	if (switchdev_trans_ph_prepare(trans))
1163 		return 0;
1164 
1165 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1166 	if (WARN_ON(!bridge_port))
1167 		return -EINVAL;
1168 
1169 	if (!bridge_port->bridge_device->vlan_enabled)
1170 		return 0;
1171 
1172 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1173 		int err;
1174 
1175 		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1176 						    vid, flag_untagged,
1177 						    flag_pvid);
1178 		if (err)
1179 			return err;
1180 	}
1181 
1182 	return 0;
1183 }
1184 
1185 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1186 {
1187 	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1188 			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1189 }
1190 
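/* Flush the FDB entries associated with this bridge port (keyed by
 * system port or LAG ID) and the given FID via the SFDF register.
 */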
1191 static int
1192 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1193 			       struct mlxsw_sp_bridge_port *bridge_port,
1194 			       u16 fid_index)
1195 {
1196 	bool lagged = bridge_port->lagged;
1197 	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1198 	u16 system_port;
1199 
1200 	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1201 	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1202 	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1203 	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1204 
1205 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1206 }
1207 
1208 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1209 {
1210 	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1211 			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1212 }
1213 
1214 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1215 {
1216 	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1217 			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1218 }
1219 
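/* Add or remove a single unicast FDB record via the SFD register. The
 * device reports back how many records it processed; a mismatch means
 * the record was not committed and is reported as -EBUSY.
 */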
1220 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1221 				     const char *mac, u16 fid, bool adding,
1222 				     enum mlxsw_reg_sfd_rec_action action,
1223 				     bool dynamic)
1224 {
1225 	char *sfd_pl;
1226 	u8 num_rec;
1227 	int err;
1228 
1229 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1230 	if (!sfd_pl)
1231 		return -ENOMEM;
1232 
1233 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1234 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1235 			      mac, fid, action, local_port);
1236 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1237 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1238 	if (err)
1239 		goto out;
1240 
1241 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1242 		err = -EBUSY;
1243 
1244 out:
1245 	kfree(sfd_pl);
1246 	return err;
1247 }
1248 
1249 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1250 				   const char *mac, u16 fid, bool adding,
1251 				   bool dynamic)
1252 {
1253 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1254 					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
1255 }
1256 
1257 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1258 			bool adding)
1259 {
1260 	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1261 					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1262 					 false);
1263 }
1264 
1265 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1266 				       const char *mac, u16 fid, u16 lag_vid,
1267 				       bool adding, bool dynamic)
1268 {
1269 	char *sfd_pl;
1270 	u8 num_rec;
1271 	int err;
1272 
1273 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1274 	if (!sfd_pl)
1275 		return -ENOMEM;
1276 
1277 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1278 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1279 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1280 				  lag_vid, lag_id);
1281 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1282 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1283 	if (err)
1284 		goto out;
1285 
1286 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1287 		err = -EBUSY;
1288 
1289 out:
1290 	kfree(sfd_pl);
1291 	return err;
1292 }
1293 
1294 static int
1295 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1296 		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1297 {
1298 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1299 	struct net_device *orig_dev = fdb_info->info.dev;
1300 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1301 	struct mlxsw_sp_bridge_device *bridge_device;
1302 	struct mlxsw_sp_bridge_port *bridge_port;
1303 	u16 fid_index, vid;
1304 
1305 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1306 	if (!bridge_port)
1307 		return -EINVAL;
1308 
1309 	bridge_device = bridge_port->bridge_device;
1310 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1311 							       bridge_device,
1312 							       fdb_info->vid);
1313 	if (!mlxsw_sp_port_vlan)
1314 		return 0;
1315 
1316 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1317 	vid = mlxsw_sp_port_vlan->vid;
1318 
1319 	if (!bridge_port->lagged)
1320 		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1321 					       bridge_port->system_port,
1322 					       fdb_info->addr, fid_index,
1323 					       adding, false);
1324 	else
1325 		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1326 						   bridge_port->lag_id,
1327 						   fdb_info->addr, fid_index,
1328 						   vid, adding, false);
1329 }
1330 
1331 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1332 				u16 fid, u16 mid_idx, bool adding)
1333 {
1334 	char *sfd_pl;
1335 	u8 num_rec;
1336 	int err;
1337 
1338 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1339 	if (!sfd_pl)
1340 		return -ENOMEM;
1341 
1342 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1343 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1344 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1345 	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1346 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1347 	if (err)
1348 		goto out;
1349 
1350 	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1351 		err = -EBUSY;
1352 
1353 out:
1354 	kfree(sfd_pl);
1355 	return err;
1356 }
1357 
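/* Program a complete SMID entry: include all existing ports and the
 * router port in the write mask, so ports outside the group are
 * explicitly cleared, then mark the ports in ports_bitmap, and the
 * router port if requested, as members.
 */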
1358 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1359 					 long *ports_bitmap,
1360 					 bool set_router_port)
1361 {
1362 	char *smid_pl;
1363 	int err, i;
1364 
1365 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1366 	if (!smid_pl)
1367 		return -ENOMEM;
1368 
1369 	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
1370 	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1371 		if (mlxsw_sp->ports[i])
1372 			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1373 	}
1374 
1375 	mlxsw_reg_smid_port_mask_set(smid_pl,
1376 				     mlxsw_sp_router_port(mlxsw_sp), 1);
1377 
1378 	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1379 		mlxsw_reg_smid_port_set(smid_pl, i, 1);
1380 
1381 	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1382 				set_router_port);
1383 
1384 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1385 	kfree(smid_pl);
1386 	return err;
1387 }
1388 
1389 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1390 				  u16 mid_idx, bool add)
1391 {
1392 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1393 	char *smid_pl;
1394 	int err;
1395 
1396 	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1397 	if (!smid_pl)
1398 		return -ENOMEM;
1399 
1400 	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1401 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1402 	kfree(smid_pl);
1403 	return err;
1404 }
1405 
1406 static struct
1407 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1408 				const unsigned char *addr,
1409 				u16 fid)
1410 {
1411 	struct mlxsw_sp_mid *mid;
1412 
1413 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1414 		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1415 			return mid;
1416 	}
1417 	return NULL;
1418 }
1419 
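/* Set the bits of the local ports behind this bridge port in
 * ports_bitmap: the single system port for a physical port, or all
 * current members for a LAG.
 */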
1420 static void
1421 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1422 				      struct mlxsw_sp_bridge_port *bridge_port,
1423 				      unsigned long *ports_bitmap)
1424 {
1425 	struct mlxsw_sp_port *mlxsw_sp_port;
1426 	u64 max_lag_members, i;
1427 	int lag_id;
1428 
1429 	if (!bridge_port->lagged) {
1430 		set_bit(bridge_port->system_port, ports_bitmap);
1431 	} else {
1432 		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1433 						     MAX_LAG_MEMBERS);
1434 		lag_id = bridge_port->lag_id;
1435 		for (i = 0; i < max_lag_members; i++) {
1436 			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1437 								 lag_id, i);
1438 			if (mlxsw_sp_port)
1439 				set_bit(mlxsw_sp_port->local_port,
1440 					ports_bitmap);
1441 		}
1442 	}
1443 }
1444 
1445 static void
1446 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1447 				struct mlxsw_sp_bridge_device *bridge_device,
1448 				struct mlxsw_sp *mlxsw_sp)
1449 {
1450 	struct mlxsw_sp_bridge_port *bridge_port;
1451 
1452 	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1453 		if (bridge_port->mrouter) {
1454 			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1455 							      bridge_port,
1456 							      flood_bitmap);
1457 		}
1458 	}
1459 }
1460 
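/* Commit an MDB entry to the device: allocate a free MID index, build
 * the flood bitmap from the group's member ports plus all mrouter
 * ports, program it via SMID, add the MC record via SFD and mark the
 * entry as present in hardware.
 */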
1461 static bool
1462 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1463 			    struct mlxsw_sp_mid *mid,
1464 			    struct mlxsw_sp_bridge_device *bridge_device)
1465 {
1466 	long *flood_bitmap;
1467 	int num_of_ports;
1468 	int alloc_size;
1469 	u16 mid_idx;
1470 	int err;
1471 
1472 	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1473 				      MLXSW_SP_MID_MAX);
1474 	if (mid_idx == MLXSW_SP_MID_MAX)
1475 		return false;
1476 
1477 	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1478 	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1479 	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1480 	if (!flood_bitmap)
1481 		return false;
1482 
1483 	bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1484 	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1485 
1486 	mid->mid = mid_idx;
1487 	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1488 					    bridge_device->mrouter);
1489 	kfree(flood_bitmap);
1490 	if (err)
1491 		return false;
1492 
1493 	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1494 				   true);
1495 	if (err)
1496 		return false;
1497 
1498 	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1499 	mid->in_hw = true;
1500 	return true;
1501 }
1502 
1503 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1504 					struct mlxsw_sp_mid *mid)
1505 {
1506 	if (!mid->in_hw)
1507 		return 0;
1508 
1509 	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1510 	mid->in_hw = false;
1511 	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1512 				    false);
1513 }
1514 
1515 static struct
1516 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1517 				  struct mlxsw_sp_bridge_device *bridge_device,
1518 				  const unsigned char *addr,
1519 				  u16 fid)
1520 {
1521 	struct mlxsw_sp_mid *mid;
1522 	size_t alloc_size;
1523 
1524 	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1525 	if (!mid)
1526 		return NULL;
1527 
1528 	alloc_size = sizeof(unsigned long) *
1529 		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1530 
1531 	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1532 	if (!mid->ports_in_mid)
1533 		goto err_ports_in_mid_alloc;
1534 
1535 	ether_addr_copy(mid->addr, addr);
1536 	mid->fid = fid;
1537 	mid->in_hw = false;
1538 
1539 	if (!bridge_device->multicast_enabled)
1540 		goto out;
1541 
1542 	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1543 		goto err_write_mdb_entry;
1544 
1545 out:
1546 	list_add_tail(&mid->list, &bridge_device->mids_list);
1547 	return mid;
1548 
1549 err_write_mdb_entry:
1550 	kfree(mid->ports_in_mid);
1551 err_ports_in_mid_alloc:
1552 	kfree(mid);
1553 	return NULL;
1554 }
1555 
1556 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1557 					 struct mlxsw_sp_mid *mid)
1558 {
1559 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1560 	int err = 0;
1561 
1562 	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1563 	if (bitmap_empty(mid->ports_in_mid,
1564 			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1565 		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1566 		list_del(&mid->list);
1567 		kfree(mid->ports_in_mid);
1568 		kfree(mid);
1569 	}
1570 	return err;
1571 }
1572 
1573 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1574 				 const struct switchdev_obj_port_mdb *mdb,
1575 				 struct switchdev_trans *trans)
1576 {
1577 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1578 	struct net_device *orig_dev = mdb->obj.orig_dev;
1579 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1580 	struct net_device *dev = mlxsw_sp_port->dev;
1581 	struct mlxsw_sp_bridge_device *bridge_device;
1582 	struct mlxsw_sp_bridge_port *bridge_port;
1583 	struct mlxsw_sp_mid *mid;
1584 	u16 fid_index;
1585 	int err = 0;
1586 
1587 	if (switchdev_trans_ph_prepare(trans))
1588 		return 0;
1589 
1590 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1591 	if (!bridge_port)
1592 		return 0;
1593 
1594 	bridge_device = bridge_port->bridge_device;
1595 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1596 							       bridge_device,
1597 							       mdb->vid);
1598 	if (!mlxsw_sp_port_vlan)
1599 		return 0;
1600 
1601 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1602 
1603 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1604 	if (!mid) {
1605 		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1606 					  fid_index);
1607 		if (!mid) {
1608 			netdev_err(dev, "Unable to allocate MC group\n");
1609 			return -ENOMEM;
1610 		}
1611 	}
1612 	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1613 
1614 	if (!bridge_device->multicast_enabled)
1615 		return 0;
1616 
1617 	if (bridge_port->mrouter)
1618 		return 0;
1619 
1620 	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1621 	if (err) {
1622 		netdev_err(dev, "Unable to set SMID\n");
1623 		goto err_out;
1624 	}
1625 
1626 	return 0;
1627 
1628 err_out:
1629 	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1630 	return err;
1631 }
1632 
1633 static void
1634 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1635 				   struct mlxsw_sp_bridge_device
1636 				   *bridge_device)
1637 {
1638 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1639 	struct mlxsw_sp_mid *mid;
1640 	bool mc_enabled;
1641 
1642 	mc_enabled = bridge_device->multicast_enabled;
1643 
1644 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1645 		if (mc_enabled)
1646 			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1647 						    bridge_device);
1648 		else
1649 			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1650 	}
1651 }
1652 
1653 static void
1654 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1655 				 struct mlxsw_sp_bridge_port *bridge_port,
1656 				 bool add)
1657 {
1658 	struct mlxsw_sp_bridge_device *bridge_device;
1659 	struct mlxsw_sp_mid *mid;
1660 
1661 	bridge_device = bridge_port->bridge_device;
1662 
1663 	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1664 		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1665 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1666 	}
1667 }
1668 
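/* Bridge configuration changes may require SPAN (mirroring) entries to
 * be re-resolved. The respin is deferred to a work item that runs under
 * RTNL; see the comment in mlxsw_sp_port_obj_add() for why it is not
 * done synchronously.
 */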
1669 struct mlxsw_sp_span_respin_work {
1670 	struct work_struct work;
1671 	struct mlxsw_sp *mlxsw_sp;
1672 };
1673 
1674 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1675 {
1676 	struct mlxsw_sp_span_respin_work *respin_work =
1677 		container_of(work, struct mlxsw_sp_span_respin_work, work);
1678 
1679 	rtnl_lock();
1680 	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1681 	rtnl_unlock();
1682 	kfree(respin_work);
1683 }
1684 
1685 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
1686 {
1687 	struct mlxsw_sp_span_respin_work *respin_work;
1688 
1689 	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
1690 	if (!respin_work)
1691 		return;
1692 
1693 	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
1694 	respin_work->mlxsw_sp = mlxsw_sp;
1695 
1696 	mlxsw_core_schedule_work(&respin_work->work);
1697 }
1698 
1699 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1700 				 const struct switchdev_obj *obj,
1701 				 struct switchdev_trans *trans)
1702 {
1703 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1704 	const struct switchdev_obj_port_vlan *vlan;
1705 	int err = 0;
1706 
1707 	switch (obj->id) {
1708 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1709 		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1710 		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
1711 
1712 		if (switchdev_trans_ph_prepare(trans)) {
1713 			/* The event is emitted before the changes are actually
1714 			 * applied to the bridge. Therefore schedule the respin
1715 			 * call for later, so that the respin logic sees the
1716 			 * updated bridge state.
1717 			 */
1718 			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1719 		}
1720 		break;
1721 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1722 		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1723 					    SWITCHDEV_OBJ_PORT_MDB(obj),
1724 					    trans);
1725 		break;
1726 	default:
1727 		err = -EOPNOTSUPP;
1728 		break;
1729 	}
1730 
1731 	return err;
1732 }
1733 
1734 static void
1735 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1736 			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1737 {
1738 	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : vid;
1739 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1740 
1741 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1742 	if (WARN_ON(!mlxsw_sp_port_vlan))
1743 		return;
1744 
1745 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1746 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1747 	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1748 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1749 }
1750 
1751 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1752 				   const struct switchdev_obj_port_vlan *vlan)
1753 {
1754 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1755 	struct net_device *orig_dev = vlan->obj.orig_dev;
1756 	struct mlxsw_sp_bridge_port *bridge_port;
1757 	u16 vid;
1758 
1759 	if (netif_is_bridge_master(orig_dev))
1760 		return -EOPNOTSUPP;
1761 
1762 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1763 	if (WARN_ON(!bridge_port))
1764 		return -EINVAL;
1765 
1766 	if (!bridge_port->bridge_device->vlan_enabled)
1767 		return 0;
1768 
1769 	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1770 		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
1771 
1772 	return 0;
1773 }
1774 
1775 static int
1776 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1777 			struct mlxsw_sp_bridge_port *bridge_port,
1778 			struct mlxsw_sp_mid *mid)
1779 {
1780 	struct net_device *dev = mlxsw_sp_port->dev;
1781 	int err;
1782 
1783 	if (bridge_port->bridge_device->multicast_enabled &&
1784 	    !bridge_port->mrouter) {
1785 		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1786 		if (err)
1787 			netdev_err(dev, "Unable to remove port from SMID\n");
1788 	}
1789 
1790 	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1791 	if (err)
1792 		netdev_err(dev, "Unable to remove MC SFD\n");
1793 
1794 	return err;
1795 }
1796 
1797 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1798 				 const struct switchdev_obj_port_mdb *mdb)
1799 {
1800 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1801 	struct net_device *orig_dev = mdb->obj.orig_dev;
1802 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1803 	struct mlxsw_sp_bridge_device *bridge_device;
1804 	struct net_device *dev = mlxsw_sp_port->dev;
1805 	struct mlxsw_sp_bridge_port *bridge_port;
1806 	struct mlxsw_sp_mid *mid;
1807 	u16 fid_index;
1808 
1809 	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1810 	if (!bridge_port)
1811 		return 0;
1812 
1813 	bridge_device = bridge_port->bridge_device;
1814 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1815 							       bridge_device,
1816 							       mdb->vid);
1817 	if (!mlxsw_sp_port_vlan)
1818 		return 0;
1819 
1820 	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1821 
1822 	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1823 	if (!mid) {
1824 		netdev_err(dev, "Unable to remove port from MC DB\n");
1825 		return -EINVAL;
1826 	}
1827 
1828 	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1829 }
1830 
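/* Flush the port from all MDB entries of its bridge: delete the entries that
 * include the port, and pull a multicast router port out of the remaining
 * SMID records it was added to.
 */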
1831 static void
1832 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1833 			       struct mlxsw_sp_bridge_port *bridge_port)
1834 {
1835 	struct mlxsw_sp_bridge_device *bridge_device;
1836 	struct mlxsw_sp_mid *mid, *tmp;
1837 
1838 	bridge_device = bridge_port->bridge_device;
1839 
1840 	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1841 		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1842 			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1843 						mid);
1844 		} else if (bridge_device->multicast_enabled &&
1845 			   bridge_port->mrouter) {
1846 			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1847 		}
1848 	}
1849 }
1850 
1851 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1852 				 const struct switchdev_obj *obj)
1853 {
1854 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1855 	int err = 0;
1856 
1857 	switch (obj->id) {
1858 	case SWITCHDEV_OBJ_ID_PORT_VLAN:
1859 		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1860 					      SWITCHDEV_OBJ_PORT_VLAN(obj));
1861 		break;
1862 	case SWITCHDEV_OBJ_ID_PORT_MDB:
1863 		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1864 					    SWITCHDEV_OBJ_PORT_MDB(obj));
1865 		break;
1866 	default:
1867 		err = -EOPNOTSUPP;
1868 		break;
1869 	}
1870 
1871 	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1872 
1873 	return err;
1874 }
1875 
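/* Return the first member port found in the LAG to act as its representor. */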
1876 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1877 						   u16 lag_id)
1878 {
1879 	struct mlxsw_sp_port *mlxsw_sp_port;
1880 	u64 max_lag_members;
1881 	int i;
1882 
1883 	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1884 					     MAX_LAG_MEMBERS);
1885 	for (i = 0; i < max_lag_members; i++) {
1886 		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1887 		if (mlxsw_sp_port)
1888 			return mlxsw_sp_port;
1889 	}
1890 	return NULL;
1891 }
1892 
1893 static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
1894 	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
1895 	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
1896 	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
1897 	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
1898 };
1899 
1900 static int
1901 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1902 				struct mlxsw_sp_bridge_port *bridge_port,
1903 				struct mlxsw_sp_port *mlxsw_sp_port,
1904 				struct netlink_ext_ack *extack)
1905 {
1906 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1907 
1908 	if (is_vlan_dev(bridge_port->dev)) {
1909 		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1910 		return -EINVAL;
1911 	}
1912 
1913 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
1914 	if (WARN_ON(!mlxsw_sp_port_vlan))
1915 		return -EINVAL;
1916 
1917 	/* Let VLAN-aware bridge take care of its own VLANs */
1918 	mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
1919 
1920 	return 0;
1921 }
1922 
1923 static void
1924 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1925 				 struct mlxsw_sp_bridge_port *bridge_port,
1926 				 struct mlxsw_sp_port *mlxsw_sp_port)
1927 {
1928 	mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
1929 	/* Make sure untagged frames are allowed to ingress */
1930 	mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
1931 }
1932 
1933 static struct mlxsw_sp_fid *
1934 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
1935 			      u16 vid)
1936 {
1937 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1938 
1939 	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
1940 }
1941 
1942 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
1943 	.port_join	= mlxsw_sp_bridge_8021q_port_join,
1944 	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
1945 	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
1946 };
1947 
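/* Check whether any {Port, VLAN} of the port is already a member of the
 * given bridge.
 */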
1948 static bool
1949 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
1950 			   const struct net_device *br_dev)
1951 {
1952 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1953 
1954 	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
1955 			    list) {
1956 		if (mlxsw_sp_port_vlan->bridge_port &&
1957 		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
1958 		    br_dev)
1959 			return true;
1960 	}
1961 
1962 	return false;
1963 }
1964 
1965 static int
1966 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1967 				struct mlxsw_sp_bridge_port *bridge_port,
1968 				struct mlxsw_sp_port *mlxsw_sp_port,
1969 				struct netlink_ext_ack *extack)
1970 {
1971 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1972 	struct net_device *dev = bridge_port->dev;
1973 	u16 vid;
1974 
1975 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
1976 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1977 	if (WARN_ON(!mlxsw_sp_port_vlan))
1978 		return -EINVAL;
1979 
1980 	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
1981 		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
1982 		return -EINVAL;
1983 	}
1984 
1985 	/* Port is no longer usable as a router interface */
1986 	if (mlxsw_sp_port_vlan->fid)
1987 		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1988 
1989 	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
1990 }
1991 
1992 static void
1993 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1994 				 struct mlxsw_sp_bridge_port *bridge_port,
1995 				 struct mlxsw_sp_port *mlxsw_sp_port)
1996 {
1997 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1998 	struct net_device *dev = bridge_port->dev;
1999 	u16 vid;
2000 
2001 	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
2002 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2003 	if (WARN_ON(!mlxsw_sp_port_vlan))
2004 		return;
2005 
2006 	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2007 }
2008 
2009 static struct mlxsw_sp_fid *
2010 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2011 			      u16 vid)
2012 {
2013 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2014 
2015 	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2016 }
2017 
2018 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2019 	.port_join	= mlxsw_sp_bridge_8021d_port_join,
2020 	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
2021 	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
2022 };
2023 
2024 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2025 			      struct net_device *brport_dev,
2026 			      struct net_device *br_dev,
2027 			      struct netlink_ext_ack *extack)
2028 {
2029 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2030 	struct mlxsw_sp_bridge_device *bridge_device;
2031 	struct mlxsw_sp_bridge_port *bridge_port;
2032 	int err;
2033 
2034 	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
2035 	if (IS_ERR(bridge_port))
2036 		return PTR_ERR(bridge_port);
2037 	bridge_device = bridge_port->bridge_device;
2038 
2039 	err = bridge_device->ops->port_join(bridge_device, bridge_port,
2040 					    mlxsw_sp_port, extack);
2041 	if (err)
2042 		goto err_port_join;
2043 
2044 	return 0;
2045 
2046 err_port_join:
2047 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2048 	return err;
2049 }
2050 
2051 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2052 				struct net_device *brport_dev,
2053 				struct net_device *br_dev)
2054 {
2055 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2056 	struct mlxsw_sp_bridge_device *bridge_device;
2057 	struct mlxsw_sp_bridge_port *bridge_port;
2058 
2059 	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2060 	if (!bridge_device)
2061 		return;
2062 	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2063 	if (!bridge_port)
2064 		return;
2065 
2066 	bridge_device->ops->port_leave(bridge_device, bridge_port,
2067 				       mlxsw_sp_port);
2068 	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2069 }
2070 
2071 static void
2072 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2073 			    const char *mac, u16 vid,
2074 			    struct net_device *dev)
2075 {
2076 	struct switchdev_notifier_fdb_info info;
2077 
2078 	info.addr = mac;
2079 	info.vid = vid;
2080 	call_switchdev_notifiers(type, dev, &info.info);
2081 }
2082 
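/* Process a learned / aged-out MAC record from the SFN register. When the
 * record cannot be associated with a {Port, VID} that is member of a bridge,
 * the entry is removed from the hardware FDB without notifying the bridge
 * (the "just_remove" path).
 */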
2083 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2084 					    char *sfn_pl, int rec_index,
2085 					    bool adding)
2086 {
2087 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2088 	struct mlxsw_sp_bridge_device *bridge_device;
2089 	struct mlxsw_sp_bridge_port *bridge_port;
2090 	struct mlxsw_sp_port *mlxsw_sp_port;
2091 	enum switchdev_notifier_type type;
2092 	char mac[ETH_ALEN];
2093 	u8 local_port;
2094 	u16 vid, fid;
2095 	bool do_notification = true;
2096 	int err;
2097 
2098 	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2099 	mlxsw_sp_port = mlxsw_sp->ports[local_port];
2100 	if (!mlxsw_sp_port) {
2101 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2102 		goto just_remove;
2103 	}
2104 
2105 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2106 	if (!mlxsw_sp_port_vlan) {
2107 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2108 		goto just_remove;
2109 	}
2110 
2111 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2112 	if (!bridge_port) {
2113 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2114 		goto just_remove;
2115 	}
2116 
2117 	bridge_device = bridge_port->bridge_device;
2118 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2119 
2120 do_fdb_op:
2121 	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2122 				      adding, true);
2123 	if (err) {
2124 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2125 		return;
2126 	}
2127 
2128 	if (!do_notification)
2129 		return;
2130 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2131 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
2132 
2133 	return;
2134 
2135 just_remove:
2136 	adding = false;
2137 	do_notification = false;
2138 	goto do_fdb_op;
2139 }
2140 
2141 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2142 						char *sfn_pl, int rec_index,
2143 						bool adding)
2144 {
2145 	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2146 	struct mlxsw_sp_bridge_device *bridge_device;
2147 	struct mlxsw_sp_bridge_port *bridge_port;
2148 	struct mlxsw_sp_port *mlxsw_sp_port;
2149 	enum switchdev_notifier_type type;
2150 	char mac[ETH_ALEN];
2151 	u16 lag_vid = 0;
2152 	u16 lag_id;
2153 	u16 vid, fid;
2154 	bool do_notification = true;
2155 	int err;
2156 
2157 	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2158 	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2159 	if (!mlxsw_sp_port) {
2160 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2161 		goto just_remove;
2162 	}
2163 
2164 	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2165 	if (!mlxsw_sp_port_vlan) {
2166 		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2167 		goto just_remove;
2168 	}
2169 
2170 	bridge_port = mlxsw_sp_port_vlan->bridge_port;
2171 	if (!bridge_port) {
2172 		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2173 		goto just_remove;
2174 	}
2175 
2176 	bridge_device = bridge_port->bridge_device;
2177 	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2178 	lag_vid = mlxsw_sp_port_vlan->vid;
2179 
2180 do_fdb_op:
2181 	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2182 					  adding, true);
2183 	if (err) {
2184 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2185 		return;
2186 	}
2187 
2188 	if (!do_notification)
2189 		return;
2190 	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2191 	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev);
2192 
2193 	return;
2194 
2195 just_remove:
2196 	adding = false;
2197 	do_notification = false;
2198 	goto do_fdb_op;
2199 }
2200 
2201 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2202 					    char *sfn_pl, int rec_index)
2203 {
2204 	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2205 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2206 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2207 						rec_index, true);
2208 		break;
2209 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2210 		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2211 						rec_index, false);
2212 		break;
2213 	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2214 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2215 						    rec_index, true);
2216 		break;
2217 	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2218 		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2219 						    rec_index, false);
2220 		break;
2221 	}
2222 }
2223 
2224 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2225 {
2226 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2227 
2228 	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2229 			       msecs_to_jiffies(bridge->fdb_notify.interval));
2230 }
2231 
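/* Delayed work that queries the device for FDB notification records (SFN),
 * processes them under RTNL and then re-arms itself according to the
 * notification interval.
 */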
2232 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2233 {
2234 	struct mlxsw_sp_bridge *bridge;
2235 	struct mlxsw_sp *mlxsw_sp;
2236 	char *sfn_pl;
2237 	u8 num_rec;
2238 	int i;
2239 	int err;
2240 
2241 	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2242 	if (!sfn_pl)
2243 		return;
2244 
2245 	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2246 	mlxsw_sp = bridge->mlxsw_sp;
2247 
2248 	rtnl_lock();
2249 	mlxsw_reg_sfn_pack(sfn_pl);
2250 	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2251 	if (err) {
2252 		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2253 		goto out;
2254 	}
2255 	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2256 	for (i = 0; i < num_rec; i++)
2257 		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2258 
2259 out:
2260 	rtnl_unlock();
2261 	kfree(sfn_pl);
2262 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2263 }
2264 
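/* FDB events arrive in atomic context; the notifier copies the FDB info
 * (including the MAC address) and defers the actual processing to this work
 * item, which runs under RTNL.
 */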
2265 struct mlxsw_sp_switchdev_event_work {
2266 	struct work_struct work;
2267 	struct switchdev_notifier_fdb_info fdb_info;
2268 	struct net_device *dev;
2269 	unsigned long event;
2270 };
2271 
2272 static void mlxsw_sp_switchdev_event_work(struct work_struct *work)
2273 {
2274 	struct mlxsw_sp_switchdev_event_work *switchdev_work =
2275 		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2276 	struct net_device *dev = switchdev_work->dev;
2277 	struct switchdev_notifier_fdb_info *fdb_info;
2278 	struct mlxsw_sp_port *mlxsw_sp_port;
2279 	int err;
2280 
2281 	rtnl_lock();
2282 	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2283 	if (!mlxsw_sp_port)
2284 		goto out;
2285 
2286 	switch (switchdev_work->event) {
2287 	case SWITCHDEV_FDB_ADD_TO_DEVICE:
2288 		fdb_info = &switchdev_work->fdb_info;
2289 		if (!fdb_info->added_by_user)
2290 			break;
2291 		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2292 		if (err)
2293 			break;
2294 		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2295 					    fdb_info->addr,
2296 					    fdb_info->vid, dev);
2297 		break;
2298 	case SWITCHDEV_FDB_DEL_TO_DEVICE:
2299 		fdb_info = &switchdev_work->fdb_info;
2300 		if (!fdb_info->added_by_user)
2301 			break;
2302 		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2303 		break;
2304 	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2305 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2306 		/* These events are only used to potentially update an existing
2307 		 * SPAN mirror.
2308 		 */
2309 		break;
2310 	}
2311 
2312 	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2313 
2314 out:
2315 	rtnl_unlock();
2316 	kfree(switchdev_work->fdb_info.addr);
2317 	kfree(switchdev_work);
2318 	dev_put(dev);
2319 }
2320 
2321 /* Called under rcu_read_lock() */
2322 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
2323 				    unsigned long event, void *ptr)
2324 {
2325 	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
2326 	struct mlxsw_sp_switchdev_event_work *switchdev_work;
2327 	struct switchdev_notifier_fdb_info *fdb_info = ptr;
2328 
2329 	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
2330 		return NOTIFY_DONE;
2331 
2332 	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
2333 	if (!switchdev_work)
2334 		return NOTIFY_BAD;
2335 
2336 	INIT_WORK(&switchdev_work->work, mlxsw_sp_switchdev_event_work);
2337 	switchdev_work->dev = dev;
2338 	switchdev_work->event = event;
2339 
2340 	switch (event) {
2341 	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
2342 	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
2343 	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
2344 	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2345 		memcpy(&switchdev_work->fdb_info, ptr,
2346 		       sizeof(switchdev_work->fdb_info));
2347 		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
2348 		if (!switchdev_work->fdb_info.addr)
2349 			goto err_addr_alloc;
2350 		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
2351 				fdb_info->addr);
2352 		/* Take a reference on the device. This can be either an
2353 		 * upper device containing mlxsw_sp_port or the mlxsw_sp_port
2354 		 * itself.
2355 		 */
2356 		dev_hold(dev);
2357 		break;
2358 	default:
2359 		kfree(switchdev_work);
2360 		return NOTIFY_DONE;
2361 	}
2362 
2363 	mlxsw_core_schedule_work(&switchdev_work->work);
2364 
2365 	return NOTIFY_DONE;
2366 
2367 err_addr_alloc:
2368 	kfree(switchdev_work);
2369 	return NOTIFY_BAD;
2370 }
2371 
2372 static struct notifier_block mlxsw_sp_switchdev_notifier = {
2373 	.notifier_call = mlxsw_sp_switchdev_event,
2374 };
2375 
2376 u8
2377 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
2378 {
2379 	return bridge_port->stp_state;
2380 }
2381 
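/* Set the default ageing time, register the switchdev notifier and arm the
 * periodic FDB notification processing.
 */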
2382 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
2383 {
2384 	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2385 	int err;
2386 
2387 	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
2388 	if (err) {
2389 		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
2390 		return err;
2391 	}
2392 
2393 	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2394 	if (err) {
2395 		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
2396 		return err;
2397 	}
2398 
2399 	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
2400 	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
2401 	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2402 	return 0;
2403 }
2404 
2405 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
2406 {
2407 	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
2408 	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
2409 
2410 }
2411 
2412 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
2413 {
2414 	struct mlxsw_sp_bridge *bridge;
2415 
2416 	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
2417 	if (!bridge)
2418 		return -ENOMEM;
2419 	mlxsw_sp->bridge = bridge;
2420 	bridge->mlxsw_sp = mlxsw_sp;
2421 
2422 	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
2423 
2424 	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
2425 	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
2426 
2427 	return mlxsw_sp_fdb_init(mlxsw_sp);
2428 }
2429 
2430 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
2431 {
2432 	mlxsw_sp_fdb_fini(mlxsw_sp);
2433 	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
2434 	kfree(mlxsw_sp->bridge);
2435 }
2436 
2437 void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
2438 {
2439 	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
2440 }
2441 
2442 void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
2443 {
2444 }
2445