xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c (revision a460513ed4b6994bfeb7bd86f72853140bc1ac12)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Ltd */
3 
4 #include <linux/mlx5/driver.h>
5 #include "eswitch.h"
6 #include "priv.h"
7 #include "sf/dev/dev.h"
8 #include "mlx5_ifc_vhca_event.h"
9 #include "vhca_event.h"
10 #include "ecpf.h"
11 
/* Driver representation of a single sub-function (SF) port. */
struct mlx5_sf {
	struct devlink_port dl_port;	/* registered with devlink via eswitch offloads */
	unsigned int port_index;	/* devlink port index; key in table->port_indices */
	u16 id;				/* software SF id from the SF HW table */
	u16 hw_fn_id;			/* hardware function id derived from @id */
	u16 hw_state;			/* cached MLX5_VHCA_STATE_*; sf_state_lock protects updates */
};
19 
/* Per-device SF table, reachable via dev->priv.sf_table. */
struct mlx5_sf_table {
	struct mlx5_core_dev *dev; /* To refer from notifier context. */
	struct xarray port_indices; /* port index based lookup. */
	refcount_t refcount;	/* 0 = disabled; set to 1 while eswitch is in switchdev mode */
	struct completion disable_complete; /* completed when the last reference is dropped */
	struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
	struct notifier_block esw_nb;	/* eswitch mode change notifications */
	struct notifier_block vhca_nb;	/* vhca (SF function) state change notifications */
	u8 ecpu: 1;	/* NOTE(review): not referenced in this file — confirm external users */
};
30 
31 static struct mlx5_sf *
32 mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
33 {
34 	return xa_load(&table->port_indices, port_index);
35 }
36 
37 static struct mlx5_sf *
38 mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
39 {
40 	unsigned long index;
41 	struct mlx5_sf *sf;
42 
43 	xa_for_each(&table->port_indices, index, sf) {
44 		if (sf->hw_fn_id == fn_id)
45 			return sf;
46 	}
47 	return NULL;
48 }
49 
/* Register @sf for port index based lookup. xa_insert() fails with -EBUSY
 * when the index is already occupied.
 */
static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
}
54 
/* Remove the port index based lookup entry of @sf; no-op if absent. */
static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	xa_erase(&table->port_indices, sf->port_index);
}
59 
60 static struct mlx5_sf *
61 mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
62 {
63 	unsigned int dl_port_index;
64 	struct mlx5_sf *sf;
65 	u16 hw_fn_id;
66 	int id_err;
67 	int err;
68 
69 	id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
70 	if (id_err < 0) {
71 		err = id_err;
72 		goto id_err;
73 	}
74 
75 	sf = kzalloc(sizeof(*sf), GFP_KERNEL);
76 	if (!sf) {
77 		err = -ENOMEM;
78 		goto alloc_err;
79 	}
80 	sf->id = id_err;
81 	hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
82 	dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
83 	sf->port_index = dl_port_index;
84 	sf->hw_fn_id = hw_fn_id;
85 	sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
86 
87 	err = mlx5_sf_id_insert(table, sf);
88 	if (err)
89 		goto insert_err;
90 
91 	return sf;
92 
93 insert_err:
94 	kfree(sf);
95 alloc_err:
96 	mlx5_sf_hw_table_sf_free(table->dev, id_err);
97 id_err:
98 	if (err == -EEXIST)
99 		NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
100 	return ERR_PTR(err);
101 }
102 
/* Undo mlx5_sf_alloc(): drop the lookup entry, return the SW id to the SF
 * HW table, and free the object. Used when the SF HCA was never activated;
 * otherwise the deferred path in mlx5_sf_dealloc() applies.
 */
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	mlx5_sf_id_erase(table, sf);
	mlx5_sf_hw_table_sf_free(table->dev, sf->id);
	kfree(sf);
}
109 
110 static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
111 {
112 	struct mlx5_sf_table *table = dev->priv.sf_table;
113 
114 	if (!table)
115 		return NULL;
116 
117 	return refcount_inc_not_zero(&table->refcount) ? table : NULL;
118 }
119 
120 static void mlx5_sf_table_put(struct mlx5_sf_table *table)
121 {
122 	if (refcount_dec_and_test(&table->refcount))
123 		complete(&table->disable_complete);
124 }
125 
126 static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
127 {
128 	switch (hw_state) {
129 	case MLX5_VHCA_STATE_ACTIVE:
130 	case MLX5_VHCA_STATE_IN_USE:
131 	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
132 		return DEVLINK_PORT_FN_STATE_ACTIVE;
133 	case MLX5_VHCA_STATE_INVALID:
134 	case MLX5_VHCA_STATE_ALLOCATED:
135 	default:
136 		return DEVLINK_PORT_FN_STATE_INACTIVE;
137 	}
138 }
139 
140 static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
141 {
142 	switch (hw_state) {
143 	case MLX5_VHCA_STATE_IN_USE:
144 	case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
145 		return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
146 	case MLX5_VHCA_STATE_INVALID:
147 	case MLX5_VHCA_STATE_ALLOCATED:
148 	case MLX5_VHCA_STATE_ACTIVE:
149 	default:
150 		return DEVLINK_PORT_FN_OPSTATE_DETACHED;
151 	}
152 }
153 
154 static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
155 {
156 	return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
157 }
158 
159 int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
160 				      enum devlink_port_fn_state *state,
161 				      enum devlink_port_fn_opstate *opstate,
162 				      struct netlink_ext_ack *extack)
163 {
164 	struct mlx5_core_dev *dev = devlink_priv(devlink);
165 	struct mlx5_sf_table *table;
166 	struct mlx5_sf *sf;
167 	int err = 0;
168 
169 	table = mlx5_sf_table_try_get(dev);
170 	if (!table)
171 		return -EOPNOTSUPP;
172 
173 	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
174 	if (!sf) {
175 		err = -EOPNOTSUPP;
176 		goto sf_err;
177 	}
178 	mutex_lock(&table->sf_state_lock);
179 	*state = mlx5_sf_to_devlink_state(sf->hw_state);
180 	*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
181 	mutex_unlock(&table->sf_state_lock);
182 sf_err:
183 	mlx5_sf_table_put(table);
184 	return err;
185 }
186 
187 static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
188 {
189 	int err;
190 
191 	if (mlx5_sf_is_active(sf))
192 		return 0;
193 	if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
194 		return -EINVAL;
195 
196 	err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
197 	if (err)
198 		return err;
199 
200 	sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
201 	return 0;
202 }
203 
204 static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
205 {
206 	int err;
207 
208 	if (!mlx5_sf_is_active(sf))
209 		return 0;
210 
211 	err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
212 	if (err)
213 		return err;
214 
215 	sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
216 	return 0;
217 }
218 
219 static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
220 			     struct mlx5_sf *sf,
221 			     enum devlink_port_fn_state state)
222 {
223 	int err = 0;
224 
225 	mutex_lock(&table->sf_state_lock);
226 	if (state == mlx5_sf_to_devlink_state(sf->hw_state))
227 		goto out;
228 	if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
229 		err = mlx5_sf_activate(dev, sf);
230 	else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
231 		err = mlx5_sf_deactivate(dev, sf);
232 	else
233 		err = -EINVAL;
234 out:
235 	mutex_unlock(&table->sf_state_lock);
236 	return err;
237 }
238 
239 int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
240 				      enum devlink_port_fn_state state,
241 				      struct netlink_ext_ack *extack)
242 {
243 	struct mlx5_core_dev *dev = devlink_priv(devlink);
244 	struct mlx5_sf_table *table;
245 	struct mlx5_sf *sf;
246 	int err;
247 
248 	table = mlx5_sf_table_try_get(dev);
249 	if (!table) {
250 		NL_SET_ERR_MSG_MOD(extack,
251 				   "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
252 		return -EOPNOTSUPP;
253 	}
254 	sf = mlx5_sf_lookup_by_index(table, dl_port->index);
255 	if (!sf) {
256 		err = -ENODEV;
257 		goto out;
258 	}
259 
260 	err = mlx5_sf_state_set(dev, table, sf, state);
261 out:
262 	mlx5_sf_table_put(table);
263 	return err;
264 }
265 
266 static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
267 		       const struct devlink_port_new_attrs *new_attr,
268 		       struct netlink_ext_ack *extack,
269 		       unsigned int *new_port_index)
270 {
271 	struct mlx5_eswitch *esw = dev->priv.eswitch;
272 	struct mlx5_sf *sf;
273 	u16 hw_fn_id;
274 	int err;
275 
276 	sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
277 	if (IS_ERR(sf))
278 		return PTR_ERR(sf);
279 
280 	hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id);
281 	err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum);
282 	if (err)
283 		goto esw_err;
284 	*new_port_index = sf->port_index;
285 	return 0;
286 
287 esw_err:
288 	mlx5_sf_free(table, sf);
289 	return err;
290 }
291 
/* Validate the user supplied attributes for a new SF port. Returns 0 when
 * the request can be served, otherwise -EOPNOTSUPP with an extack message
 * describing the first unsupported attribute found.
 */
static int
mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
		       struct netlink_ext_ack *extack)
{
	if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
		NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
		return -EOPNOTSUPP;
	}
	if (new_attr->port_index_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Driver does not support user defined port index assignment");
		return -EOPNOTSUPP;
	}
	if (!new_attr->sfnum_valid) {
		NL_SET_ERR_MSG_MOD(extack,
				   "User must provide unique sfnum. Driver does not support auto assignment");
		return -EOPNOTSUPP;
	}
	if (new_attr->controller_valid && new_attr->controller) {
		NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
		return -EOPNOTSUPP;
	}
	/* SFs may be spawned only under this PF. */
	if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
		return -EOPNOTSUPP;
	}
	return 0;
}
320 
/* Devlink callback to add a new SF port. On success *new_port_index holds
 * the devlink port index assigned to the SF.
 */
int mlx5_devlink_sf_port_new(struct devlink *devlink,
			     const struct devlink_port_new_attrs *new_attr,
			     struct netlink_ext_ack *extack,
			     unsigned int *new_port_index)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_sf_table *table;
	int err;

	err = mlx5_sf_new_check_attr(dev, new_attr, extack);
	if (err)
		return err;

	/* Holding a table reference keeps the table usable against a
	 * concurrent eswitch mode change tearing it down.
	 */
	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
	mlx5_sf_table_put(table);
	return err;
}
344 
/* Release an SF based on its current HW state. The id-to-object mapping is
 * expected to be already erased by the caller.
 */
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
	if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
		/* Never activated; the SW id can be returned to the HW table
		 * immediately.
		 */
		mlx5_sf_free(table, sf);
	} else if (mlx5_sf_is_active(sf)) {
		/* Even if it is active, it is treated as in use, because by the
		 * time it is disabled here it may be getting used. So it is safe
		 * to always wait for the vhca event to ensure that the id is
		 * recycled only after firmware confirms that the function is
		 * detached from the driver.
		 */
		mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
		kfree(sf);
	} else {
		/* Teardown already in flight; defer the id free to the event. */
		mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
		kfree(sf);
	}
}
363 
/* Devlink callback to delete an SF port. Returns -EOPNOTSUPP when SF ports
 * are disabled and -ENODEV when @port_index does not belong to an SF.
 */
int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
			     struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_sf_table *table;
	struct mlx5_sf *sf;
	int err = 0;

	table = mlx5_sf_table_try_get(dev);
	if (!table) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
		return -EOPNOTSUPP;
	}
	sf = mlx5_sf_lookup_by_index(table, port_index);
	if (!sf) {
		err = -ENODEV;
		goto sf_err;
	}

	/* Unplug the representor vport first, then remove the lookup entry so
	 * no further user command can find this SF.
	 */
	mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
	mlx5_sf_id_erase(table, sf);

	/* Serialize the final teardown against the vhca event handler. */
	mutex_lock(&table->sf_state_lock);
	mlx5_sf_dealloc(table, sf);
	mutex_unlock(&table->sf_state_lock);
sf_err:
	mlx5_sf_table_put(table);
	return err;
}
395 
396 static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
397 {
398 	if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
399 		return true;
400 
401 	if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
402 		return true;
403 
404 	if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
405 	    new_state == MLX5_VHCA_STATE_ALLOCATED)
406 		return true;
407 
408 	return false;
409 }
410 
/* VHCA state change notifier. Caches the new HW state of the SF that
 * generated the event, provided the transition is one of the recognized
 * attach/detach transitions (see mlx5_sf_state_update_check()).
 */
static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
{
	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
	const struct mlx5_vhca_state_event *event = data;
	bool update = false;
	struct mlx5_sf *sf;

	/* Bail out if table disable already started; events are best-effort. */
	table = mlx5_sf_table_try_get(table->dev);
	if (!table)
		return 0;

	mutex_lock(&table->sf_state_lock);
	sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
	if (!sf)
		goto sf_err;

	/* When driver is attached or detached to a function, an event
	 * notifies such state change.
	 */
	update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
	if (update)
		sf->hw_state = event->new_vhca_state;
sf_err:
	mutex_unlock(&table->sf_state_lock);
	mlx5_sf_table_put(table);
	return 0;
}
438 
/* Called on eswitch switchdev mode entry. Raising the refcount from 0 to 1
 * opens the table to user commands and the vhca event handler.
 */
static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
{
	/* Nothing to enable when the device cannot spawn any SF. */
	if (!mlx5_sf_max_functions(table->dev))
		return;

	/* Re-arm the completion consumed by a previous disable cycle. */
	init_completion(&table->disable_complete);
	refcount_set(&table->refcount, 1);
}
447 
/* Tear down every remaining user created SF. Runs only after the table has
 * been quiesced by mlx5_sf_table_disable().
 */
static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
{
	struct mlx5_eswitch *esw = table->dev->priv.eswitch;
	unsigned long index;
	struct mlx5_sf *sf;

	/* At this point, no new user commands can start and no vhca event can
	 * arrive. It is safe to destroy all user created SFs.
	 */
	xa_for_each(&table->port_indices, index, sf) {
		mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
		mlx5_sf_id_erase(table, sf);
		/* sf_state_lock not needed; all writers are quiesced. */
		mlx5_sf_dealloc(table, sf);
	}
}
463 
/* Called on eswitch switchdev mode exit. Quiesces the table, waits for all
 * in-flight users to drop their references, then destroys remaining SFs.
 */
static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
{
	if (!mlx5_sf_max_functions(table->dev))
		return;

	/* Table was never enabled; nothing to quiesce. */
	if (!refcount_read(&table->refcount))
		return;

	/* Balances with refcount_set; drop the reference so that new user cmd cannot start
	 * and new vhca event handler cannot run.
	 */
	mlx5_sf_table_put(table);
	wait_for_completion(&table->disable_complete);

	mlx5_sf_deactivate_all(table);
}
480 
481 static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
482 {
483 	struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
484 	const struct mlx5_esw_event_info *mode = data;
485 
486 	switch (mode->new_mode) {
487 	case MLX5_ESWITCH_OFFLOADS:
488 		mlx5_sf_table_enable(table);
489 		break;
490 	case MLX5_ESWITCH_NONE:
491 		mlx5_sf_table_disable(table);
492 		break;
493 	default:
494 		break;
495 	}
496 
497 	return 0;
498 }
499 
500 static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
501 {
502 	return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
503 }
504 
/* Allocate the per-device SF table and register for eswitch mode and vhca
 * state change notifications. Returns 0 also when SF ports are unsupported
 * on this device (no table is allocated in that case).
 */
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table;
	int err;

	if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
		return 0;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_init(&table->sf_state_lock);
	table->dev = dev;
	xa_init(&table->port_indices);
	dev->priv.sf_table = table;
	/* Start disabled; mlx5_sf_table_enable() raises the refcount to 1
	 * when the eswitch enters switchdev mode.
	 */
	refcount_set(&table->refcount, 0);
	table->esw_nb.notifier_call = mlx5_sf_esw_event;
	err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
	if (err)
		goto reg_err;

	table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
	err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
	if (err)
		goto vhca_err;

	return 0;

vhca_err:
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
reg_err:
	mutex_destroy(&table->sf_state_lock);
	kfree(table);
	dev->priv.sf_table = NULL;
	return err;
}
542 
/* Counterpart of mlx5_sf_table_init(). By this time the eswitch must have
 * left switchdev mode, so the refcount is zero and no SF ports remain
 * (both conditions are asserted below).
 */
void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_sf_table *table = dev->priv.sf_table;

	/* Init may have skipped allocation on unsupported devices. */
	if (!table)
		return;

	mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
	mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
	WARN_ON(refcount_read(&table->refcount));
	mutex_destroy(&table->sf_state_lock);
	WARN_ON(!xa_empty(&table->port_indices));
	kfree(table);
}
557