xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c (revision 8dd765a5d769c521d73931850d1c8708fbc490cb)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/mlx5/driver.h>
#include "eswitch.h"

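/* Derive the parent switch ID from the device's system image GUID so every
 * port of the same NIC reports an identical switch_id.
 */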
static void
mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
	u64 parent_id;

	parent_id = mlx5_query_nic_system_image_guid(dev);
	ppid->id_len = sizeof(parent_id);
	memcpy(ppid->id, &parent_id, sizeof(parent_id));
}

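/* PF/VF devlink ports are pre-allocated only for the host PF vport (when this
 * device is the embedded CPU PF), for VF vports and for embedded CPU VF
 * vports; SF ports are set up through the dedicated SF init path below.
 */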
static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
}

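/* Set the PCI PF/VF devlink port attributes: switch_id, controller number and
 * PF/VF numbers. When the eswitch manager is the embedded CPU PF, the PF/VF
 * functions are reported as belonging to an external host controller, while
 * embedded CPU VFs always stay on the local controller.
 */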
static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
							   u16 vport_num,
							   struct devlink_port *dl_port)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct netdev_phys_item_id ppid = {};
	u32 controller_num = 0;
	bool external;
	u16 pfnum;

	mlx5_esw_get_port_parent_id(dev, &ppid);
	pfnum = mlx5_get_dev_index(dev);
	external = mlx5_core_is_ecpf_esw_manager(dev);
	if (external)
		controller_num = dev->priv.eswitch->offloads.host_number + 1;

	if (vport_num == MLX5_VPORT_PF) {
		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
		dl_port->attrs.switch_id.id_len = ppid.id_len;
		devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
	} else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
		dl_port->attrs.switch_id.id_len = ppid.id_len;
		devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
					      vport_num - 1, external);
	} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
		dl_port->attrs.switch_id.id_len = ppid.id_len;
		devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
					      vport_num - 1, false);
	}
}

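/* Allocate and initialize the devlink port metadata for a PF/VF vport.
 * Registration with the devlink core happens later in
 * mlx5_esw_offloads_devlink_port_register().
 */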
int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	struct mlx5_devlink_port *dl_port;
	u16 vport_num = vport->vport;

	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
		return 0;

	dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
	if (!dl_port)
		return -ENOMEM;

	mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num,
						       &dl_port->dl_port);

	vport->dl_port = dl_port;
	mlx5_devlink_port_init(dl_port, vport);
	return 0;
}

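/* Free the devlink port metadata allocated by
 * mlx5_esw_offloads_pf_vf_devlink_port_init().
 */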
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport)
{
	if (!vport->dl_port)
		return;

	kfree(vport->dl_port);
	vport->dl_port = NULL;
}

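/* Port function ops exposed to devlink for PF/VF ports: hardware address,
 * RoCE and migratable knobs, plus IPsec knobs when XFRM offload is enabled.
 */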
static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
	.port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
	.port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
	.port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
	.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
	.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
	.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
#ifdef CONFIG_XFRM_OFFLOAD
	.port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
	.port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
	.port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
	.port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
};

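/* Set the PCI SF devlink port attributes; a non-zero controller number marks
 * the SF as owned by an external host.
 */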
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
							struct devlink_port *dl_port,
							u32 controller, u32 sfnum)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct netdev_phys_item_id ppid = {};
	u16 pfnum;

	pfnum = mlx5_get_dev_index(dev);
	mlx5_esw_get_port_parent_id(dev, &ppid);
	memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
	dl_port->attrs.switch_id.id_len = ppid.id_len;
	devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
}

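/* Unlike PF/VF ports, the SF devlink port structure is allocated by the
 * caller; only the attributes and the vport back-pointer are set up here.
 */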
int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum)
{
	mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum);

	vport->dl_port = dl_port;
	mlx5_devlink_port_init(dl_port, vport);
	return 0;
}

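/* The SF devlink port memory is owned by the caller, so only drop the
 * vport's reference to it.
 */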
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	vport->dl_port = NULL;
}

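/* Port function ops exposed to devlink for SF ports; port deletion and state
 * control are available only when the SF manager is compiled in.
 */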
static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_del = mlx5_devlink_sf_port_del,
#endif
	.port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
	.port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
	.port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
	.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
	.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
};

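/* Register a previously initialized vport devlink port and create its QoS
 * rate leaf. Vports without an initialized port are silently skipped.
 */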
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = esw->dev;
	const struct devlink_port_ops *ops;
	struct mlx5_devlink_port *dl_port;
	u16 vport_num = vport->vport;
	unsigned int dl_port_index;
	struct devlink *devlink;
	int err;

	dl_port = vport->dl_port;
	if (!dl_port)
		return 0;

	if (mlx5_esw_is_sf_vport(esw, vport_num))
		ops = &mlx5_esw_dl_sf_port_ops;
	else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num))
		ops = &mlx5_esw_pf_vf_dl_port_ops;
	else
		ops = NULL;

	devlink = priv_to_devlink(dev);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
	err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops);
	if (err)
		return err;

	err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL);
	if (err)
		goto rate_err;

	return 0;

rate_err:
	devl_port_unregister(&dl_port->dl_port);
	return err;
}

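/* Undo mlx5_esw_offloads_devlink_port_register(): detach the vport from its
 * QoS group, destroy the rate leaf and unregister the devlink port.
 */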
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_devlink_port *dl_port;

	if (!vport->dl_port)
		return;
	dl_port = vport->dl_port;

	mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
	devl_rate_leaf_destroy(&dl_port->dl_port);

	devl_port_unregister(&dl_port->dl_port);
}

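/* Look up the devlink port of a vport by vport number; returns an ERR_PTR if
 * the vport itself cannot be found.
 */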
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port;
}
207