/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "en/tc_ct.h"

#define FDB_TC_MAX_CHAIN	3
#define FDB_FT_CHAIN		(FDB_TC_MAX_CHAIN + 1)
#define FDB_TC_SLOW_PATH_CHAIN	(FDB_FT_CHAIN + 1)

/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
#define FDB_NUM_CHAINS		(FDB_FT_CHAIN + 1)

#define FDB_TC_MAX_PRIO		16
#define FDB_TC_LEVELS_PER_PRIO	2

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define MLX5_MIN_BW_SHARE 1

#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
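/* Illustrative worked example (divider and limit values are hypothetical,
 * not taken from hardware caps): the macro clamps the computed share to the
 * [MLX5_MIN_BW_SHARE, limit] range, e.g.
 *
 *	MLX5_RATE_TO_BW_SHARE(3000, 1000, 100)   == 3
 *	MLX5_RATE_TO_BW_SHARE(500, 1000, 100)    == 1	(raised to MLX5_MIN_BW_SHARE)
 *	MLX5_RATE_TO_BW_SHARE(500000, 1000, 100) == 100	(capped at the limit)
 */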
#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *allowed_vlans_grp;
	struct mlx5_flow_group *drop_grp;
	struct mlx5_flow_handle *allowed_vlan;
	struct {
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter;
	} legacy;
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u8 qos;
	u64 node_guid;
	int link_state;
	u32 min_rate;
	u32 max_rate;
	bool spoofchk;
	bool trusted;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
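/* Illustrative only: these event bits are OR-ed into the enabled_events mask
 * passed to the vport load helpers declared later in this header. A
 * hypothetical caller arming all three would do:
 *
 *	enum mlx5_eswitch_vport_event events = MLX5_VPORT_UC_ADDR_CHANGE |
 *					       MLX5_VPORT_MC_ADDR_CHANGE |
 *					       MLX5_VPORT_PROMISC_CHANGE;
 */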
struct mlx5_vport {
	struct mlx5_core_dev *dev;
	int vport;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
	} qos;

	bool enabled;
	enum mlx5_eswitch_vport_event enabled_events;
};

enum offloads_fdb_flags {
	ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};

struct mlx5_esw_chains_priv;

struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_esw_chains_priv *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

		} offloads;
	};
	u32 flags;
};

struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_eswitch_rep *vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
};

enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct mlx5_vport *vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct mutex mode_lock;

	struct {
		bool enabled;
		u32 root_tsar_id;
	} qos;

	struct mlx5_esw_offload offloads;
	int mode;
	int nvports;
	u16 manager_vport;
	u16 first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport,
				       int table_size);
void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport);
int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport);
void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport);
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
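/* Illustrative lifecycle sketch (hypothetical caller, not a prescribed
 * sequence):
 *
 *	err = mlx5_eswitch_init(dev);
 *	...
 *	err = mlx5_eswitch_enable(esw, num_vfs);
 *	...
 *	mlx5_eswitch_disable(esw, clear_vf);
 *	mlx5_eswitch_cleanup(esw);
 */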
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport,
					  void *in, int inlen);
int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					 bool other_vport,
					 void *out, int outlen);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr);
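/* Illustrative pairing (hypothetical caller): an offloaded rule is released
 * with the same attr it was added with, e.g.
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */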
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

enum {
	SET_VLAN_STRIP	= BIT(0),
	SET_VLAN_INSERT	= BIT(1)
};

enum mlx5_flow_match_level {
	MLX5_MATCH_NONE	= MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2	= MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3	= MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4	= MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

enum {
	MLX5_ESW_DEST_ENCAP		= BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID	= BIT(1),
};

enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED	= BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH	= BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT	= BIT(2),
};

struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;

	int split_count;
	int out_count;

	int action;
	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_modify_hdr *modify_hdr;
	u8 inner_match_level;
	u8 outer_match_level;
	struct mlx5_fc *counter;
	u32 chain;
	u16 prio;
	u32 dest_chain;
	u32 flags;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_table *dest_ft;
	struct mlx5_ct_attr ct_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
};

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport,
					  u16 vlan_id, u32 flow_action);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
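/* Illustrative use (hypothetical offload path): double VLAN push/pop support
 * is typically gated on this helper, e.g.
 *
 *	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 2))
 *		return -EOPNOTSUPP;
 */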
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* The returned number is valid only when the dev is an eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
{
	/* Ideally, the device that processes the functions_changed event,
	 * i.e. the eswitch manager (whether ECPF or PF), should report the
	 * esw_functions_changed capability. However, some ECPF-based devices
	 * do not set this capability, hence the additional ECPF check.
	 */
	return MLX5_CAP_ESW(dev, esw_functions_changed) ||
	       mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
	/* The uplink is always the last element of the vports array. */
	return esw->total_vports - 1;
}

static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
	return esw->total_vports - 2;
}

static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
						  u16 vport_num)
{
	if (vport_num == MLX5_VPORT_ECPF) {
		if (!mlx5_ecpf_vport_exists(esw->dev))
			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
		return mlx5_eswitch_ecpf_idx(esw);
	}

	if (vport_num == MLX5_VPORT_UPLINK)
		return mlx5_eswitch_uplink_idx(esw);

	return vport_num;
}

static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
						  int index)
{
	if (index == mlx5_eswitch_ecpf_idx(esw) &&
	    mlx5_ecpf_vport_exists(esw->dev))
		return MLX5_VPORT_ECPF;

	if (index == mlx5_eswitch_uplink_idx(esw))
		return MLX5_VPORT_UPLINK;

	return index;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_vports(esw, i, vport)		\
	for ((i) = MLX5_VPORT_PF;			\
	     (vport) = &(esw)->vports[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_all_vports_reverse(esw, i, vport)	\
	for ((i) = (esw)->total_vports - 1;		\
	     (vport) = &(esw)->vports[i],		\
	     (i) >= MLX5_VPORT_PF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs)	\
	for ((i) = MLX5_VPORT_FIRST_VF;			\
	     (vport) = &(esw)->vports[(i)],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs)	\
	for ((i) = (nvfs);					\
	     (vport) = &(esw)->vports[(i)],			\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep)			\
	for ((i) = MLX5_VPORT_PF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs)		\
	for ((i) = MLX5_VPORT_FIRST_VF;				\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);					\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs)	\
	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
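/* Illustrative iteration (hypothetical caller): the (index, vport) pair
 * iterators walk the vports array directly, with the VF bound being
 * inclusive, e.g.
 *
 *	struct mlx5_vport *vport;
 *	int i;
 *
 *	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
 *		... per-VF-vport work ...
 *	}
 */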
/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs)	\
	for ((i) = (esw)->first_host_vport;			\
	     (rep) = &(esw)->offloads.vport_reps[i],		\
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs)	\
	for ((i) = (nvfs);						\
	     (rep) = &(esw)->offloads.vport_reps[i],			\
	     (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs)	\
	for ((vport) = (esw)->first_host_vport;			\
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs)	\
	for ((vport) = (nvfs);						\
	     (vport) >= (esw)->first_host_vport; (vport)--)

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

#else  /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int  mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */