/*
 * Copyright (c) 2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __MLX5_ESWITCH_H__
#define __MLX5_ESWITCH_H__

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "en/tc_ct.h"

/* FDB chain layout: chains [0..FDB_TC_MAX_CHAIN] are usable by TC,
 * FDB_FT_CHAIN is the first chain past the TC range, and the TC
 * slow-path chain sits directly after it.
 */
#define FDB_TC_MAX_CHAIN 3
#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)

/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)

#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2

#ifdef CONFIG_MLX5_ESWITCH

#define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15

/* Maximum unicast/multicast L2 addresses a vport may hold, derived from
 * the device's log2-encoded general capabilities.
 */
#define MLX5_MAX_UC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))

#define MLX5_MAX_MC_PER_VPORT(dev) \
	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))

#define MLX5_MIN_BW_SHARE 1

/* Convert a rate to a TSAR bw_share value, clamping the result to the
 * inclusive range [MLX5_MIN_BW_SHARE, limit].
 */
#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
	min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)

#define mlx5_esw_has_fwd_fdb(dev) \
	MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

/* Per-vport ingress ACL state. The legacy and offloads sub-structs hold
 * the flow groups/rules used by the corresponding eswitch mode.
 */
struct vport_ingress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allow_rule;
	struct {
		struct mlx5_flow_group *allow_spoofchk_only_grp;
		struct mlx5_flow_group *allow_untagged_spoofchk_grp;
		struct mlx5_flow_group *allow_untagged_only_grp;
		struct mlx5_flow_group *drop_grp;
		struct mlx5_flow_handle *drop_rule;
		struct mlx5_fc *drop_counter; /* flow counter behind drop_rule */
	} legacy;
	struct {
		/* Optional group to add an FTE to do internal priority
		 * tagging on ingress packets.
		 */
		struct mlx5_flow_group *metadata_prio_tag_grp;
		/* Group to add default match-all FTE entry to tag ingress
		 * packet with metadata.
		 */
		struct mlx5_flow_group *metadata_allmatch_grp;
		struct mlx5_modify_hdr *modify_metadata;
		struct mlx5_flow_handle *modify_metadata_rule;
	} offloads;
};

/* Per-vport egress ACL state. legacy and offloads share storage via the
 * anonymous union since only one eswitch mode is active at a time.
 */
struct vport_egress {
	struct mlx5_flow_table *acl;
	struct mlx5_flow_handle *allowed_vlan;
	struct mlx5_flow_group *vlan_grp;
	union {
		struct {
			struct mlx5_flow_group *drop_grp;
			struct mlx5_flow_handle *drop_rule;
			struct mlx5_fc *drop_counter;
		} legacy;
		struct {
			struct mlx5_flow_group *fwd_grp;
			struct mlx5_flow_handle *fwd_rule;
		} offloads;
	};
};

struct mlx5_vport_drop_stats {
	u64 rx_dropped;
	u64 tx_dropped;
};

/* Administrative per-vport configuration; cf. the ifla_vf_info fields
 * exposed through mlx5_eswitch_get_vport_config().
 */
struct mlx5_vport_info {
	u8 mac[ETH_ALEN];
	u16 vlan;
	u8 qos;
	u64 node_guid;
	int link_state;
	u32 min_rate;
	u32 max_rate;
	bool spoofchk;
	bool trusted;
};

/* Vport context events */
enum mlx5_eswitch_vport_event {
	MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
	MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
	MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};

/* Runtime state of a single eswitch virtual port. */
struct mlx5_vport {
	struct mlx5_core_dev *dev;
	int vport;
	struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
	struct mlx5_flow_handle *promisc_rule;
	struct mlx5_flow_handle *allmulti_rule;
	struct work_struct vport_change_handler;

	struct vport_ingress ingress;
	struct vport_egress egress;
	u32 default_metadata;
	u32 metadata;

	struct mlx5_vport_info info;

	struct {
		bool enabled;
		u32 esw_tsar_ix;
		u32 bw_share;
	} qos;

	bool enabled;
	/* bitmask of mlx5_eswitch_vport_event values this vport listens to */
	enum mlx5_eswitch_vport_event enabled_events;
};

enum offloads_fdb_flags {
	ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
};

struct mlx5_esw_chains_priv;

/* FDB (forwarding database) state. The legacy and offloads layouts share
 * storage via the anonymous union; which one is live follows the current
 * eswitch mode.
 */
struct mlx5_eswitch_fdb {
	union {
		struct legacy_fdb {
			struct mlx5_flow_table *fdb;
			struct mlx5_flow_group *addr_grp;
			struct mlx5_flow_group *allmulti_grp;
			struct mlx5_flow_group *promisc_grp;
			struct mlx5_flow_table *vepa_fdb;
			struct mlx5_flow_handle *vepa_uplink_rule;
			struct mlx5_flow_handle *vepa_star_rule;
		} legacy;

		struct offloads_fdb {
			struct mlx5_flow_namespace *ns;
			struct mlx5_flow_table *slow_fdb;
			struct mlx5_flow_group *send_to_vport_grp;
			struct mlx5_flow_group *peer_miss_grp;
			struct mlx5_flow_handle **peer_miss_rules;
			struct mlx5_flow_group *miss_grp;
			struct mlx5_flow_handle *miss_rule_uni;
			struct mlx5_flow_handle *miss_rule_multi;
			int vlan_push_pop_refcount;

			struct mlx5_esw_chains_priv *esw_chains_priv;
			struct {
				DECLARE_HASHTABLE(table, 8);
				/* Protects vports.table */
				struct mutex lock;
			} vports;

		} offloads;
	};
	u32 flags;
};

/* Offloads-mode state: restore/RX tables, representors, and the hash
 * tables (each guarded by its own mutex, as annotated below).
 */
struct mlx5_esw_offload {
	struct mlx5_flow_table *ft_offloads_restore;
	struct mlx5_flow_group *restore_group;
	struct mlx5_modify_hdr *restore_copy_hdr_id;

	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_group *vport_rx_group;
	struct mlx5_eswitch_rep *vport_reps;
	struct list_head peer_flows;
	struct mutex peer_mutex;
	struct mutex encap_tbl_lock; /* protects encap_tbl */
	DECLARE_HASHTABLE(encap_tbl, 8);
	struct mutex decap_tbl_lock; /* protects decap_tbl */
	DECLARE_HASHTABLE(decap_tbl, 8);
	struct mod_hdr_tbl mod_hdr;
	DECLARE_HASHTABLE(termtbl_tbl, 8);
	struct mutex termtbl_mutex; /* protects termtbl hash */
	const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
	u8 inline_mode;
	atomic64_t num_flows;
	enum devlink_eswitch_encap_mode encap;
	struct ida vport_metadata_ida;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node node;
	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
	u32 refcnt;
};

struct mlx5_host_work {
	struct work_struct work;
	struct mlx5_eswitch *esw;
};

struct mlx5_esw_functions {
	struct mlx5_nb nb;
	u16 num_vfs;
};

/* Values for mlx5_eswitch.flags */
enum {
	MLX5_ESWITCH_VPORT_MATCH_METADATA = BIT(0),
	MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};

/* Top-level eswitch instance state, anchored to its mlx5_core_dev. */
struct mlx5_eswitch {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;
	struct mlx5_eswitch_fdb fdb_table;
	/* legacy data structures */
	struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
	struct esw_mc_addr mc_promisc;
	/* end of legacy */
	struct workqueue_struct *work_queue;
	struct mlx5_vport *vports;
	u32 flags;
	int total_vports;
	int enabled_vports;
	/* Synchronize between vport change events
	 * and async SRIOV admin state changes
	 */
	struct mutex state_lock;

	/* Protects eswitch mode change that occurs via one or more
	 * user commands, i.e. sriov state change, devlink commands.
	 */
	struct mutex mode_lock;

	struct {
		bool enabled;
		u32 root_tsar_id;
	} qos;

	struct mlx5_esw_offload offloads;
	int mode;
	int nvports;
	u16 manager_vport;
	u16 first_host_vport;
	struct mlx5_esw_functions esw_funcs;
	struct {
		u32 large_group_num;
	} params;
};

void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);

/* Allocate/free a per-vport match-metadata value (backed by
 * offloads.vport_metadata_ida).
 */
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw);
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps);

/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);

/* Sentinel num_vfs value meaning "keep the current VF count". -- presumed
 * from the name; confirm against mlx5_eswitch_enable_locked() callers.
 */
#define MLX5_ESWITCH_IGNORE_NUM_VFS (-1)
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs);
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, u8 mac[ETH_ALEN]);
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos);
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport_num, bool setting);
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate);
int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting);
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport,
				 struct ifla_vf_stats *vf_stats);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in);

struct mlx5_flow_spec;
struct mlx5_esw_flow_attr;
struct mlx5_termtbl_handle;

/* Termination table helpers (hashed in offloads.termtbl_tbl). */
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_spec *spec);

struct mlx5_flow_handle *
mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *attr,
			      struct mlx5_flow_act *flow_act,
			      struct mlx5_flow_destination *dest,
			      int num_dest);

void
mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
			 struct mlx5_termtbl_handle *tt);
/* Fast-path offloaded rule add/delete entry points (FDB flow rules). */
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr);

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest);

/* Flag bits; presumably the set_flags argument of
 * __mlx5_eswitch_set_vport_vlan() — confirm against callers.
 */
enum {
	SET_VLAN_STRIP = BIT(0),
	SET_VLAN_INSERT = BIT(1)
};

/* Match levels map 1:1 onto the device inline-mode encodings. */
enum mlx5_flow_match_level {
	MLX5_MATCH_NONE = MLX5_INLINE_MODE_NONE,
	MLX5_MATCH_L2 = MLX5_INLINE_MODE_L2,
	MLX5_MATCH_L3 = MLX5_INLINE_MODE_IP,
	MLX5_MATCH_L4 = MLX5_INLINE_MODE_TCP_UDP,
};

/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2

/* Per-destination flags (mlx5_esw_flow_attr.dests[].flags). */
enum {
	MLX5_ESW_DEST_ENCAP = BIT(0),
	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
};

/* Per-attr flags (mlx5_esw_flow_attr.flags). */
enum {
	MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
	MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
	MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
};

/* Everything needed to build one offloaded eswitch flow rule: source rep,
 * VLAN push/pop descriptors, up to MLX5_MAX_FLOW_FWD_VPORTS forward
 * destinations, header-rewrite/counter handles, and chain/prio placement.
 */
struct mlx5_esw_flow_attr {
	struct mlx5_eswitch_rep *in_rep;
	struct mlx5_core_dev *in_mdev;
	struct mlx5_core_dev *counter_dev;

	int split_count;
	int out_count;

	int action;
	__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
	u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
	u8 total_vlan;
	struct {
		u32 flags;
		struct mlx5_eswitch_rep *rep;
		struct mlx5_pkt_reformat *pkt_reformat;
		struct mlx5_core_dev *mdev;
		struct mlx5_termtbl_handle *termtbl;
	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5_modify_hdr *modify_hdr;
	u8 inner_match_level;
	u8 outer_match_level;
	struct mlx5_fc *counter;
	u32 chain;
	u16 prio;
	u32 dest_chain;
	u32 flags;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_table *dest_ft;
	struct mlx5_ct_attr ct_attr;
	struct mlx5_pkt_reformat *decap_pkt_reformat;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
};

/* devlink eswitch runtime-configuration callbacks */
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags);

static inline bool mlx5_esw_qos_enabled(struct mlx5_eswitch *esw)
{
	return esw->qos.enabled;
}

/* True if the FDB supports VLAN pop+push actions at the requested depth
 * (depth 1 needs pop_vlan/push_vlan; deeper also needs the *_2 caps).
 */
static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
						       u8 vlan_depth)
{
	bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) &&
		   MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan);

	if (vlan_depth == 1)
		return ret;

	return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) &&
		MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
			 struct mlx5_core_dev *dev1);
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1);

const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev);

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(__dev, format, ...) \
	dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_warn(__dev, format, ...) \
	dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)

#define esw_debug(dev, format, ...) \
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

/* The returned number is valid only when the dev is eswitch manager. */
static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_ECPF : MLX5_VPORT_PF;
}

static inline bool
mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return esw->manager_vport == vport_num;
}

static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_ecpf_esw_manager(dev) ?
		MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}

static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
{
	/* Ideally device should have the functions changed supported
	 * capability regardless of it being ECPF or PF wherever such
	 * event should be processed such as on eswitch manager device.
	 * However, some ECPF based device might not have this capability
	 * set. Hence OR for ECPF check to cover such device.
	 */
	return MLX5_CAP_ESW(dev, esw_functions_changed) ||
	       mlx5_core_is_ecpf_esw_manager(dev);
}

static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
{
	/* Uplink always locate at the last element of the array.*/
	return esw->total_vports - 1;
}

/* ECPF, when present, occupies the next-to-last vports slot (see
 * mlx5_eswitch_vport_num_to_index below).
 */
static inline int mlx5_eswitch_ecpf_idx(struct mlx5_eswitch *esw)
{
	return esw->total_vports - 2;
}

/* Map a vport number to its index in esw->vports: ECPF and UPLINK live
 * at fixed slots at the end of the array; all other vport numbers are
 * used as-is.
 */
static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
						  u16 vport_num)
{
	if (vport_num == MLX5_VPORT_ECPF) {
		if (!mlx5_ecpf_vport_exists(esw->dev))
			esw_warn(esw->dev, "ECPF vport doesn't exist!\n");
		return mlx5_eswitch_ecpf_idx(esw);
	}

	if (vport_num == MLX5_VPORT_UPLINK)
		return mlx5_eswitch_uplink_idx(esw);

	return vport_num;
}

/* Inverse of mlx5_eswitch_vport_num_to_index(). */
static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
						  int index)
{
	if (index == mlx5_eswitch_ecpf_idx(esw) &&
	    mlx5_ecpf_vport_exists(esw->dev))
		return MLX5_VPORT_ECPF;

	if (index == mlx5_eswitch_uplink_idx(esw))
		return MLX5_VPORT_UPLINK;

	return index;
}

/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

/* The vport getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 *
 * NOTE(review): the comma expression computes the element address before
 * the bound test, so on the terminating iteration an address one past the
 * valid range is formed (but never dereferenced).
 */
#define mlx5_esw_for_all_vports(esw, i, vport) \
	for ((i) = MLX5_VPORT_PF; \
	     (vport) = &(esw)->vports[i], \
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
	for ((i) = (esw)->total_vports - 1; \
	     (vport) = &(esw)->vports[i], \
	     (i) >= MLX5_VPORT_PF; (i)--)

#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
	for ((i) = MLX5_VPORT_FIRST_VF; \
	     (vport) = &(esw)->vports[(i)], \
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
	for ((i) = (nvfs); \
	     (vport) = &(esw)->vports[(i)], \
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

/* The rep getter/iterator are only valid after esw->total_vports
 * and vport->vport are initialized in mlx5_eswitch_init.
 */
#define mlx5_esw_for_all_reps(esw, i, rep) \
	for ((i) = MLX5_VPORT_PF; \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) < (esw)->total_vports; (i)++)

#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
	for ((i) = MLX5_VPORT_FIRST_VF; \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
	for ((i) = (nvfs); \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) >= MLX5_VPORT_FIRST_VF; (i)--)

#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
	for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
	for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)

/* Includes host PF (vport 0) if it's not esw manager. */
#define mlx5_esw_for_each_host_func_rep(esw, i, rep, nvfs) \
	for ((i) = (esw)->first_host_vport; \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) <= (nvfs); (i)++)

#define mlx5_esw_for_each_host_func_rep_reverse(esw, i, rep, nvfs) \
	for ((i) = (nvfs); \
	     (rep) = &(esw)->offloads.vport_reps[i], \
	     (i) >= (esw)->first_host_vport; (i)--)

#define mlx5_esw_for_each_host_func_vport(esw, vport, nvfs) \
	for ((vport) = (esw)->first_host_vport; \
	     (vport) <= (nvfs); (vport)++)

#define mlx5_esw_for_each_host_func_vport_reverse(esw, vport, nvfs) \
	for ((vport) = (nvfs); \
	     (vport) >= (esw)->first_host_vport; (vport)--)

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);

int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);

int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport);
void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport);

int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw);
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw);

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw);

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);

#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_MLX5_ESWITCH */

#endif /* __MLX5_ESWITCH_H__ */