1 /* 2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"

/*
 * Stub implementations of the flow command interface.  These perform no
 * firmware interaction: they either succeed trivially (return 0) or report
 * the operation as unsupported.  They back namespaces whose tables are not
 * managed through the FW command path.
 */

/* No-op: nothing to reconnect in a stub namespace. */
static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
					struct mlx5_flow_table *ft,
					u32 underlay_qpn,
					bool disconnect)
{
	return 0;
}

/*
 * Stub table creation: no FW object is created, but the requested capacity
 * is still recorded.  max_fte is rounded up to a power of two (minimum 1)
 * so later size accounting behaves like the real path.
 */
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table_attr *ft_attr,
					   struct mlx5_flow_table *next_ft)
{
	int max_fte = ft_attr->max_fte;

	ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;

	return 0;
}

static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft)
{
	return 0;
}

static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   struct mlx5_flow_table *next_ft)
{
	return 0;
}

static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    struct mlx5_flow_group *fg)
{
	return 0;
}

static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    struct fs_fte *fte)
{
	return 0;
}

/* In-place FTE modification is not supported by the stub path. */
static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *group,
				    int modify_mask,
				    struct fs_fte *fte)
{
	return -EOPNOTSUPP;
}

static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte)
{
	return 0;
}

static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					       struct mlx5_pkt_reformat_params *params,
					       enum mlx5_flow_namespace_type namespace,
					       struct mlx5_pkt_reformat *pkt_reformat)
{
	return 0;
}

static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
						  struct mlx5_pkt_reformat *pkt_reformat)
{
}

static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					     u8 namespace, u8 num_actions,
					     void *modify_actions,
					     struct mlx5_modify_hdr *modify_hdr)
{
	return 0;
}

static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
						struct mlx5_modify_hdr *modify_hdr)
{
}

static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
				  struct mlx5_flow_root_namespace *peer_ns,
				  u16 peer_vhca_id)
{
	return 0;
}

static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
{
	return 0;
}

static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
					  enum fs_flow_table_type ft_type)
{
	return 0;
}

/*
 * Point a shared-FDB slave device at a root flow table.  When ft_id_valid,
 * the slave's FDB root is set to table ft_id owned by the master (the
 * master's vhca_id is carried in the command); otherwise the slave is
 * restored to its own FDB namespace root table.  The command executes on
 * the slave device.
 */
static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
				       struct mlx5_core_dev *slave,
				       bool ft_id_valid,
				       u32 ft_id)
{
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);
	if (ft_id_valid) {
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 ft_id);
	} else {
		/* Fall back to the slave's own FDB root table. */
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}

static int
mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
				    int definer_id)
{
	return 0;
}

static int
mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
				   u16 format_id, u32 *match_mask)
{
	return 0;
}

/*
 * Set (or disconnect) the root flow table of a namespace via
 * SET_FLOW_TABLE_ROOT.  For a shared-FDB LAG master, the new root is also
 * propagated to every peer device; if propagation fails mid-way, peers
 * already updated are rolled back and the master's previous root is
 * restored before the error is returned.
 */
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
				   struct mlx5_flow_table *ft, u32 underlay_qpn,
				   bool disconnect)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	/* On IB ports a zero underlay QPN means there is nothing to set. */
	if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	    underlay_qpn == 0)
		return 0;

	/* In shared-FDB LAG only the master programs the FDB root. */
	if (ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    !mlx5_lag_is_master(dev))
		return 0;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);

	if (disconnect)
		MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
	else
		MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);

	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
	MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
	MLX5_SET(set_flow_table_root_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, set_flow_table_root, in);
	if (!err &&
	    ft->type == FS_FT_FDB &&
	    mlx5_lag_is_shared_fdb(dev) &&
	    mlx5_lag_is_master(dev)) {
		struct mlx5_core_dev *peer_dev;
		int i, j;

		mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
			err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
							  (!disconnect) ? ft->id : 0);
			if (err && !disconnect) {
				/* Roll back peers updated before the failure
				 * (indices j < i) to the previous root ...
				 */
				mlx5_lag_for_each_peer_mdev(dev, peer_dev, j) {
					if (j < i)
						mlx5_cmd_set_slave_root_fdb(dev, peer_dev, 1,
									    ns->root_ft->id);
					else
						break;
				}

				/* ... and restore the master's old root too. */
				MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
				MLX5_SET(set_flow_table_root_in, in, table_id,
					 ns->root_ft->id);
				mlx5_cmd_exec_in(dev, set_flow_table_root, in);
			}
			if (err)
				break;
		}

	}

	return err;
}

/*
 * Create a FW flow table.  The requested max_fte is translated to an
 * available pool size (log2-encoded in the command); on success the FW
 * table id and the granted size are recorded on ft, on failure the size
 * is returned to the pool.
 */
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table_attr *ft_attr,
				      struct mlx5_flow_table *next_ft)
{
	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
	int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
	int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int size;
	int err;

	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
	if (!size)
		return -ENOSPC;

	MLX5_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);

	MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ?
		 ilog2(size) : 0);
	MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
		 en_decap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
		 en_encap);
	MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
		 term);

	switch (ft->op_mod) {
	case FS_FT_OP_MOD_NORMAL:
		/* Miss behavior: forward to next_ft when chained, otherwise
		 * use the table's configured default miss action.
		 */
		if (next_ft) {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_id, next_ft->id);
		} else {
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
		break;

	case FS_FT_OP_MOD_LAG_DEMUX:
		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
		if (next_ft)
			MLX5_SET(create_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id,
				 next_ft->id);
		break;
	}

	err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
	if (!err) {
		ft->id = MLX5_GET(create_flow_table_out, out,
				  table_id);
		ft->max_fte = size;
	} else {
		/* Creation failed - return the reserved size to the pool. */
		mlx5_ft_pool_put_sz(ns->dev, size);
	}

	return err;
}

/* Destroy a FW flow table and return its capacity to the FT pool. */
static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(destroy_flow_table_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_table_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
	if (!err)
		mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);

	return err;
}

/*
 * Update a table's chaining: either the LAG demux next-table id or the
 * regular miss-table target (forward to next_ft, or revert to the default
 * miss action when next_ft is NULL).
 */
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_table *next_ft)
{
	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(modify_flow_table_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);

	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.lag_master_next_table_id, 0);
		}
	} else {
		MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
		MLX5_SET(modify_flow_table_in, in, other_vport,
			 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
		MLX5_SET(modify_flow_table_in, in, modify_field_select,
			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
		if (next_ft) {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 MLX5_FLOW_TABLE_MISS_ACTION_FWD);
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_id,
				 next_ft->id);
		} else {
			MLX5_SET(modify_flow_table_in, in,
				 flow_table_context.table_miss_action,
				 ft->def_miss_action);
		}
	}

	return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}

/*
 * Create a flow group inside ft.  The caller-provided 'in' already holds
 * the match criteria; only the table addressing fields are filled here.
 * On success the FW group id is stored in fg->id.
 */
static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      u32 *in,
				      struct mlx5_flow_group *fg)
{
	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	int err;

	MLX5_SET(create_flow_group_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
	MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(create_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
	if (!err)
		fg->id = MLX5_GET(create_flow_group_out, out,
				  group_id);
	return err;
}

static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
				       struct mlx5_flow_table *ft,
				       struct mlx5_flow_group *fg)
{
	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(destroy_flow_group_in, in, opcode,
		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
	MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
	MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
	MLX5_SET(destroy_flow_group_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
	return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}

/*
 * Decide whether an FTE needs the extended destination format: required
 * when it forwards to more than one destination and at least one vport
 * destination carries a per-destination packet reformat.  Validates the
 * result against FW encap-uplink capabilities.
 */
static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
				  struct fs_fte *fte, bool *extended_dest)
{
	int fw_log_max_fdb_encap_uplink =
		MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
	int num_fwd_destinations = 0;
	struct mlx5_flow_rule *dst;
	int num_encap = 0;

	*extended_dest = false;
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return 0;

	list_for_each_entry(dst, &fte->node.children, node.list) {
		/* Counters and NONE entries are not forwarding targets. */
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
		    dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
			continue;
		if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		     dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		    dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
			num_encap++;
		num_fwd_destinations++;
	}
	if (num_fwd_destinations > 1 && num_encap > 0)
		*extended_dest = true;

	if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
		mlx5_core_warn(dev, "FW does not support extended destination");
		return -EOPNOTSUPP;
	}
	if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
		mlx5_core_warn(dev, "FW does not support more than %d encaps",
			       1 << fw_log_max_fdb_encap_uplink);
		return -EOPNOTSUPP;
	}

	return 0;
}

/* Fill the first execute_aso slot of a flow context with the FTE's
 * flow-meter ASO parameters.
 */
static void
mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
{
	void *exe_aso_ctrl;
	void *execute_aso;

	execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
				   execute_aso[0]);
	MLX5_SET(execute_aso, execute_aso, valid, 1);
	MLX5_SET(execute_aso, execute_aso, aso_object_id,
		 fte->action.exe_aso.object_id);

	exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
		 fte->action.exe_aso.return_reg_id);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
		 fte->action.exe_aso.type);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
		 fte->action.exe_aso.flow_meter.init_color);
	MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
		 fte->action.exe_aso.flow_meter.meter_idx);
}

/*
 * Build and execute SET_FLOW_TABLE_ENTRY for an FTE: actions, packet
 * reformat / modify-header ids, crypto, push-VLANs, match value, the
 * destination list (regular or extended format), the flow-counter list
 * and optional execute-ASO.  The input buffer is sized for dests_size
 * entries of the chosen destination format and kvfree'd on all paths.
 */
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
			    int opmod, int modify_mask,
			    struct mlx5_flow_table *ft,
			    unsigned group_id,
			    struct fs_fte *fte)
{
	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
	bool extended_dest = false;
	struct mlx5_flow_rule *dst;
	void *in_flow_context, *vlan;
	void *in_match_value;
	int reformat_id = 0;
	unsigned int inlen;
	int dst_cnt_size;
	u32 *in, action;
	void *in_dests;
	int err;

	if (mlx5_set_extended_dest(dev, fte, &extended_dest))
		return -EOPNOTSUPP;

	/* Per-destination entry size depends on the destination format. */
	if (!extended_dest)
		dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
	else
		dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);

	inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, op_mod, opmod);
	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
	MLX5_SET(set_fte_in, in, table_type, ft->type);
	MLX5_SET(set_fte_in, in, table_id, ft->id);
	MLX5_SET(set_fte_in, in, flow_index, fte->index);
	MLX5_SET(set_fte_in, in, ignore_flow_level,
		 !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));

	MLX5_SET(set_fte_in, in, vport_number, ft->vport);
	MLX5_SET(set_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
	MLX5_SET(flow_context, in_flow_context, group_id, group_id);

	MLX5_SET(flow_context, in_flow_context, flow_tag,
		 fte->flow_context.flow_tag);
	MLX5_SET(flow_context, in_flow_context, flow_source,
		 fte->flow_context.flow_source);

	MLX5_SET(flow_context, in_flow_context, extended_destination,
		 extended_dest);

	action = fte->action.action;
	/* With extended destinations, reformat is carried per-destination,
	 * not as a flow-context action.
	 */
	if (extended_dest)
		action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;

	MLX5_SET(flow_context, in_flow_context, action, action);

	if (!extended_dest && fte->action.pkt_reformat) {
		struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;

		if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
			reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
			if (reformat_id < 0) {
				mlx5_core_err(dev,
					      "Unsupported SW-owned pkt_reformat type (%d) in FW-owned table\n",
					      pkt_reformat->reformat_type);
				err = reformat_id;
				goto err_out;
			}
		} else {
			reformat_id = fte->action.pkt_reformat->id;
		}
	}

	MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);

	if (fte->action.modify_hdr) {
		if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
			mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, modify_header_id,
			 fte->action.modify_hdr->id);
	}

	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
		 fte->action.crypto.type);
	MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
		 fte->action.crypto.obj_id);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);

	vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);

	MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
	MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
	MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);

	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
				      match_value);
	memcpy(in_match_value, &fte->val, sizeof(fte->val));

	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			enum mlx5_flow_destination_type type = dst->dest_attr.type;
			enum mlx5_ifc_flow_destination_type ifc_type;
			unsigned int id;

			/* Counters go in the counter list, not here. */
			if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			switch (type) {
			case MLX5_FLOW_DESTINATION_TYPE_NONE:
				continue;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
				id = dst->dest_attr.ft_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
			case MLX5_FLOW_DESTINATION_TYPE_VPORT:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id_valid,
					 !!(dst->dest_attr.vport.flags &
					    MLX5_FLOW_DEST_VPORT_VHCA_ID));
				MLX5_SET(dest_format_struct, in_dests,
					 destination_eswitch_owner_vhca_id,
					 dst->dest_attr.vport.vhca_id);
				if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
					/* destination_id is reserved */
					id = 0;
					ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
					break;
				}
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
				id = dst->dest_attr.vport.num;
				if (extended_dest &&
				    dst->dest_attr.vport.pkt_reformat) {
					MLX5_SET(dest_format_struct, in_dests,
						 packet_reformat,
						 !!(dst->dest_attr.vport.flags &
						    MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
					MLX5_SET(extended_dest_format, in_dests,
						 packet_reformat_id,
						 dst->dest_attr.vport.pkt_reformat->id);
				}
				break;
			case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
				id = dst->dest_attr.sampler_id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
				break;
			case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
				MLX5_SET(dest_format_struct, in_dests,
					 destination_table_type, dst->dest_attr.ft->type);
				id = dst->dest_attr.ft->id;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
				break;
			default:
				id = dst->dest_attr.tir_num;
				ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
			}

			MLX5_SET(dest_format_struct, in_dests, destination_type,
				 ifc_type);
			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
			in_dests += dst_cnt_size;
			list_size++;
		}

		MLX5_SET(flow_context, in_flow_context, destination_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
					log_max_flow_counter,
					ft->type));
		int list_size = 0;

		list_for_each_entry(dst, &fte->node.children, node.list) {
			if (dst->dest_attr.type !=
			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
				continue;

			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
				 dst->dest_attr.counter_id);
			in_dests += dst_cnt_size;
			list_size++;
		}
		if (list_size > max_list_size) {
			err = -EINVAL;
			goto err_out;
		}

		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
			 list_size);
	}

	if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
		/* Only the flow-meter ASO type is supported here. */
		if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
			mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
		} else {
			err = -EOPNOTSUPP;
			goto err_out;
		}
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
err_out:
	kvfree(in);
	return err;
}

/* Create an FTE: op_mod 0, no modify mask. */
static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *group,
			       struct fs_fte *fte)
{
	struct mlx5_core_dev *dev = ns->dev;
	unsigned int group_id = group->id;

	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}

/* Atomically modify an existing FTE (op_mod 1); requires the NIC-RX
 * flow_modify_en capability.
 */
static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct mlx5_flow_group *fg,
			       int modify_mask,
			       struct fs_fte *fte)
{
	int opmod;
	struct mlx5_core_dev *dev = ns->dev;
	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
						flow_table_properties_nic_receive.
						flow_modify_en);
	if (!atomic_mod_cap)
		return -EOPNOTSUPP;
	opmod = 1;

	return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}

static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
			       struct mlx5_flow_table *ft,
			       struct fs_fte *fte)
{
	u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
	MLX5_SET(delete_fte_in, in, table_type, ft->type);
	MLX5_SET(delete_fte_in, in, table_id, ft->id);
	MLX5_SET(delete_fte_in, in, flow_index, fte->index);
	MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
	MLX5_SET(delete_fte_in, in, other_vport,
		 !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));

	return mlx5_cmd_exec_in(dev, delete_fte, in);
}

/* Allocate a bulk of flow counters; on success *id holds the base
 * counter id of the bulk.
 */
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
			   enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
			   u32 *id)
{
	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
	int err;

	MLX5_SET(alloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
	MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);

	err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
	if (!err)
		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
	return err;
}

/* Single-counter allocation: a bulk of one (bitmask 0). */
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
	return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}

int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};

	MLX5_SET(dealloc_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
	return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
}

/* Query one flow counter; returns its packet and octet counts. */
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
		      u64 *packets, u64 *bytes)
{
	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter)] = {};
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
	void *stats;
	int err = 0;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}

/* Output buffer size needed to bulk-query bulk_len counters. */
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
	return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
		MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}

/* Query bulk_len consecutive counters starting at base_id; caller's
 * 'out' must be at least mlx5_cmd_fc_get_bulk_query_out_len() bytes.
 */
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
			   u32 *out)
{
	int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
	MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

/*
 * Allocate a FW packet-reformat context.  The reformat data size is
 * validated against the per-namespace max encap header size; the input
 * buffer is sized for the variable-length reformat data and the command
 * length is computed from the reformat_data offset.  On success the FW
 * context id is stored in pkt_reformat and marked FW-owned.
 */
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
					  struct mlx5_pkt_reformat_params *params,
					  enum mlx5_flow_namespace_type namespace,
					  struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *packet_reformat_context_in;
	int max_encap_size;
	void *reformat;
	int inlen;
	int err;
	u32 *in;

	if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
	    namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
		max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
	else
		max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);

	if (params->size > max_encap_size) {
		mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
			       params->size, max_encap_size);
		return -EINVAL;
	}

	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
		     params->size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
						  in, packet_reformat_context);
	reformat = MLX5_ADDR_OF(packet_reformat_context_in,
				packet_reformat_context_in,
				reformat_data);
	/* Command length covers the header plus the trailing reformat data. */
	inlen = reformat - (void *)in + params->size;

	MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_data_size, params->size);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_type, params->type);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_0, params->param_0);
	MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
		 reformat_param_1, params->param_1);
	if (params->data && params->size)
		memcpy(reformat, params->data, params->size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
				    out, packet_reformat_id);
	pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_FW;

	kfree(in);
	return err;
}

static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_pkt_reformat *pkt_reformat)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
	MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
		 pkt_reformat->id);

	mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
}

/*
 * Allocate a FW modify-header context.  Maps the flow namespace to the
 * matching table type and max-actions capability, rejects unsupported
 * namespaces or too many actions, then issues the command with the
 * caller's action list appended.  On success the FW id is stored in
 * modify_hdr and marked FW-owned.
 */
static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
					u8 namespace, u8 num_actions,
					void *modify_actions,
					struct mlx5_modify_hdr *modify_hdr)
{
	u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
	int max_actions, actions_size, inlen, err;
	struct mlx5_core_dev *dev = ns->dev;
	void *actions_in;
	u8 table_type;
	u32 *in;

	switch (namespace) {
	case MLX5_FLOW_NAMESPACE_FDB:
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
		table_type = FS_FT_FDB;
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_BYPASS:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_RX;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
		max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
		table_type = FS_FT_NIC_TX;
		break;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
		table_type = FS_FT_ESW_INGRESS_ACL;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_MACSEC:
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
		table_type = FS_FT_RDMA_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (num_actions > max_actions) {
		mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
			       num_actions, max_actions);
		return -EOPNOTSUPP;
	}

	actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
	inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(alloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
	MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);

	actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
	memcpy(actions_in, modify_actions, actions_size);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));

	modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_FW;
	kfree(in);
	return err;
}

static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_modify_hdr *modify_hdr)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;

	MLX5_SET(dealloc_modify_header_context_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
	MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
		 modify_hdr->id);

	mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}

/* Destroy a match definer general object by id. */
static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
					  int definer_id)
{
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);

	return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
}

/* Create a match definer general object; returns the new definer's
 * object id on success, or a negative errno on failure.
 */
static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
					 u16 format_id, u32 *match_mask)
{
	u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
	u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
	struct mlx5_core_dev *dev = ns->dev;
	void *ptr;
	int err;

	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
		 MLX5_OBJ_TYPE_MATCH_DEFINER);

	ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
	MLX5_SET(match_definer, ptr, format_id, format_id);

	ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
	memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));

	err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
	return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
}

/* The FW command path advertises no extra capabilities. */
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
				     enum fs_flow_table_type ft_type)
{
	return 0;
}

/* Command vtable for FW-managed namespaces; peer/ns management ops use
 * the stub implementations.
 */
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
	.create_flow_table = mlx5_cmd_create_flow_table,
	.destroy_flow_table = mlx5_cmd_destroy_flow_table,
	.modify_flow_table = mlx5_cmd_modify_flow_table,
	.create_flow_group = mlx5_cmd_create_flow_group,
	.destroy_flow_group = mlx5_cmd_destroy_flow_group,
	.create_fte = mlx5_cmd_create_fte,
	.update_fte = mlx5_cmd_update_fte,
	.delete_fte = mlx5_cmd_delete_fte,
	.update_root_ft = mlx5_cmd_update_root_ft,
	.packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
	.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
	.modify_header_alloc = mlx5_cmd_modify_header_alloc,
	.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
	.create_match_definer = mlx5_cmd_create_match_definer,
	.destroy_match_definer = mlx5_cmd_destroy_match_definer,
	.set_peer = mlx5_cmd_stub_set_peer,
	.create_ns = mlx5_cmd_stub_create_ns,
	.destroy_ns =
mlx5_cmd_stub_destroy_ns, 1094 .get_capabilities = mlx5_cmd_get_capabilities, 1095 }; 1096 1097 static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = { 1098 .create_flow_table = mlx5_cmd_stub_create_flow_table, 1099 .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table, 1100 .modify_flow_table = mlx5_cmd_stub_modify_flow_table, 1101 .create_flow_group = mlx5_cmd_stub_create_flow_group, 1102 .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group, 1103 .create_fte = mlx5_cmd_stub_create_fte, 1104 .update_fte = mlx5_cmd_stub_update_fte, 1105 .delete_fte = mlx5_cmd_stub_delete_fte, 1106 .update_root_ft = mlx5_cmd_stub_update_root_ft, 1107 .packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc, 1108 .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc, 1109 .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc, 1110 .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc, 1111 .create_match_definer = mlx5_cmd_stub_create_match_definer, 1112 .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer, 1113 .set_peer = mlx5_cmd_stub_set_peer, 1114 .create_ns = mlx5_cmd_stub_create_ns, 1115 .destroy_ns = mlx5_cmd_stub_destroy_ns, 1116 .get_capabilities = mlx5_cmd_stub_get_capabilities, 1117 }; 1118 1119 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void) 1120 { 1121 return &mlx5_flow_cmds; 1122 } 1123 1124 static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void) 1125 { 1126 return &mlx5_flow_cmd_stubs; 1127 } 1128 1129 const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type) 1130 { 1131 switch (type) { 1132 case FS_FT_NIC_RX: 1133 case FS_FT_ESW_EGRESS_ACL: 1134 case FS_FT_ESW_INGRESS_ACL: 1135 case FS_FT_FDB: 1136 case FS_FT_SNIFFER_RX: 1137 case FS_FT_SNIFFER_TX: 1138 case FS_FT_NIC_TX: 1139 case FS_FT_RDMA_RX: 1140 case FS_FT_RDMA_TX: 1141 case FS_FT_PORT_SEL: 1142 return mlx5_fs_cmd_get_fw_cmds(); 1143 default: 1144 return mlx5_fs_cmd_get_stub_cmds(); 1145 } 1146 } 1147