1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 /* Copyright (c) 2019 Mellanox Technologies. */ 3 4 #include <linux/types.h> 5 #include <linux/crc32.h> 6 #include "dr_ste.h" 7 8 struct dr_hw_ste_format { 9 u8 ctrl[DR_STE_SIZE_CTRL]; 10 u8 tag[DR_STE_SIZE_TAG]; 11 u8 mask[DR_STE_SIZE_MASK]; 12 }; 13 14 static u32 dr_ste_crc32_calc(const void *input_data, size_t length) 15 { 16 u32 crc = crc32(0, input_data, length); 17 18 return (__force u32)htonl(crc); 19 } 20 21 bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps) 22 { 23 return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5; 24 } 25 26 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl) 27 { 28 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; 29 u8 masked[DR_STE_SIZE_TAG] = {}; 30 u32 crc32, index; 31 u16 bit; 32 int i; 33 34 /* Don't calculate CRC if the result is predicted */ 35 if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0) 36 return 0; 37 38 /* Mask tag using byte mask, bit per byte */ 39 bit = 1 << (DR_STE_SIZE_TAG - 1); 40 for (i = 0; i < DR_STE_SIZE_TAG; i++) { 41 if (htbl->byte_mask & bit) 42 masked[i] = hw_ste->tag[i]; 43 44 bit = bit >> 1; 45 } 46 47 crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG); 48 index = crc32 & (htbl->chunk->num_of_entries - 1); 49 50 return index; 51 } 52 53 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask) 54 { 55 u16 byte_mask = 0; 56 int i; 57 58 for (i = 0; i < DR_STE_SIZE_MASK; i++) { 59 byte_mask = byte_mask << 1; 60 if (bit_mask[i] == 0xff) 61 byte_mask |= 1; 62 } 63 return byte_mask; 64 } 65 66 static u8 *dr_ste_get_tag(u8 *hw_ste_p) 67 { 68 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; 69 70 return hw_ste->tag; 71 } 72 73 void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask) 74 { 75 struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p; 76 77 memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK); 78 } 79 80 static void 
dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste) 81 { 82 memset(&hw_ste->tag, 0, sizeof(hw_ste->tag)); 83 memset(&hw_ste->mask, 0, sizeof(hw_ste->mask)); 84 } 85 86 static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste) 87 { 88 hw_ste->tag[0] = 0xdc; 89 hw_ste->mask[0] = 0; 90 } 91 92 void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx, 93 u8 *hw_ste_p, u64 miss_addr) 94 { 95 ste_ctx->set_miss_addr(hw_ste_p, miss_addr); 96 } 97 98 static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx, 99 struct mlx5dr_ste *ste, u64 miss_addr) 100 { 101 u8 *hw_ste_p = ste->hw_ste; 102 103 ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); 104 ste_ctx->set_miss_addr(hw_ste_p, miss_addr); 105 dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste); 106 } 107 108 void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx, 109 u8 *hw_ste, u64 icm_addr, u32 ht_size) 110 { 111 ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size); 112 } 113 114 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste) 115 { 116 u32 index = ste - ste->htbl->ste_arr; 117 118 return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index; 119 } 120 121 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste) 122 { 123 u32 index = ste - ste->htbl->ste_arr; 124 125 return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index; 126 } 127 128 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste) 129 { 130 u32 index = ste - ste->htbl->ste_arr; 131 132 return &ste->htbl->miss_list[index]; 133 } 134 135 static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx, 136 struct mlx5dr_ste *ste, 137 struct mlx5dr_ste_htbl *next_htbl) 138 { 139 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk; 140 u8 *hw_ste = ste->hw_ste; 141 142 ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask); 143 ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type); 144 ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries); 145 146 dr_ste_set_always_hit((struct 
dr_hw_ste_format *)ste->hw_ste); 147 } 148 149 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher, 150 u8 ste_location) 151 { 152 return ste_location == nic_matcher->num_of_builders; 153 } 154 155 /* Replace relevant fields, except of: 156 * htbl - keep the origin htbl 157 * miss_list + list - already took the src from the list. 158 * icm_addr/mr_addr - depends on the hosting table. 159 * 160 * Before: 161 * | a | -> | b | -> | c | -> 162 * 163 * After: 164 * | a | -> | c | -> 165 * While the data that was in b copied to a. 166 */ 167 static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) 168 { 169 memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED); 170 dst->next_htbl = src->next_htbl; 171 if (dst->next_htbl) 172 dst->next_htbl->pointing_ste = dst; 173 174 dst->refcount = src->refcount; 175 176 INIT_LIST_HEAD(&dst->rule_list); 177 list_splice_tail_init(&src->rule_list, &dst->rule_list); 178 } 179 180 /* Free ste which is the head and the only one in miss_list */ 181 static void 182 dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx, 183 struct mlx5dr_ste *ste, 184 struct mlx5dr_matcher_rx_tx *nic_matcher, 185 struct mlx5dr_ste_send_info *ste_info_head, 186 struct list_head *send_ste_list, 187 struct mlx5dr_ste_htbl *stats_tbl) 188 { 189 u8 tmp_data_ste[DR_STE_SIZE] = {}; 190 struct mlx5dr_ste tmp_ste = {}; 191 u64 miss_addr; 192 193 tmp_ste.hw_ste = tmp_data_ste; 194 195 /* Use temp ste because dr_ste_always_miss_addr 196 * touches bit_mask area which doesn't exist at ste->hw_ste. 
197 */ 198 memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); 199 miss_addr = nic_matcher->e_anchor->chunk->icm_addr; 200 dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr); 201 memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED); 202 203 list_del_init(&ste->miss_list_node); 204 205 /* Write full STE size in order to have "always_miss" */ 206 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 207 0, tmp_data_ste, 208 ste_info_head, 209 send_ste_list, 210 true /* Copy data */); 211 212 stats_tbl->ctrl.num_of_valid_entries--; 213 } 214 215 /* Free ste which is the head but NOT the only one in miss_list: 216 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0 217 */ 218 static void 219 dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher, 220 struct mlx5dr_ste *ste, 221 struct mlx5dr_ste *next_ste, 222 struct mlx5dr_ste_send_info *ste_info_head, 223 struct list_head *send_ste_list, 224 struct mlx5dr_ste_htbl *stats_tbl) 225 226 { 227 struct mlx5dr_ste_htbl *next_miss_htbl; 228 u8 hw_ste[DR_STE_SIZE] = {}; 229 int sb_idx; 230 231 next_miss_htbl = next_ste->htbl; 232 233 /* Remove from the miss_list the next_ste before copy */ 234 list_del_init(&next_ste->miss_list_node); 235 236 /* All rule-members that use next_ste should know about that */ 237 mlx5dr_rule_update_rule_member(next_ste, ste); 238 239 /* Move data from next into ste */ 240 dr_ste_replace(ste, next_ste); 241 242 /* Copy all 64 hw_ste bytes */ 243 memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); 244 sb_idx = ste->ste_chain_location - 1; 245 mlx5dr_ste_set_bit_mask(hw_ste, 246 nic_matcher->ste_builder[sb_idx].bit_mask); 247 248 /* Del the htbl that contains the next_ste. 249 * The origin htbl stay with the same number of entries. 
250 */ 251 mlx5dr_htbl_put(next_miss_htbl); 252 253 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 254 0, hw_ste, 255 ste_info_head, 256 send_ste_list, 257 true /* Copy data */); 258 259 stats_tbl->ctrl.num_of_collisions--; 260 stats_tbl->ctrl.num_of_valid_entries--; 261 } 262 263 /* Free ste that is located in the middle of the miss list: 264 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_| 265 */ 266 static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx, 267 struct mlx5dr_ste *ste, 268 struct mlx5dr_ste_send_info *ste_info, 269 struct list_head *send_ste_list, 270 struct mlx5dr_ste_htbl *stats_tbl) 271 { 272 struct mlx5dr_ste *prev_ste; 273 u64 miss_addr; 274 275 prev_ste = list_prev_entry(ste, miss_list_node); 276 if (WARN_ON(!prev_ste)) 277 return; 278 279 miss_addr = ste_ctx->get_miss_addr(ste->hw_ste); 280 ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr); 281 282 mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0, 283 prev_ste->hw_ste, ste_info, 284 send_ste_list, true /* Copy data*/); 285 286 list_del_init(&ste->miss_list_node); 287 288 stats_tbl->ctrl.num_of_valid_entries--; 289 stats_tbl->ctrl.num_of_collisions--; 290 } 291 292 void mlx5dr_ste_free(struct mlx5dr_ste *ste, 293 struct mlx5dr_matcher *matcher, 294 struct mlx5dr_matcher_rx_tx *nic_matcher) 295 { 296 struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info; 297 struct mlx5dr_domain *dmn = matcher->tbl->dmn; 298 struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; 299 struct mlx5dr_ste_send_info ste_info_head; 300 struct mlx5dr_ste *next_ste, *first_ste; 301 bool put_on_origin_table = true; 302 struct mlx5dr_ste_htbl *stats_tbl; 303 LIST_HEAD(send_ste_list); 304 305 first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste), 306 struct mlx5dr_ste, miss_list_node); 307 stats_tbl = first_ste->htbl; 308 309 /* Two options: 310 * 1. ste is head: 311 * a. head ste is the only ste in the miss list 312 * b. 
head ste is not the only ste in the miss-list 313 * 2. ste is not head 314 */ 315 if (first_ste == ste) { /* Ste is the head */ 316 struct mlx5dr_ste *last_ste; 317 318 last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste), 319 struct mlx5dr_ste, miss_list_node); 320 if (last_ste == first_ste) 321 next_ste = NULL; 322 else 323 next_ste = list_next_entry(ste, miss_list_node); 324 325 if (!next_ste) { 326 /* One and only entry in the list */ 327 dr_ste_remove_head_ste(ste_ctx, ste, 328 nic_matcher, 329 &ste_info_head, 330 &send_ste_list, 331 stats_tbl); 332 } else { 333 /* First but not only entry in the list */ 334 dr_ste_replace_head_ste(nic_matcher, ste, 335 next_ste, &ste_info_head, 336 &send_ste_list, stats_tbl); 337 put_on_origin_table = false; 338 } 339 } else { /* Ste in the middle of the list */ 340 dr_ste_remove_middle_ste(ste_ctx, ste, 341 &ste_info_head, &send_ste_list, 342 stats_tbl); 343 } 344 345 /* Update HW */ 346 list_for_each_entry_safe(cur_ste_info, tmp_ste_info, 347 &send_ste_list, send_list) { 348 list_del(&cur_ste_info->send_list); 349 mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste, 350 cur_ste_info->data, cur_ste_info->size, 351 cur_ste_info->offset); 352 } 353 354 if (put_on_origin_table) 355 mlx5dr_htbl_put(ste->htbl); 356 } 357 358 bool mlx5dr_ste_equal_tag(void *src, void *dst) 359 { 360 struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src; 361 struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst; 362 363 return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG); 364 } 365 366 void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx, 367 u8 *hw_ste, 368 struct mlx5dr_ste_htbl *next_htbl) 369 { 370 struct mlx5dr_icm_chunk *chunk = next_htbl->chunk; 371 372 ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries); 373 } 374 375 void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx, 376 u8 *hw_ste_p, u32 ste_size) 377 { 378 if (ste_ctx->prepare_for_postsend) 379 
ste_ctx->prepare_for_postsend(hw_ste_p, ste_size); 380 } 381 382 /* Init one ste as a pattern for ste data array */ 383 void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, 384 u16 gvmi, 385 struct mlx5dr_domain_rx_tx *nic_dmn, 386 struct mlx5dr_ste_htbl *htbl, 387 u8 *formatted_ste, 388 struct mlx5dr_htbl_connect_info *connect_info) 389 { 390 struct mlx5dr_ste ste = {}; 391 392 ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi); 393 ste.hw_ste = formatted_ste; 394 395 if (connect_info->type == CONNECT_HIT) 396 dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl); 397 else 398 dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr); 399 } 400 401 int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, 402 struct mlx5dr_domain_rx_tx *nic_dmn, 403 struct mlx5dr_ste_htbl *htbl, 404 struct mlx5dr_htbl_connect_info *connect_info, 405 bool update_hw_ste) 406 { 407 u8 formatted_ste[DR_STE_SIZE] = {}; 408 409 mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, 410 dmn->info.caps.gvmi, 411 nic_dmn, 412 htbl, 413 formatted_ste, 414 connect_info); 415 416 return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste); 417 } 418 419 int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher, 420 struct mlx5dr_matcher_rx_tx *nic_matcher, 421 struct mlx5dr_ste *ste, 422 u8 *cur_hw_ste, 423 enum mlx5dr_icm_chunk_size log_table_size) 424 { 425 struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; 426 struct mlx5dr_domain *dmn = matcher->tbl->dmn; 427 struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; 428 struct mlx5dr_htbl_connect_info info; 429 struct mlx5dr_ste_htbl *next_htbl; 430 431 if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) { 432 u16 next_lu_type; 433 u16 byte_mask; 434 435 next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste); 436 byte_mask = ste_ctx->get_byte_mask(cur_hw_ste); 437 438 next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool, 439 
log_table_size, 440 next_lu_type, 441 byte_mask); 442 if (!next_htbl) { 443 mlx5dr_dbg(dmn, "Failed allocating table\n"); 444 return -ENOMEM; 445 } 446 447 /* Write new table to HW */ 448 info.type = CONNECT_MISS; 449 info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr; 450 if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl, 451 &info, false)) { 452 mlx5dr_info(dmn, "Failed writing table to HW\n"); 453 goto free_table; 454 } 455 456 mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx, 457 cur_hw_ste, next_htbl); 458 ste->next_htbl = next_htbl; 459 next_htbl->pointing_ste = ste; 460 } 461 462 return 0; 463 464 free_table: 465 mlx5dr_ste_htbl_free(next_htbl); 466 return -ENOENT; 467 } 468 469 static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl) 470 { 471 struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; 472 int num_of_entries; 473 474 htbl->ctrl.may_grow = true; 475 476 if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) 477 htbl->ctrl.may_grow = false; 478 479 /* Threshold is 50%, one is added to table of size 1 */ 480 num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); 481 ctrl->increase_threshold = (num_of_entries + 1) / 2; 482 } 483 484 struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, 485 enum mlx5dr_icm_chunk_size chunk_size, 486 u16 lu_type, u16 byte_mask) 487 { 488 struct mlx5dr_icm_chunk *chunk; 489 struct mlx5dr_ste_htbl *htbl; 490 int i; 491 492 htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); 493 if (!htbl) 494 return NULL; 495 496 chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size); 497 if (!chunk) 498 goto out_free_htbl; 499 500 htbl->chunk = chunk; 501 htbl->lu_type = lu_type; 502 htbl->byte_mask = byte_mask; 503 htbl->ste_arr = chunk->ste_arr; 504 htbl->hw_ste_arr = chunk->hw_ste_arr; 505 htbl->miss_list = chunk->miss_list; 506 htbl->refcount = 0; 507 508 for (i = 0; i < chunk->num_of_entries; i++) { 509 struct mlx5dr_ste *ste = &htbl->ste_arr[i]; 510 511 ste->hw_ste = 
htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED; 512 ste->htbl = htbl; 513 ste->refcount = 0; 514 INIT_LIST_HEAD(&ste->miss_list_node); 515 INIT_LIST_HEAD(&htbl->miss_list[i]); 516 INIT_LIST_HEAD(&ste->rule_list); 517 } 518 519 htbl->chunk_size = chunk_size; 520 dr_ste_set_ctrl(htbl); 521 return htbl; 522 523 out_free_htbl: 524 kfree(htbl); 525 return NULL; 526 } 527 528 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl) 529 { 530 if (htbl->refcount) 531 return -EBUSY; 532 533 mlx5dr_icm_free_chunk(htbl->chunk); 534 kfree(htbl); 535 return 0; 536 } 537 538 void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx, 539 struct mlx5dr_domain *dmn, 540 u8 *action_type_set, 541 u8 *hw_ste_arr, 542 struct mlx5dr_ste_actions_attr *attr, 543 u32 *added_stes) 544 { 545 ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr, 546 attr, added_stes); 547 } 548 549 void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, 550 struct mlx5dr_domain *dmn, 551 u8 *action_type_set, 552 u8 *hw_ste_arr, 553 struct mlx5dr_ste_actions_attr *attr, 554 u32 *added_stes) 555 { 556 ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr, 557 attr, added_stes); 558 } 559 560 const struct mlx5dr_ste_action_modify_field * 561 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field) 562 { 563 const struct mlx5dr_ste_action_modify_field *hw_field; 564 565 if (sw_field >= ste_ctx->modify_field_arr_sz) 566 return NULL; 567 568 hw_field = &ste_ctx->modify_field_arr[sw_field]; 569 if (!hw_field->end && !hw_field->start) 570 return NULL; 571 572 return hw_field; 573 } 574 575 void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx, 576 __be64 *hw_action, 577 u8 hw_field, 578 u8 shifter, 579 u8 length, 580 u32 data) 581 { 582 ste_ctx->set_action_set((u8 *)hw_action, 583 hw_field, shifter, length, data); 584 } 585 586 void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx, 587 __be64 *hw_action, 588 u8 hw_field, 589 u8 shifter, 590 u8 length, 591 u32 data) 
592 { 593 ste_ctx->set_action_add((u8 *)hw_action, 594 hw_field, shifter, length, data); 595 } 596 597 void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx, 598 __be64 *hw_action, 599 u8 dst_hw_field, 600 u8 dst_shifter, 601 u8 dst_len, 602 u8 src_hw_field, 603 u8 src_shifter) 604 { 605 ste_ctx->set_action_copy((u8 *)hw_action, 606 dst_hw_field, dst_shifter, dst_len, 607 src_hw_field, src_shifter); 608 } 609 610 int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx, 611 void *data, u32 data_sz, 612 u8 *hw_action, u32 hw_action_sz, 613 u16 *used_hw_action_num) 614 { 615 /* Only Ethernet frame is supported, with VLAN (18) or without (14) */ 616 if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN) 617 return -EINVAL; 618 619 return ste_ctx->set_action_decap_l3_list(data, data_sz, 620 hw_action, hw_action_sz, 621 used_hw_action_num); 622 } 623 624 int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn, 625 u8 match_criteria, 626 struct mlx5dr_match_param *mask, 627 struct mlx5dr_match_param *value) 628 { 629 if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) { 630 if (mask->misc.source_port && mask->misc.source_port != 0xffff) { 631 mlx5dr_err(dmn, 632 "Partial mask source_port is not supported\n"); 633 return -EINVAL; 634 } 635 if (mask->misc.source_eswitch_owner_vhca_id && 636 mask->misc.source_eswitch_owner_vhca_id != 0xffff) { 637 mlx5dr_err(dmn, 638 "Partial mask source_eswitch_owner_vhca_id is not supported\n"); 639 return -EINVAL; 640 } 641 } 642 643 return 0; 644 } 645 646 int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, 647 struct mlx5dr_matcher_rx_tx *nic_matcher, 648 struct mlx5dr_match_param *value, 649 u8 *ste_arr) 650 { 651 struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; 652 struct mlx5dr_domain *dmn = matcher->tbl->dmn; 653 struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; 654 struct mlx5dr_ste_build *sb; 655 int ret, i; 656 657 ret = mlx5dr_ste_build_pre_check(dmn, 
matcher->match_criteria, 658 &matcher->mask, value); 659 if (ret) 660 return ret; 661 662 sb = nic_matcher->ste_builder; 663 for (i = 0; i < nic_matcher->num_of_builders; i++) { 664 ste_ctx->ste_init(ste_arr, 665 sb->lu_type, 666 nic_dmn->ste_type, 667 dmn->info.caps.gvmi); 668 669 mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask); 670 671 ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr)); 672 if (ret) 673 return ret; 674 675 /* Connect the STEs */ 676 if (i < (nic_matcher->num_of_builders - 1)) { 677 /* Need the next builder for these fields, 678 * not relevant for the last ste in the chain. 679 */ 680 sb++; 681 ste_ctx->set_next_lu_type(ste_arr, sb->lu_type); 682 ste_ctx->set_byte_mask(ste_arr, sb->byte_mask); 683 } 684 ste_arr += DR_STE_SIZE; 685 } 686 return 0; 687 } 688 689 static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec) 690 { 691 spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present); 692 spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present); 693 spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present); 694 spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port); 695 spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn); 696 697 spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port); 698 spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask, 699 source_eswitch_owner_vhca_id); 700 701 spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio); 702 spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi); 703 spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid); 704 spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio); 705 spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi); 706 spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid); 707 708 
spec->outer_second_cvlan_tag = 709 MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag); 710 spec->inner_second_cvlan_tag = 711 MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag); 712 spec->outer_second_svlan_tag = 713 MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag); 714 spec->inner_second_svlan_tag = 715 MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag); 716 717 spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol); 718 719 spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi); 720 spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo); 721 722 spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni); 723 724 spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni); 725 spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam); 726 727 spec->outer_ipv6_flow_label = 728 MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label); 729 730 spec->inner_ipv6_flow_label = 731 MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label); 732 733 spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len); 734 spec->geneve_protocol_type = 735 MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type); 736 737 spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp); 738 } 739 740 static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec) 741 { 742 __be32 raw_ip[4]; 743 744 spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16); 745 746 spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0); 747 spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype); 748 749 spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16); 750 751 spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0); 752 spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio); 753 spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi); 754 spec->first_vid = 
MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid); 755 756 spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol); 757 spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp); 758 spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn); 759 spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag); 760 spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag); 761 spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag); 762 spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version); 763 spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags); 764 spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport); 765 spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport); 766 767 spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit); 768 769 spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport); 770 spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport); 771 772 memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, 773 src_ipv4_src_ipv6.ipv6_layout.ipv6), 774 sizeof(raw_ip)); 775 776 spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]); 777 spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]); 778 spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]); 779 spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]); 780 781 memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask, 782 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 783 sizeof(raw_ip)); 784 785 spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]); 786 spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]); 787 spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]); 788 spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]); 789 } 790 791 static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec) 792 { 793 spec->outer_first_mpls_label = 794 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label); 795 spec->outer_first_mpls_exp = 796 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp); 797 spec->outer_first_mpls_s_bos = 798 
MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos); 799 spec->outer_first_mpls_ttl = 800 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl); 801 spec->inner_first_mpls_label = 802 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label); 803 spec->inner_first_mpls_exp = 804 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp); 805 spec->inner_first_mpls_s_bos = 806 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos); 807 spec->inner_first_mpls_ttl = 808 MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl); 809 spec->outer_first_mpls_over_gre_label = 810 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label); 811 spec->outer_first_mpls_over_gre_exp = 812 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp); 813 spec->outer_first_mpls_over_gre_s_bos = 814 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos); 815 spec->outer_first_mpls_over_gre_ttl = 816 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl); 817 spec->outer_first_mpls_over_udp_label = 818 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label); 819 spec->outer_first_mpls_over_udp_exp = 820 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp); 821 spec->outer_first_mpls_over_udp_s_bos = 822 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos); 823 spec->outer_first_mpls_over_udp_ttl = 824 MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl); 825 spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7); 826 spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6); 827 spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5); 828 spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4); 829 spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3); 830 spec->metadata_reg_c_2 = 
MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2); 831 spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1); 832 spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0); 833 spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a); 834 } 835 836 static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec) 837 { 838 spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num); 839 spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num); 840 spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num); 841 spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num); 842 spec->outer_vxlan_gpe_vni = 843 MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni); 844 spec->outer_vxlan_gpe_next_protocol = 845 MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol); 846 spec->outer_vxlan_gpe_flags = 847 MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags); 848 spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data); 849 spec->icmpv6_header_data = 850 MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data); 851 spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type); 852 spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code); 853 spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type); 854 spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code); 855 } 856 857 void mlx5dr_ste_copy_param(u8 match_criteria, 858 struct mlx5dr_match_param *set_param, 859 struct mlx5dr_match_parameters *mask) 860 { 861 u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {}; 862 u8 *data = (u8 *)mask->match_buf; 863 size_t param_location; 864 void *buff; 865 866 if (match_criteria & DR_MATCHER_CRITERIA_OUTER) { 867 if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) { 868 memcpy(tail_param, data, mask->match_sz); 869 buff = 
tail_param;
		} else {
			buff = mask->match_buf;
		}
		dr_ste_copy_mask_spec(buff, &set_param->outer);
	}
	/* Each criteria section lives at a fixed offset in the packed match
	 * buffer, so the running offset advances past a section even when
	 * its criteria bit is not set.  The repeated pattern below: if the
	 * caller's mask buffer ends inside the section, copy the partial
	 * bytes into the zero-padded tail_param scratch buffer and parse
	 * from there; otherwise parse straight out of the caller's buffer.
	 */
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc(buff, &set_param->misc);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_spec)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_spec(buff, &set_param->inner);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc2)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc2(buff, &set_param->misc2);
	}

	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		if (mask->match_sz < param_location +
		    sizeof(struct mlx5dr_match_misc3)) {
			memcpy(tail_param, data + param_location,
			       mask->match_sz - param_location);
			buff = tail_param;
		} else {
			buff = data + param_location;
		}
		dr_ste_copy_mask_misc3(buff, &set_param->misc3);
	}
}

/* The mlx5dr_ste_build_* wrappers below all follow the same pattern:
 * record the direction flags (rx/inner) on the STE build descriptor and
 * delegate the format-specific initialization to the matching callback
 * of the HW-version STE context (ste_ctx_v0/ste_ctx_v1, see the array
 * at the bottom of this file).
 */
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}

void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}

void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}

void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}

void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}

void
mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask,
				bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}

/* Tag builder that matches nothing: writes no tag bytes, always succeeds. */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}

/* Build an STE that matches every packet: don't-care lookup type, an
 * all-zero byte mask, and a tag builder that writes nothing.
 */
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->byte_mask = 0;
	sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}

void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_mpls_init(sb, mask);
}

void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_gre_init(sb, mask);
}

void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_mpls_init(sb, mask);
}

/* Unlike the other builders, ICMP init can fail, so its status is
 * propagated to the caller.  The device caps are stashed on the build
 * descriptor for the version-specific init callback to consult.
 */
int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			  struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask,
			  struct mlx5dr_cmd_caps *caps,
			  bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	return ste_ctx->build_icmp_init(sb, mask);
}

void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_general_purpose_init(sb, mask);
}

void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_eth_l4_misc_init(sb, mask);
}

void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}

void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_tnl_geneve_init(sb, mask);
}

void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_register_0_init(sb, mask);
}

void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	ste_ctx->build_register_1_init(sb, mask);
}

void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	sb->rx = rx;
	sb->dmn = dmn;	/* the init callback needs the domain, not just caps */
	sb->inner = inner;
	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}

/* Per-HW-version STE format contexts, indexed by the device's steering
 * format version capability.
 */
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
	[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};

/* Return the STE context matching the device's steering format version,
 * or NULL for versions newer than this driver knows about.
 */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
		return NULL;

	return mlx5dr_ste_ctx_arr[version];
}