// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */

#include <linux/types.h>
#include <linux/crc32.h>
#include "dr_ste.h"

#define SVLAN_ETHERTYPE		0x88a8
#define DR_STE_ENABLE_FLOW_TAG	BIT(31)

enum dr_ste_v0_entry_type {
	DR_STE_TYPE_TX          = 1,
	DR_STE_TYPE_RX          = 2,
	DR_STE_TYPE_MODIFY_PKT  = 6,
};

enum dr_ste_v0_action_tunl {
	DR_STE_TUNL_ACTION_NONE		= 0,
	DR_STE_TUNL_ACTION_ENABLE	= 1,
	DR_STE_TUNL_ACTION_DECAP	= 2,
	DR_STE_TUNL_ACTION_L3_DECAP	= 3,
	DR_STE_TUNL_ACTION_POP_VLAN	= 4,
};

enum dr_ste_v0_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN	= 1,
	DR_STE_ACTION_TYPE_ENCAP_L3	= 3,
	DR_STE_ACTION_TYPE_ENCAP	= 4,
};

enum dr_ste_v0_action_mdfy_op {
	DR_STE_ACTION_MDFY_OP_COPY	= 0x1,
	DR_STE_ACTION_MDFY_OP_SET	= 0x2,
	DR_STE_ACTION_MDFY_OP_ADD	= 0x3,
};

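/* Select the inner (_I), RX (_D) or TX/outer (_O) flavor of a lookup
 * type, based on the rx/inner flags of the STE build.
 */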
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
		   (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
			  DR_STE_V0_LU_TYPE_##lookup_type##_O)

enum {
	DR_STE_V0_LU_TYPE_NOP				= 0x00,
	DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP		= 0x05,
	DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I		= 0x0a,
	DR_STE_V0_LU_TYPE_ETHL2_DST_O			= 0x06,
	DR_STE_V0_LU_TYPE_ETHL2_DST_I			= 0x07,
	DR_STE_V0_LU_TYPE_ETHL2_DST_D			= 0x1b,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_O			= 0x08,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_I			= 0x09,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_D			= 0x1c,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O		= 0x36,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I		= 0x37,
	DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D		= 0x38,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O		= 0x0d,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I		= 0x0e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D		= 0x1e,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O		= 0x0f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I		= 0x10,
	DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D		= 0x1f,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O		= 0x11,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I		= 0x12,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D		= 0x20,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O		= 0x29,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I		= 0x2a,
	DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D		= 0x2b,
	DR_STE_V0_LU_TYPE_ETHL4_O			= 0x13,
	DR_STE_V0_LU_TYPE_ETHL4_I			= 0x14,
	DR_STE_V0_LU_TYPE_ETHL4_D			= 0x21,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_O			= 0x2c,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_I			= 0x2d,
	DR_STE_V0_LU_TYPE_ETHL4_MISC_D			= 0x2e,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_O			= 0x15,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_I			= 0x24,
	DR_STE_V0_LU_TYPE_MPLS_FIRST_D			= 0x25,
	DR_STE_V0_LU_TYPE_GRE				= 0x16,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_0			= 0x22,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_1			= 0x23,
	DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER	= 0x19,
	DR_STE_V0_LU_TYPE_GENERAL_PURPOSE		= 0x18,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0		= 0x2f,
	DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1		= 0x30,
	DR_STE_V0_LU_TYPE_TUNNEL_HEADER			= 0x34,
	DR_STE_V0_LU_TYPE_DONT_CARE			= MLX5DR_STE_LU_TYPE_DONT_CARE,
};

enum {
	DR_STE_V0_ACTION_MDFY_FLD_L2_0		= 0,
	DR_STE_V0_ACTION_MDFY_FLD_L2_1		= 1,
	DR_STE_V0_ACTION_MDFY_FLD_L2_2		= 2,
	DR_STE_V0_ACTION_MDFY_FLD_L3_0		= 3,
	DR_STE_V0_ACTION_MDFY_FLD_L3_1		= 4,
	DR_STE_V0_ACTION_MDFY_FLD_L3_2		= 5,
	DR_STE_V0_ACTION_MDFY_FLD_L3_3		= 6,
	DR_STE_V0_ACTION_MDFY_FLD_L3_4		= 7,
	DR_STE_V0_ACTION_MDFY_FLD_L4_0		= 8,
	DR_STE_V0_ACTION_MDFY_FLD_L4_1		= 9,
	DR_STE_V0_ACTION_MDFY_FLD_MPLS		= 10,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0	= 11,
	DR_STE_V0_ACTION_MDFY_FLD_REG_0		= 12,
	DR_STE_V0_ACTION_MDFY_FLD_REG_1		= 13,
	DR_STE_V0_ACTION_MDFY_FLD_REG_2		= 14,
	DR_STE_V0_ACTION_MDFY_FLD_REG_3		= 15,
	DR_STE_V0_ACTION_MDFY_FLD_L4_2		= 16,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_0	= 17,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_1	= 18,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_2	= 19,
	DR_STE_V0_ACTION_MDFY_FLD_FLEX_3	= 20,
	DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1	= 21,
	DR_STE_V0_ACTION_MDFY_FLD_METADATA	= 22,
	DR_STE_V0_ACTION_MDFY_FLD_RESERVED	= 23,
};

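/* Map SW steering modify-header fields (MLX5_ACTION_IN_FIELD_*) to the
 * STEv0 HW modify fields, along with the bit range within each HW field.
 * Entries that set l3_type/l4_type appear to be valid only for that
 * specific L3/L4 header type.
 */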
static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
		.l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
	},
	[MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
		.l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
	},
	[MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
	},
	[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
		.hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
	},
};

static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}

static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, entry_type);
}

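/* The miss address is 64-byte aligned, so bits 5:0 are implicitly zero
 * and the remaining bits are split between the miss_address_39_32 and
 * miss_address_31_6 fields.
 */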
static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	/* The miss address for TX and RX STEs is located at the same offsets */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}

static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
{
	u64 index =
		((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
		 ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);

	return index << 6;
}

static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
{
	MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
}

static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, byte_mask);
}

static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
}

static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
{
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
}

static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
}

static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}

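/* The hit address is 32-byte aligned; the hash table size is encoded in
 * the low bits of the index, which is why ht_size is OR-ed into
 * (icm_addr >> 5).
 */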
static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
}

static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type,
				enum dr_ste_v0_entry_type entry_type, u16 gvmi)
{
	dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
	dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
	dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set the GVMI once, as it is the same for RX/TX:
	 * bits 63_48 of the next table base / miss address encode the next GVMI.
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}

static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
			   bool is_rx, u16 gvmi)
{
	enum dr_ste_v0_entry_type entry_type;

	entry_type = is_rx ? DR_STE_TYPE_RX : DR_STE_TYPE_TX;
	dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi);
}

static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}

static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	/* This can be used both for rx_steering_mult and for sx_transmit */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}

static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}

static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
				       bool go_back)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to a HW limitation we need to set this bit; otherwise reformat +
	 * push VLAN will not work.
	 */
	if (go_back)
		dr_ste_v0_set_go_back_bit(hw_ste_p);
}

static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
				   int size, bool encap_l3)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
	/* The hardware expects the size here in 2-byte words */
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}

static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}

static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_POP_VLAN);
}

static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
}

static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
					  u32 re_write_index)
{
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
		 num_of_actions);
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
		 re_write_index);
}

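/* Move to the next STE in the array: count it and initialize it as a
 * match-all (DONT_CARE lookup) entry of the requested type.
 */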
static void dr_ste_v0_arr_init_next(u8 **last_ste,
				    u32 *added_stes,
				    enum dr_ste_v0_entry_type entry_type,
				    u16 gvmi)
{
	(*added_stes)++;
	*last_ste += DR_STE_SIZE;
	dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
			    entry_type, gvmi);
}

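/* Build the TX action chain. Actions that cannot share a single STE
 * (e.g. modify header followed by push VLAN or encap) are split over
 * additional STEs via dr_ste_v0_arr_init_next().
 */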
static void
dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
		action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];

	/* Make sure the modify header action comes before L2
	 * encapsulation, since modify headers are supported
	 * on outer headers only.
	 */
	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							DR_STE_TYPE_TX,
							attr->gvmi);

			dr_ste_v0_set_tx_push_vlan(last_ste,
						   attr->vlans.headers[i],
						   encap);
		}
	}

	if (encap) {
		/* Modify header and encapsulation require different STEs,
		 * since the modify header STE format doesn't support the
		 * encapsulation tunneling_action.
		 */
		if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
		    action_type_set[DR_ACTION_TYP_PUSH_VLAN])
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_TX,
						attr->gvmi);

		dr_ste_v0_set_tx_encap(last_ste,
				       attr->reformat.id,
				       attr->reformat.size,
				       action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
		/* Whenever prio_tag_required is enabled, we can be sure that
		 * the previous table (ACL) has already pushed a VLAN onto our
		 * packet, and due to a HW limitation we need to set this bit;
		 * otherwise push VLAN + reformat will not work.
		 */
		if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
			dr_ste_v0_set_go_back_bit(last_ste);
	}

	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}

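/* Build the RX action chain: counter, L3/L2 decap, pop VLAN, modify
 * header and flow tag, splitting into additional STEs whenever two
 * actions cannot coexist in one STE.
 */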
static void
dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
			 u8 *action_type_set,
			 u8 *last_ste,
			 struct mlx5dr_ste_actions_attr *attr,
			 u32 *added_stes)
{
	if (action_type_set[DR_ACTION_TYP_CTR])
		dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);

	if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
		dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
		dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->decap_actions,
					      attr->decap_index);
	}

	if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
		dr_ste_v0_set_rx_decap(last_ste);

	if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
		int i;

		for (i = 0; i < attr->vlans.count; i++) {
			if (i ||
			    action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
			    action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
				dr_ste_v0_arr_init_next(&last_ste,
							added_stes,
							DR_STE_TYPE_RX,
							attr->gvmi);

			dr_ste_v0_set_rx_pop_vlan(last_ste);
		}
	}

	if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_MODIFY_PKT,
						attr->gvmi);
		else
			dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);

		dr_ste_v0_set_rewrite_actions(last_ste,
					      attr->modify_actions,
					      attr->modify_index);
	}

	if (action_type_set[DR_ACTION_TYP_TAG]) {
		if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
			dr_ste_v0_arr_init_next(&last_ste,
						added_stes,
						DR_STE_TYPE_RX,
						attr->gvmi);

		dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
	}

	dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
	dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}

static void dr_ste_v0_set_action_set(u8 *hw_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
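	/* A length of 32 (a full-field write) is encoded as 0 */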
	length = (length == 32) ? 0 : length;
	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
}

static void dr_ste_v0_set_action_add(u8 *hw_action,
				     u8 hw_field,
				     u8 shifter,
				     u8 length,
				     u32 data)
{
	length = (length == 32) ? 0 : length;
	MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
	MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
	MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
}

static void dr_ste_v0_set_action_copy(u8 *hw_action,
				      u8 dst_hw_field,
				      u8 dst_shifter,
				      u8 dst_len,
				      u8 src_hw_field,
				      u8 src_shifter)
{
	MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
	MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
	MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
	MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
}

#define DR_STE_DECAP_L3_MIN_ACTION_NUM	5

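/* Build the modify-header action list that restores the inner L2 header
 * after an L3 decap: DR_STE_DECAP_L3_MIN_ACTION_NUM set actions at
 * minimum, plus one more when the original header carried a VLAN.
 */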
static int
dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
				   u8 *hw_action, u32 hw_action_sz,
				   u16 *used_hw_action_num)
{
	struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
	u32 hw_action_num;
	int required_actions;
	u32 hdr_fld_4b;
	u16 hdr_fld_2b;
	u16 vlan_type;
	bool vlan;

	vlan = (data_sz != HDR_LEN_L2);
	hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
	required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;

	if (hw_action_num < required_actions)
		return -ENOMEM;

	/* dmac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 16);
	hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_47_16 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
	hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
		      MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* dmac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
	MLX5_SET(dr_action_hw_set, hw_action,
		 inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* ethertype + (optional) vlan */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 32);
	if (!vlan) {
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
	} else {
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
		vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
		hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
		MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
		MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
	}
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	/* smac_15_0 */
	MLX5_SET(dr_action_hw_set, hw_action,
		 opcode, DR_STE_ACTION_MDFY_OP_SET);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_length, 16);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
	MLX5_SET(dr_action_hw_set, hw_action,
		 destination_left_shifter, 0);
	hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
	MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
	hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);

	if (vlan) {
		MLX5_SET(dr_action_hw_set, hw_action,
			 opcode, DR_STE_ACTION_MDFY_OP_SET);
		hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
		MLX5_SET(dr_action_hw_set, hw_action,
			 inline_data, hdr_fld_2b);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_length, 16);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
		MLX5_SET(dr_action_hw_set, hw_action,
			 destination_left_shifter, 0);
	}

	*used_hw_action_num = required_actions;

	return 0;
}

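/* The STE builders below share a convention: the *_bit_mask/*_tag
 * helpers copy the relevant fields from the match parameters into the
 * HW layout and then clear them, marking them as consumed so that
 * leftover, unsupported match criteria can be detected by the caller.
 */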
static void
dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
					bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;
		mask->smac_15_0 = 0;
	}

	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}

static int
dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
				   struct mlx5dr_ste_build *sb,
				   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);

	if (spec->smac_47_16 || spec->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
			 spec->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
			 spec->smac_47_16 << 16 | spec->smac_15_0);
		spec->smac_47_16 = 0;
		spec->smac_15_0 = 0;
	}

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}
	return 0;
}

static void
dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
}

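/* Typical builder flow (a sketch, not enforced here): the *_init
 * function runs once per matcher to derive lu_type and byte_mask from
 * the mask, and the stored ste_build_tag_func then runs per rule to
 * fill the STE tag.
 */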
static int
dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}

static void
dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
}

static int
dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}

static void
dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
}

static int
dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}

static void
dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
}

static void
dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
					   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, bit_mask,
			       second_priority, misc_mask, outer_second_prio);
	}
}

static int
dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
				      bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}

static void
dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
				    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);

	dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
}

static int
dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);

	return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}

static void
dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
}

static void
dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
				    struct mlx5dr_ste_build *sb,
				    u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
}

static int
dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);

	return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}

static void
dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
}

static void
dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
				    bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}

static int
dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}

static void
dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
}

static int
dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
				     struct mlx5dr_ste_build *sb,
				     u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}

static void
dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
}

static int
dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
				   struct mlx5dr_ste_build *sb,
				   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	if (sb->inner)
		DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label);
	else
		DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label);

	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}

static void
dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
}

static int
dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
			 struct mlx5dr_ste_build *sb,
			 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (sb->inner)
		DR_STE_SET_MPLS(mpls, misc2, inner, tag);
	else
		DR_STE_SET_MPLS(mpls, misc2, outer, tag);

	return 0;
}

static void
dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
}

static int
dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
			    struct mlx5dr_ste_build *sb,
			    u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);

	DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
	DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
	DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);

	DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);

	DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);

	return 0;
}

static void
dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
			     struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
}

static int
dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
			     struct mlx5dr_ste_build *sb,
			     u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
	u32 mpls_hdr;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
		mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
		misc_2->outer_first_mpls_over_gre_label = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
		misc_2->outer_first_mpls_over_gre_exp = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
		misc_2->outer_first_mpls_over_gre_s_bos = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
		misc_2->outer_first_mpls_over_gre_ttl = 0;
	} else {
		mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
		misc_2->outer_first_mpls_over_udp_label = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
		misc_2->outer_first_mpls_over_udp_exp = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
		misc_2->outer_first_mpls_over_udp_s_bos = 0;
		mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
		misc_2->outer_first_mpls_over_udp_ttl = 0;
	}

	MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
	return 0;
}

static void
dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
}

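/* Compose the 32-bit MPLS header (label/exp/s-bos/ttl) and store it in
 * the flex parser dword allocated for MPLS over UDP.
 */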
static int
dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
				      struct mlx5dr_ste_build *sb,
				      u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_udp_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_udp_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_udp_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_udp_ttl = 0;

	parser_id = sb->caps->flex_parser_id_mpls_over_udp;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}

static void
dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
	/* STEs with lookup type FLEX_PARSER_{0/1} include
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
}

static int
dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
				      struct mlx5dr_ste_build *sb,
				      u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;
	u8 *parser_ptr;
	u8 parser_id;
	u32 mpls_hdr;

	mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
	misc2->outer_first_mpls_over_gre_label = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
	misc2->outer_first_mpls_over_gre_exp = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
	misc2->outer_first_mpls_over_gre_s_bos = 0;
	mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
	misc2->outer_first_mpls_over_gre_ttl = 0;

	parser_id = sb->caps->flex_parser_id_mpls_over_gre;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
	*(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);

	return 0;
}

static void
dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} include
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
}

#define ICMP_TYPE_OFFSET_FIRST_DW	24
#define ICMP_CODE_OFFSET_FIRST_DW	16

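/* Within the first ICMP dword, the type lives in bits 31:24 and the
 * code in bits 23:16; the second dword carries the ICMP header data.
 */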
static int
dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
			 struct mlx5dr_ste_build *sb,
			 u8 *tag)
{
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
	u32 *icmp_header_data;
	int dw0_location;
	int dw1_location;
	u8 *parser_ptr;
	u8 *icmp_type;
	u8 *icmp_code;
	bool is_ipv4;
	u32 icmp_hdr;

	is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
	if (is_ipv4) {
		icmp_header_data	= &misc_3->icmpv4_header_data;
		icmp_type		= &misc_3->icmpv4_type;
		icmp_code		= &misc_3->icmpv4_code;
		dw0_location		= sb->caps->flex_parser_id_icmp_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data	= &misc_3->icmpv6_header_data;
		icmp_type		= &misc_3->icmpv6_type;
		icmp_code		= &misc_3->icmpv6_code;
		dw0_location		= sb->caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmpv6_dw1;
	}

	parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
	icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
		   (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
	*(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
	*icmp_code = 0;
	*icmp_type = 0;

	parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
	*(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
	*icmp_header_data = 0;

	return 0;
}

static void
dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask)
{
	u8 parser_id;
	bool is_ipv4;

	dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} include
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
	parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
		    sb->caps->flex_parser_id_icmpv6_dw0;
	sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
		      DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
}

1466 static int
1467 dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
1468 				    struct mlx5dr_ste_build *sb,
1469 				    u8 *tag)
1470 {
1471 	struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
1472 
1473 	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
1474 		       misc_2, metadata_reg_a);
1475 
1476 	return 0;
1477 }
1478 
1479 static void
1480 dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
1481 				     struct mlx5dr_match_param *mask)
1482 {
1483 	dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
1484 
1485 	sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
1486 	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
1487 	sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
1488 }
1489 
static int
dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
				struct mlx5dr_ste_build *sb,
				u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
	}

	return 0;
}

static void
dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
}

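/* Match on VXLAN-GPE flags, next protocol and VNI, all taken from the
 * outer tunnel header via the flex parser.
 */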
static int
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_flags, misc3,
		       outer_vxlan_gpe_flags);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_next_protocol, misc3,
		       outer_vxlan_gpe_next_protocol);
	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
		       outer_vxlan_gpe_vni, misc3,
		       outer_vxlan_gpe_vni);

	return 0;
}

static void
dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
}

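/* Match on the GENEVE header: protocol type, OAM bit, option length
 * and VNI.
 */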
static int
dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_protocol_type, misc, geneve_protocol_type);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_oam, misc, geneve_oam);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_opt_len, misc, geneve_opt_len);
	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
		       geneve_vni, misc, geneve_vni);

	return 0;
}

static void
dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
}

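/* STEERING_REGISTERS_0 matches metadata registers C0-C3;
 * STEERING_REGISTERS_1 (below) matches C4-C7.
 */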
static int
dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);

	return 0;
}

static void
dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
}

static int
dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
			       struct mlx5dr_ste_build *sb,
			       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);

	return 0;
}

static void
dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
				struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
}

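/* Match on the packet source: GVMI (vport) and QP. The mask is built
 * from source_port/source_sqn; when the tag is built, the port number
 * is translated to the hardware GVMI of the owning domain or its peer.
 */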
static void
dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
				      u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;
}

static int
dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_domain *vport_dmn;
	u8 *bit_mask = sb->bit_mask;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			vport_dmn = dmn;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			vport_dmn = dmn->peer_dmn;
		else
			return -EINVAL;

		misc->source_eswitch_owner_vhca_id = 0;
	} else {
		vport_dmn = dmn;
	}

	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (source_gvmi_set) {
		vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
							misc->source_port);
		if (!vport_cap) {
			mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
				   misc->source_port);
			return -EINVAL;
		}

		if (vport_cap->vport_gvmi)
			MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

		misc->source_port = 0;
	}

	return 0;
}

static void
dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
}

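/* Write one 32-bit programmable sample value at its flex parser offset
 * in the tag. Each parser may only be consumed once per STE, so ids
 * that are out of range or already used are silently skipped.
 */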
static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
				      u32 *misc4_field_value,
				      bool *parser_is_used,
				      u8 *tag)
{
	u32 id = *misc4_field_id;
	u8 *parser_ptr;

	if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
		return;

	parser_is_used[id] = true;
	parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);

	*(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
	*misc4_field_id = 0;
	*misc4_field_value = 0;
}

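/* Place all four misc4 programmable sample fields; the sample field id
 * selects which of the flex parsers each value lands on.
 */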
static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
	bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};

	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
				  &misc_4_mask->prog_sample_field_value_0,
				  parser_is_used, tag);

	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
				  &misc_4_mask->prog_sample_field_value_1,
				  parser_is_used, tag);

	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
				  &misc_4_mask->prog_sample_field_value_2,
				  parser_is_used, tag);

	dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
				  &misc_4_mask->prog_sample_field_value_3,
				  parser_is_used, tag);

	return 0;
}

static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask)
{
	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
	dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
}

static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask)
{
	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
	dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
}

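/* Match on the 32-bit data of GENEVE TLV option 0, placed on the flex
 * parser that firmware assigned for it in the device caps.
 */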
static int
dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
						   struct mlx5dr_ste_build *sb,
						   u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
	u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
	u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);

	MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
		 misc3->geneve_tlv_option_0_data);
	misc3->geneve_tlv_option_0_data = 0;

	return 0;
}

static void
dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
						    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);

	/* STEs with lookup type FLEX_PARSER_{0/1} include
	 * flex parsers_{0-3}/{4-7} respectively.
	 */
	sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > DR_STE_MAX_FLEX_0_ID ?
		DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
		DR_STE_V0_LU_TYPE_FLEX_PARSER_0;

	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
}

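/* Match on the GTP-U header: message flags, message type and TEID. */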
static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
						    struct mlx5dr_ste_build *sb,
						    u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
		       gtpu_msg_flags, misc3,
		       gtpu_msg_flags);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
		       gtpu_msg_type, misc3,
		       gtpu_msg_type);
	DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
		       gtpu_teid, misc3,
		       gtpu_teid);

	return 0;
}

static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
						      struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
}

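/* GTP-U dwords sampled by the programmable flex parsers. The _0 and _1
 * builders below are identical except that each only places the fields
 * whose parser id falls in its own STE (parsers 0-3 vs 4-7).
 */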
static int
dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}

static void
dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
}

static int
dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
	if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
		DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
	return 0;
}

static void
dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
					    struct mlx5dr_match_param *mask)
{
	dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);

	sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
}

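/* Match on the first two generic tunnel header dwords (misc5). */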
static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
					      struct mlx5dr_ste_build *sb,
					      u8 *tag)
{
	struct mlx5dr_match_misc5 *misc5 = &value->misc5;

	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
	DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);

	return 0;
}

static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
						struct mlx5dr_match_param *mask)
{
	sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
	dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
	sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
	sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
}

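/* The STEv0 context: the ops table through which the generic dr_ste
 * layer builds lookup entries and actions for this STE format.
 */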
struct mlx5dr_ste_ctx ste_ctx_v0 = {
	/* Builders */
	.build_eth_l2_src_dst_init	= &dr_ste_v0_build_eth_l2_src_dst_init,
	.build_eth_l3_ipv6_src_init	= &dr_ste_v0_build_eth_l3_ipv6_src_init,
	.build_eth_l3_ipv6_dst_init	= &dr_ste_v0_build_eth_l3_ipv6_dst_init,
	.build_eth_l3_ipv4_5_tuple_init	= &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
	.build_eth_l2_src_init		= &dr_ste_v0_build_eth_l2_src_init,
	.build_eth_l2_dst_init		= &dr_ste_v0_build_eth_l2_dst_init,
	.build_eth_l2_tnl_init		= &dr_ste_v0_build_eth_l2_tnl_init,
	.build_eth_l3_ipv4_misc_init	= &dr_ste_v0_build_eth_l3_ipv4_misc_init,
	.build_eth_ipv6_l3_l4_init	= &dr_ste_v0_build_eth_ipv6_l3_l4_init,
	.build_mpls_init		= &dr_ste_v0_build_mpls_init,
	.build_tnl_gre_init		= &dr_ste_v0_build_tnl_gre_init,
	.build_tnl_mpls_init		= &dr_ste_v0_build_tnl_mpls_init,
	.build_tnl_mpls_over_udp_init	= &dr_ste_v0_build_tnl_mpls_over_udp_init,
	.build_tnl_mpls_over_gre_init	= &dr_ste_v0_build_tnl_mpls_over_gre_init,
	.build_icmp_init		= &dr_ste_v0_build_icmp_init,
	.build_general_purpose_init	= &dr_ste_v0_build_general_purpose_init,
	.build_eth_l4_misc_init		= &dr_ste_v0_build_eth_l4_misc_init,
	.build_tnl_vxlan_gpe_init	= &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
	.build_tnl_geneve_init		= &dr_ste_v0_build_flex_parser_tnl_geneve_init,
	.build_tnl_geneve_tlv_opt_init	= &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
	.build_register_0_init		= &dr_ste_v0_build_register_0_init,
	.build_register_1_init		= &dr_ste_v0_build_register_1_init,
	.build_src_gvmi_qpn_init	= &dr_ste_v0_build_src_gvmi_qpn_init,
	.build_flex_parser_0_init	= &dr_ste_v0_build_flex_parser_0_init,
	.build_flex_parser_1_init	= &dr_ste_v0_build_flex_parser_1_init,
	.build_tnl_gtpu_init		= &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
	.build_tnl_header_0_1_init	= &dr_ste_v0_build_tnl_header_0_1_init,
	.build_tnl_gtpu_flex_parser_0_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
	.build_tnl_gtpu_flex_parser_1_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,

	/* Getters and Setters */
	.ste_init			= &dr_ste_v0_init,
	.set_next_lu_type		= &dr_ste_v0_set_next_lu_type,
	.get_next_lu_type		= &dr_ste_v0_get_next_lu_type,
	.set_miss_addr			= &dr_ste_v0_set_miss_addr,
	.get_miss_addr			= &dr_ste_v0_get_miss_addr,
	.set_hit_addr			= &dr_ste_v0_set_hit_addr,
	.set_byte_mask			= &dr_ste_v0_set_byte_mask,
	.get_byte_mask			= &dr_ste_v0_get_byte_mask,

	/* Actions */
	.actions_caps			= DR_STE_CTX_ACTION_CAP_NONE,
	.set_actions_rx			= &dr_ste_v0_set_actions_rx,
	.set_actions_tx			= &dr_ste_v0_set_actions_tx,
	.modify_field_arr_sz		= ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
	.modify_field_arr		= dr_ste_v0_action_modify_field_arr,
	.set_action_set			= &dr_ste_v0_set_action_set,
	.set_action_add			= &dr_ste_v0_set_action_add,
	.set_action_copy		= &dr_ste_v0_set_action_copy,
	.set_action_decap_l3_list	= &dr_ste_v0_set_action_decap_l3_list,
};