/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en.h"

enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
	unsigned int alen = crypto_aead_authsize(x->data);
	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
	struct iphdr *ipv4hdr = ip_hdr(skb);
	unsigned int trailer_len;
	u8 plen;
	int ret;

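	/* The ESP trailer ends the packet: pad bytes, a pad-length byte, a
	 * next-header byte, then the alen-byte ICV. Read the pad-length
	 * byte, which sits two bytes before the ICV, to size the trailer.
	 */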
	ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
	if (unlikely(ret))
		return ret;

	trailer_len = alen + plen + 2;

	ret = pskb_trim(skb, skb->len - trailer_len);
	if (unlikely(ret))
		return ret;
	if (skb->protocol == htons(ETH_P_IP)) {
		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
		ip_send_check(ipv4hdr);
	} else {
		ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
					     trailer_len);
	}
	return 0;
}

static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg, u8 mode,
				struct xfrm_offload *xo)
{
	/* Tunnel Mode:
	 * SWP:      OutL3       InL3  InL4
	 * Pkt: MAC  IP     ESP  IP    L4
	 *
	 * Transport Mode:
	 * SWP:      OutL3       OutL4
	 * Pkt: MAC  IP     ESP  L4
	 *
	 * Tunnel(VXLAN TCP/UDP) over Transport Mode
	 * SWP:      OutL3                   InL3  InL4
	 * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
	 */
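	/* Worked example (illustrative): transport mode, IPv4, ESP over TCP.
	 * With a 14-byte Ethernet header, skb_network_offset() is 14, so
	 * swp_outer_l3_offset is 14 / 2 = 7. All SWP offsets are expressed
	 * in 2-byte words from the start of the packet, hence the "/ 2".
	 */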

	/* Shared settings */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (skb->protocol == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

	/* Tunnel mode */
	if (mode == XFRM_MODE_TUNNEL) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		if (xo->proto == IPPROTO_IPV6)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;

		switch (xo->inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			/* IP | ESP | IP | [TCP | UDP] */
			eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
			break;
		default:
			break;
		}
		return;
	}

	/* Transport mode */
	if (mode != XFRM_MODE_TRANSPORT)
		return;

	if (!xo->inner_ipproto) {
		switch (xo->proto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			/* IP | ESP | TCP */
			eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
			break;
		default:
			break;
		}
	} else {
		/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
		switch (xo->inner_ipproto) {
		case IPPROTO_UDP:
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
			fallthrough;
		case IPPROTO_TCP:
			eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
			eseg->swp_inner_l4_offset =
				(skb->csum_start + skb->head - skb->data) / 2;
			if (inner_ip_hdr(skb)->version == 6)
				eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
			break;
		default:
			break;
		}
	}
}

void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	__u32 oseq = replay_esn->oseq;
	int iv_offset;
	__be64 seqno;
	u32 seq_hi;
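
	/* A GSO skb can straddle a low-32-bit sequence number wrap: oseq is
	 * already below the ESN scope midpoint while (oseq - gso_segs)
	 * underflows above it. The first segments of the burst were numbered
	 * before the wrap, so the high word written into the IV must be the
	 * previous epoch, seq.hi - 1.
	 */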
	if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
		     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs)))
		seq_hi = xo->seq.hi - 1;
	else
		seq_hi = xo->seq.hi;

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
			struct xfrm_offload *xo)
{
	int iv_offset;
	__be64 seqno;

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
			       struct mlx5_wqe_inline_seg *inlseg)
{
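	/* Rebuild the ESP trailer (pad bytes, pad-length and next-header
	 * fields) as an inline WQE segment, so the device transmits it
	 * right after the packet payload.
	 */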
	inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
	esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
}

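/* Record the ESP pad and trailer geometry for the WQE build.
 *
 * Worked example (illustrative): AES-GCM reports a cipher block size of 1,
 * which ALIGN() rounds up to the mandatory 4-byte ESP alignment. For
 * skb->len == 100: clen = ALIGN(102, 4) = 104 and plen = max(104 - 100, 4)
 * = 4 (two pad bytes plus the pad-length and next-header fields); with a
 * 16-byte ICV (alen), the full trailer (tailen) is 20 bytes.
 */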
static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
				 struct sk_buff *skb,
				 struct xfrm_state *x,
				 struct xfrm_offload *xo,
				 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	unsigned int blksize, clen, alen, plen;
	struct crypto_aead *aead;
	unsigned int tailen;

	ipsec_st->x = x;
	ipsec_st->xo = xo;
	aead = x->data;
	alen = crypto_aead_authsize(aead);
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2, blksize);
	plen = max_t(u32, clen - skb->len, 4);
	tailen = plen + alen;
	ipsec_st->plen = plen;
	ipsec_st->tailen = tailen;

	return 0;
}

void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
			       struct mlx5_wqe_eth_seg *eseg)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct xfrm_encap_tmpl *encap;
	struct xfrm_state *x;
	struct sec_path *sp;
	u8 l3_proto;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1))
		return;

	x = xfrm_input_state(skb);
	if (unlikely(!x))
		return;

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6))))
		return;

	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);

	l3_proto = (x->props.family == AF_INET) ?
		   ((struct iphdr *)skb_network_header(skb))->protocol :
		   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;

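	/* Steer the packet to the IPsec TX flow table and request hardware
	 * insertion of the ESP trailer; the *_ASSOC flags select the header
	 * (outer/inner, IP/L4) with which the inserted trailer is associated.
	 */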
	eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
	eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
	encap = x->encap;
	if (!encap) {
		eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
	} else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
		eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
			cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
	}
}

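/* Prepare an offloaded skb for transmission: validate the offload state,
 * strip the software-built ESP trailer (non-GSO only), write the sequence
 * number into the IV field, and record the trailer geometry for the WQE.
 */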
bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
			       struct sk_buff *skb,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *x;
	struct sec_path *sp;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
		goto drop;
	}

	x = xfrm_input_state(skb);
	if (unlikely(!x)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
		goto drop;
	}

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6)))) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
		goto drop;
	}

	if (!skb_is_gso(skb))
		if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
			goto drop;
		}

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	sa_entry->set_iv_op(skb, x, xo);
	mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);

	return true;

drop:
	kfree_skb(skb);
	return false;
}

enum {
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};

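/* RX completion handler for IPsec offload: the device reports the SA
 * handle and a decrypt syndrome in the CQE flow-table metadata. Look the
 * SA up in the SADB, attach its xfrm state to the skb's sec_path, and
 * translate the hardware syndrome into the xfrm crypto status.
 */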
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
				       struct sk_buff *skb,
				       struct mlx5_cqe64 *cqe)
{
	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_offload *xo;
	struct sec_path *sp;
	u32 sa_handle;

	sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
	sp = secpath_set(skb);
	if (unlikely(!sp)) {
		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
		return;
	}

	rcu_read_lock();
	sa_entry = xa_load(&ipsec->sadb, sa_handle);
	if (unlikely(!sa_entry)) {
		rcu_read_unlock();
		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return;
	}
	xfrm_state_hold(sa_entry->x);
	rcu_read_unlock();

	sp->xvec[sp->len++] = sa_entry->x;
	sp->olen++;

	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;

	switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
		xo->status = CRYPTO_SUCCESS;
		break;
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
		break;
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
		xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
		break;
	default:
		atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
	}
}
361