/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/*
 * mlx5e TX/RX acceleration hooks: glue between the generic mlx5e
 * datapath and the optional offload engines compiled into this driver
 * (kTLS, IPsec, GENEVE software-parser hints, UDP L4 GSO fixup).
 * All helpers are static inline so the fast path pays nothing for
 * offloads that are compiled out.
 */

#ifndef __MLX5E_EN_ACCEL_H__
#define __MLX5E_EN_ACCEL_H__

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#include "en.h"
#include "en/txrx.h"

#if IS_ENABLED(CONFIG_GENEVE)
#include <net/geneve.h>

/* GENEVE TX offload is only usable when the HW software parser (SWP)
 * is supported; with CONFIG_GENEVE disabled this always returns false
 * (see the #else stub below).
 */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return mlx5_tx_swp_supported(mdev);
}

/*
 * Fill the WQE ethernet segment with software-parser offsets for a
 * GENEVE-encapsulated skb so the HW can locate the inner headers.
 * Silently returns (no eseg change) unless the outer packet is
 * IPv4/IPv6 over UDP destined to the well-known GENEVE port.
 *
 * @skb:  packet being transmitted (expected encapsulated; caller in
 *        mlx5e_accel_tx_eseg() checks skb->encapsulation first)
 * @eseg: ethernet segment of the TX WQE to annotate
 * @ihs:  inline header size; when non-zero and a VLAN tag is present,
 *        the SWP offsets are shifted to account for the inserted tag
 */
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
	__be16 l3_proto;
	u8 l4_proto;

	/* Outer L3 protocol, looking through a VLAN header if present. */
	l3_proto = vlan_get_protocol(skb);
	switch (l3_proto) {
	case htons(ETH_P_IP):
		l4_proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		/* Walk IPv6 extension headers to the final protocol.
		 * A negative error return truncated into u8 can never
		 * equal IPPROTO_UDP, so it falls out at the check below.
		 */
		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		return;
	}

	/* Only GENEVE (UDP to the IANA GENEVE port) gets SWP offsets. */
	if (l4_proto != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;
	swp_spec.l3_proto = l3_proto;
	swp_spec.l4_proto = l4_proto;
	swp_spec.is_tun = true;
	/* Inner L3/L4: sniff the IP version nibble of the inner header. */
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	/* If the headers are inlined (ihs != 0) with a VLAN tag, the tag
	 * is inserted into the inline data, shifting every SWP offset.
	 */
	if (skb_vlan_tag_present(skb) && ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}

#else
/* CONFIG_GENEVE disabled: GENEVE TX offload is never allowed. */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}

#endif /* CONFIG_GENEVE */

/* For UDP L4 GSO, rewrite the UDP header length to the per-segment
 * payload size (gso_size + UDP header) instead of the super-skb length,
 * so each emitted segment carries a correct length field.
 */
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);

	udp_hdr(skb)->len = htons(payload_len);
}

/* Per-skb offload state carried from mlx5e_accel_tx_begin() to
 * mlx5e_accel_tx_finish() across the WQE build. Members exist only for
 * the offloads compiled in, so the struct may be empty.
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;	/* filled by mlx5e_tls_handle_tx_skb() */
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec; /* filled by mlx5e_ipsec_handle_tx_skb() */
#endif
};

/*
 * Run all TX offload pre-processing for @skb before its WQE is built.
 * Order matters: UDP GSO fixup, then TLS (which may itself post SKBs
 * and WQEs, e.g. for resync), then IPsec.
 *
 * Returns true to continue transmission, false when an offload handler
 * consumed or rejected the skb and the caller must stop processing it.
 */
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
	/* May send SKBs and WQEs. */
	if (mlx5e_tls_skb_offloaded(skb))
		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
			return false;
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Only consult xfrm state on SQs flagged for IPsec. */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

	return true;
}

/* Extra WQE length (in ds units per the IPsec helper's contract —
 * NOTE(review): exact unit defined in ipsec_rxtx.h, confirm there)
 * required by offload metadata for this skb; 0 when no offload applies.
 */
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}

/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)

/* Populate the offload-owned leading part of the ethernet segment
 * (the first MLX5E_ACCEL_ESEG_LEN bytes): IPsec metadata and/or GENEVE
 * SWP offsets, each only when its offload is compiled in and applies.
 */
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
}

/*
 * Finalize offload state on the fully built WQE: stamp the TLS TISN
 * into the control segment and, for IPsec skbs that need trailer bytes
 * (xo set and tailen != 0), append them via the inline segment.
 * Counterpart of mlx5e_accel_tx_begin(); @state is what it filled in.
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_tls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}

/* RX-side accel setup/teardown currently delegates to kTLS only. */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_rx(priv);
}

static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}
#endif /* __MLX5E_EN_ACCEL_H__ */