/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/ptp_classify.h>
#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "en/ptp.h"

static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

#ifdef CONFIG_MLX5_CORE_EN_DCB
static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
{
	int dscp_cp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return priv->dcbx_dp.dscp2prio[dscp_cp];
}
#endif

static bool mlx5e_use_ptpsq(struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return false;

	if (fk.basic.n_proto == htons(ETH_P_1588))
		return true;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return false;

	return (fk.basic.ip_proto == IPPROTO_UDP &&
		fk.ports.dst == htons(PTP_EV_PORT));
}

static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		goto return_txq;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

return_txq:
	return priv->port_ptp_tc2realtxq[up];
}
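
/* HTB offload exposes its send queues after the regular num_tc * num_channels
 * txqs. Map the skb's HTB classid (taken from skb->priority, or the default
 * class) to one of them; a return value of 0 means "no HTB queue applies, use
 * the regular selection".
 */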
static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
				  u16 htb_maj_id)
{
	u16 classid;

	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
		classid = TC_H_MIN(skb->priority);
	else
		classid = READ_ONCE(priv->htb.defcls);

	if (!classid)
		return 0;

	return mlx5e_get_txq_by_classid(priv, classid);
}

u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int num_tc_x_num_ch;
	int txq_ix;
	int up = 0;
	int ch_ix;

	/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
	num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
	if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
		struct mlx5e_ptp *ptp_channel;

		/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
		u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);

		if (unlikely(htb_maj_id)) {
			txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
			if (txq_ix > 0)
				return txq_ix;
		}

		ptp_channel = READ_ONCE(priv->channels.ptp);
		if (unlikely(ptp_channel) &&
		    test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
		    mlx5e_use_ptpsq(skb))
			return mlx5e_select_ptpsq(dev, skb);

		txq_ix = netdev_pick_tx(dev, skb, NULL);
		/* netdev_pick_tx() is not aware of the PTP and HTB txqs; if it
		 * picks one of them, remap to a regular queue. These txqs are
		 * selected only via mlx5e_select_ptpsq() and
		 * mlx5e_select_htb_queue().
		 */
		if (unlikely(txq_ix >= num_tc_x_num_ch))
			txq_ix %= num_tc_x_num_ch;
	} else {
		txq_ix = netdev_pick_tx(dev, skb, NULL);
	}

	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels), so we can return
	 * a txq_ix that matches the channel and packet UP.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}
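
/* Helpers for computing how many header bytes must be copied inline into the
 * WQE's eth segment, depending on the SQ's min inline mode.
 */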
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
{
	if (skb_transport_header_was_set(skb))
		return skb_transport_offset(skb);
	else
		return mlx5e_skb_l2_header_offset(skb);
}

static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
					struct sk_buff *skb)
{
	u16 hlen;

	switch (mode) {
	case MLX5_INLINE_MODE_NONE:
		return 0;
	case MLX5_INLINE_MODE_TCP_UDP:
		hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
			hlen += VLAN_HLEN;
		break;
	case MLX5_INLINE_MODE_IP:
		hlen = mlx5e_skb_l3_header_offset(skb);
		break;
	case MLX5_INLINE_MODE_L2:
	default:
		hlen = mlx5e_skb_l2_header_offset(skb);
	}
	return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, skb->data, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}

/* If the packet is not CHECKSUM_PARTIAL (e.g. an ICMP packet), the L3 checksum
 * flag still needs to be set for IPsec.
 */
static void
ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5_wqe_eth_seg *eseg)
{
	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
	if (skb->encapsulation) {
		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
		sq->stats->csum_partial_inner++;
	} else {
		sq->stats->csum_partial++;
	}
}

static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			    struct mlx5e_accel_tx_state *accel,
			    struct mlx5_wqe_eth_seg *eseg)
{
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
			sq->stats->csum_partial++;
		}
#ifdef CONFIG_MLX5_EN_TLS
	} else if (unlikely(accel && accel->tls.tls_tisn)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
		sq->stats->csum_partial++;
#endif
	} else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
	} else
		sq->stats->csum_none++;
}

static inline u16
mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	u16 ihs;

	if (skb->encapsulation) {
		ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
		stats->tso_inner_packets++;
		stats->tso_inner_bytes += skb->len - ihs;
	} else {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
		else
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		stats->tso_packets++;
		stats->tso_bytes += skb->len - ihs;
	}

	return ihs;
}
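
/* DMA-map the linear part of the skb (if any) and every page fragment, filling
 * one data segment per mapping. Returns the number of mappings pushed to the
 * DMA fifo, or -ENOMEM after unwinding the mappings done so far.
 */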
static inline int
mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			unsigned char *skb_data, u16 headlen,
			struct mlx5_wqe_data_seg *dseg)
{
	dma_addr_t dma_addr = 0;
	u8 num_dma = 0;
	int i;

	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		num_dma++;
		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr = cpu_to_be64(dma_addr);
		dseg->lkey = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		num_dma++;
		dseg++;
	}

	return num_dma;

dma_unmap_wqe_err:
	mlx5e_dma_unmap_wqe_err(sq, num_dma);
	return -ENOMEM;
}

struct mlx5e_tx_attr {
	u32 num_bytes;
	u16 headlen;
	u16 ihs;
	__be16 mss;
	u16 insz;
	u8 opcode;
};

struct mlx5e_tx_wqe_attr {
	u16 ds_cnt;
	u16 ds_cnt_inl;
	u16 ds_cnt_ids;
	u8 num_wqebbs;
};

static u8
mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			 struct mlx5e_accel_tx_state *accel)
{
	u8 mode;

#ifdef CONFIG_MLX5_EN_TLS
	if (accel && accel->tls.tls_tisn)
		return MLX5_INLINE_MODE_TCP_UDP;
#endif

	mode = sq->min_inline_mode;

	if (skb_vlan_tag_present(skb) &&
	    test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
		mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);

	return mode;
}

static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
				  struct mlx5e_accel_tx_state *accel,
				  struct mlx5e_tx_attr *attr)
{
	struct mlx5e_sq_stats *stats = sq->stats;

	if (skb_is_gso(skb)) {
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_LSO,
			.mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
			.ihs = ihs,
			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
			.headlen = skb_headlen(skb) - ihs,
		};

		stats->packets += skb_shinfo(skb)->gso_segs;
	} else {
		u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
		u16 ihs = mlx5e_calc_min_inline(mode, skb);

		*attr = (struct mlx5e_tx_attr) {
			.opcode = MLX5_OPCODE_SEND,
			.mss = cpu_to_be16(0),
			.ihs = ihs,
			.num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
			.headlen = skb_headlen(skb) - ihs,
		};

		stats->packets++;
	}

	attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
	stats->bytes += attr->num_bytes;
}

static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
	u16 ds_cnt_inl = 0;
	u16 ds_cnt_ids = 0;

	if (attr->insz)
		ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
					  MLX5_SEND_WQE_DS);

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
	if (attr->ihs) {
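		/* The first INL_HDR_START_SZ (2) bytes of the inline headers go
		 * into the eth segment's inline_hdr.start; the rest is rounded
		 * up to whole MLX5_SEND_WQE_DS (16-byte) data segments. E.g.
		 * ihs = 18 (L2 header plus room for a VLAN tag) gives inl = 16,
		 * i.e. one extra data segment; with a VLAN tag to be inserted
		 * it becomes 20, i.e. two.
		 */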
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		if (skb_vlan_tag_present(skb))
			inl += VLAN_HLEN;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.ds_cnt_ids = ds_cnt_ids,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}

static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
{
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats->stopped++;
	}
}

static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		     const struct mlx5e_tx_attr *attr,
		     const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
		     struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
		     bool xmit_more)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	bool send_doorbell;

	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = skb,
		.num_bytes = attr->num_bytes,
		.num_dma = num_dma,
		.num_wqebbs = wqe_attr->num_wqebbs,
		.num_fifo_pkts = 0,
	};

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);

	mlx5e_tx_skb_update_hwts_flags(skb);

	sq->pc += wi->num_wqebbs;

	mlx5e_tx_check_stop(sq);

	if (unlikely(sq->ptpsq)) {
		mlx5e_skb_cb_hwtstamp_init(skb);
		mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb);
		skb_get(skb);
	}

	send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
	if (send_doorbell)
		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}

static void
mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		  const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
		  struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	eseg = &wqe->eth;
	dseg = wqe->data;

	eseg->mss = attr->mss;

	if (attr->ihs) {
		if (skb_vlan_tag_present(skb)) {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
			stats->added_vlan_packets++;
		} else {
			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
		}
		dseg += wqe_attr->ds_cnt_inl;
	} else if (skb_vlan_tag_present(skb)) {
		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
			eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
		stats->added_vlan_packets++;
	}

	dseg += wqe_attr->ds_cnt_ids;
	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
					  attr->headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
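
/* Enhanced MPWQE aggregates several packets into a single WQE that shares one
 * eth segment, so only packets that need no inline headers, no VLAN insertion
 * and no extra inline segments, and that fit in a single linear buffer, are
 * eligible.
 */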
static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
{
	return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
	       !attr->insz;
}

static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;

	/* Assumes the session is already running and has at least one packet. */
	return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
}

static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
					 struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	net_prefetchw(wqe->data);

	*session = (struct mlx5e_tx_mpwqe) {
		.wqe = wqe,
		.bytes_count = 0,
		.ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
		.pkt_count = 0,
		.inline_on = 0,
	};

	memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);

	sq->stats->mpwqe_blks++;
}

static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
{
	return sq->mpwqe.wqe;
}

static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5_wqe_data_seg *dseg;

	dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;

	session->pkt_count++;
	session->bytes_count += txd->len;

	dseg->addr = cpu_to_be64(txd->dma_addr);
	dseg->byte_count = cpu_to_be32(txd->len);
	dseg->lkey = sq->mkey_be;
	session->ds_count++;

	sq->stats->mpwqe_pkts++;
}

static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	u8 ds_count = session->ds_count;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_tx_wqe_info *wi;
	u16 pi;

	cseg = &session->wqe->ctrl;
	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);

	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wi = &sq->db.wqe_info[pi];
	*wi = (struct mlx5e_tx_wqe_info) {
		.skb = NULL,
		.num_bytes = session->bytes_count,
		.num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
		.num_dma = session->pkt_count,
		.num_fifo_pkts = session->pkt_count,
	};

	sq->pc += wi->num_wqebbs;

	session->wqe = NULL;

	mlx5e_tx_check_stop(sq);

	return cseg;
}

static void
mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		    struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_xmit_data txd;

	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
		mlx5e_tx_mpwqe_session_complete(sq);
		mlx5e_tx_mpwqe_session_start(sq, eseg);
	}

	sq->stats->xmit_more += xmit_more;

	txd.data = skb->data;
	txd.len = skb->len;

	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
		goto err_drop;
	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
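
	/* Several packets share this WQE, so the skb can't hang off the
	 * wqe_info directly; queue it in the skb FIFO for the completion
	 * handler (see mlx5e_tx_wi_consume_fifo_skbs()).
	 */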
	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb);

	mlx5e_tx_mpwqe_add_dseg(sq, &txd);

	mlx5e_tx_skb_update_hwts_flags(skb);

	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
		/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
			mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	} else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
		/* Might stop the queue, but we were asked to ring the doorbell anyway. */
		cseg = mlx5e_tx_mpwqe_session_complete(sq);

		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	}

	return;

err_drop:
	/* The failed mapping was never pushed to the DMA fifo, so there is
	 * nothing to unmap here.
	 */
	sq->stats->dropped++;
	dev_kfree_skb_any(skb);
}

void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
{
	/* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
	if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
		mlx5e_tx_mpwqe_session_complete(sq);
}

static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
		return false;

	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);

	return true;
}

netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	struct mlx5e_txqsq *sq;
	u16 pi;

	sq = priv->txq2sq[skb_get_queue_mapping(skb)];
	if (unlikely(!sq)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* May send SKBs and WQEs. */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);

	if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
		if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
			struct mlx5_wqe_eth_seg eseg = {};

			if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
							     attr.ihs)))
				return NETDEV_TX_OK;

			mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
			return NETDEV_TX_OK;
		}

		mlx5e_tx_mpwqe_ensure_complete(sq);
	}

	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);

	/* May update the WQE, but may not post other WQEs. */
	mlx5e_accel_tx_finish(sq, wqe, &accel,
			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
		return NETDEV_TX_OK;

	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());

	return NETDEV_TX_OK;
}

void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5E_TX_FETCH_WQE(sq, pi);
	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
}

static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
				  u32 *dma_fifo_cc)
{
	int i;

	for (i = 0; i < wi->num_dma; i++) {
		struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);

		mlx5e_tx_dma_unmap(sq->pdev, dma);
	}
}

static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, int napi_budget)
{
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct skb_shared_hwtstamps hwts = {};
		u64 ts = get_cqe_ts(cqe);

		hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
		if (sq->ptpsq)
			mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
						      hwts.hwtstamp, sq->ptpsq->cq_stats);
		else
			skb_tstamp_tx(skb, &hwts);
	}

	napi_consume_skb(skb, napi_budget);
}

static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
					  struct mlx5_cqe64 *cqe, int napi_budget)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++) {
		struct sk_buff *skb = mlx5e_skb_fifo_pop(&sq->db.skb_fifo);

		mlx5e_consume_skb(sq, skb, cqe, napi_budget);
	}
}

bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_txqsq *sq;
	struct mlx5_cqe64 *cqe;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_txqsq, cq);

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	cqe = mlx5_cqwq_get_cqe(&cq->wq);
	if (!cqe)
		return false;

	stats = sq->stats;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	i = 0;
	do {
		struct mlx5e_tx_wqe_info *wi;
		u16 wqe_counter;
		bool last_wqe;
		u16 ci;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			last_wqe = (sqcc == wqe_counter);

			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
			wi = &sq->db.wqe_info[ci];

			sqcc += wi->num_wqebbs;

			if (likely(wi->skb)) {
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);

				npkts++;
				nbytes += wi->num_bytes;
				continue;
			}

			if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
									       &dma_fifo_cc)))
				continue;

			if (wi->num_fifo_pkts) {
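				/* MPWQE completion: this WQE carried several
				 * packets; unmap their buffers and release the
				 * skbs queued in the FIFO at xmit time.
				 */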
				mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
				mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);

				npkts += wi->num_fifo_pkts;
				nbytes += wi->num_bytes;
			}
		} while (!last_wqe);

		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
			if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
					      &sq->state)) {
				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
						     (struct mlx5_err_cqe *)cqe);
				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
				queue_work(cq->priv->wq, &sq->recover_work);
			}
			stats->cqe_err++;
		}

	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));

	stats->cqes += i;

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
	    !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
		netif_tx_wake_queue(sq->txq);
		stats->wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}

static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
{
	int i;

	for (i = 0; i < wi->num_fifo_pkts; i++)
		dev_kfree_skb_any(mlx5e_skb_fifo_pop(&sq->db.skb_fifo));
}

void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	u32 dma_fifo_cc, nbytes = 0;
	u16 ci, sqcc, npkts = 0;

	sqcc = sq->cc;
	dma_fifo_cc = sq->dma_fifo_cc;

	while (sqcc != sq->pc) {
		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
		wi = &sq->db.wqe_info[ci];

		sqcc += wi->num_wqebbs;

		if (likely(wi->skb)) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			dev_kfree_skb_any(wi->skb);

			npkts++;
			nbytes += wi->num_bytes;
			continue;
		}

		if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
			continue;

		if (wi->num_fifo_pkts) {
			mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
			mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);

			npkts += wi->num_fifo_pkts;
			nbytes += wi->num_bytes;
		}
	}

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
}

#ifdef CONFIG_MLX5_CORE_IPOIB
static inline void
mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
			   struct mlx5_wqe_datagram_seg *dseg)
{
	memcpy(&dseg->av, av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(dqpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}

static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
				   const struct mlx5e_tx_attr *attr,
				   struct mlx5e_tx_wqe_attr *wqe_attr)
{
	u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
	u16 ds_cnt_inl = 0;

	ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;

	if (attr->ihs) {
		u16 inl = attr->ihs - INL_HDR_START_SZ;

		ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
		ds_cnt += ds_cnt_inl;
	}

	*wqe_attr = (struct mlx5e_tx_wqe_attr) {
		.ds_cnt = ds_cnt,
		.ds_cnt_inl = ds_cnt_inl,
		.num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
	};
}
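
/* IPoIB transmit: same WQE layout as the Ethernet path, plus a datagram
 * (address vector) segment describing the destination. VLAN insertion and the
 * accel (TLS/IPsec) hooks do not apply here.
 */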
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
		   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
	struct mlx5e_tx_wqe_attr wqe_attr;
	struct mlx5e_tx_attr attr;
	struct mlx5i_tx_wqe *wqe;

	struct mlx5_wqe_datagram_seg *datagram;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_eth_seg *eseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_tx_wqe_info *wi;

	struct mlx5e_sq_stats *stats = sq->stats;
	int num_dma;
	u16 pi;

	mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
	mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);

	pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
	wqe = MLX5I_SQ_FETCH_WQE(sq, pi);

	stats->xmit_more += xmit_more;

	/* fill wqe */
	wi = &sq->db.wqe_info[pi];
	cseg = &wqe->ctrl;
	datagram = &wqe->datagram;
	eseg = &wqe->eth;
	dseg = wqe->data;

	mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);

	mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);

	eseg->mss = attr.mss;

	if (attr.ihs) {
		memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
		dseg += wqe_attr.ds_cnt_inl;
	}

	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
					  attr.headlen, dseg);
	if (unlikely(num_dma < 0))
		goto err_drop;

	mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);

	return;

err_drop:
	stats->dropped++;
	dev_kfree_skb_any(skb);
}
#endif