/*
 * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "ipoib.h"

#define IB_DEFAULT_Q_KEY   0xb1b
#define MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE 9

static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);

static const struct net_device_ops mlx5i_netdev_ops = {
        .ndo_open                = mlx5i_open,
        .ndo_stop                = mlx5i_close,
        .ndo_get_stats64         = mlx5i_get_stats,
        .ndo_init                = mlx5i_dev_init,
        .ndo_uninit              = mlx5i_dev_cleanup,
        .ndo_change_mtu          = mlx5i_change_mtu,
        .ndo_do_ioctl            = mlx5i_ioctl,
};

/* IPoIB mlx5 netdev profile */
static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params)
{
        /* Override RQ params as IPoIB supports only LINKED LIST RQ for now */
        MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, false);
        mlx5e_set_rq_type(mdev, params);
        mlx5e_init_rq_type_params(mdev, params);

        /* RQ size in ipoib by default is 512 */
        params->log_rq_mtu_frames = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
                MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;

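        /*
         * Note (added commentary): LRO and tunneled offloads are
         * Ethernet-oriented features in this driver and stay disabled on
         * the IPoIB link type; hard_mtu reserves headroom for the 40-byte
         * GRH plus the IPoIB hard header.
         */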
        params->lro_en = false;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
        params->tunneled_offload_en = false;
}

/* Called directly after IPoIB netdevice was created to initialize SW structs */
int mlx5i_init(struct mlx5_core_dev *mdev,
               struct net_device *netdev,
               const struct mlx5e_profile *profile,
               void *ppriv)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        int err;

        err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
        if (err)
                return err;

        mlx5e_set_netdev_mtu_boundaries(priv);
        netdev->mtu = netdev->max_mtu;

        mlx5e_build_nic_params(priv, NULL, &priv->rss_params,
                               &priv->channels.params, netdev->mtu);
        mlx5i_build_nic_params(mdev, &priv->channels.params);

        mlx5e_timestamp_init(priv);

        /* netdev init */
        netdev->hw_features |= NETIF_F_SG;
        netdev->hw_features |= NETIF_F_IP_CSUM;
        netdev->hw_features |= NETIF_F_IPV6_CSUM;
        netdev->hw_features |= NETIF_F_GRO;
        netdev->hw_features |= NETIF_F_TSO;
        netdev->hw_features |= NETIF_F_TSO6;
        netdev->hw_features |= NETIF_F_RXCSUM;
        netdev->hw_features |= NETIF_F_RXHASH;

        netdev->netdev_ops = &mlx5i_netdev_ops;
        netdev->ethtool_ops = &mlx5i_ethtool_ops;

        return 0;
}

/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
        mlx5e_netdev_cleanup(priv->netdev, priv);
}

static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats s = { 0 };
        int i, j;

        for (i = 0; i < priv->max_nch; i++) {
                struct mlx5e_channel_stats *channel_stats;
                struct mlx5e_rq_stats *rq_stats;

                channel_stats = &priv->channel_stats[i];
                rq_stats = &channel_stats->rq;

                s.rx_packets += rq_stats->packets;
                s.rx_bytes   += rq_stats->bytes;

                for (j = 0; j < priv->max_opened_tc; j++) {
                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];

                        s.tx_packets       += sq_stats->packets;
                        s.tx_bytes         += sq_stats->bytes;
                        s.tx_queue_dropped += sq_stats->dropped;
                }
        }

        memcpy(&priv->stats.sw, &s, sizeof(s));
}

void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        mlx5i_grp_sw_update_stats(priv);

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes   = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes   = sstats->tx_bytes;
        stats->tx_dropped = sstats->tx_queue_dropped;
}
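
/*
 * Underlay QP lifecycle (added summary of the helpers below): the QP is
 * walked through the standard IB state machine purely via firmware
 * commands,
 *
 *   RST --RST2INIT--> INIT --INIT2RTR--> RTR --RTR2RTS--> RTS
 *
 * and on any failure it is moved to ERR, so that the 2RST issued later by
 * mlx5i_uninit_underlay_qp() starts from a well-defined state.
 */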
int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        int ret;

        {
                u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
                u32 *qpc;

                qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);

                MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
                MLX5_SET(qpc, qpc, primary_address_path.pkey_index,
                         ipriv->pkey_index);
                MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, 1);
                MLX5_SET(qpc, qpc, q_key, IB_DEFAULT_Q_KEY);

                MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
                MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, rst2init_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        {
                u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};

                MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
                MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        {
                u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};

                MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
                MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn);
                ret = mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
                if (ret)
                        goto err_qp_modify_to_err;
        }
        return 0;

err_qp_modify_to_err:
        {
                u32 in[MLX5_ST_SZ_DW(qp_2err_in)] = {};

                MLX5_SET(qp_2err_in, in, opcode, MLX5_CMD_OP_2ERR_QP);
                MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn);
                mlx5_cmd_exec_in(mdev, qp_2err, in);
        }
        return ret;
}

void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        struct mlx5_core_dev *mdev = priv->mdev;
        u32 in[MLX5_ST_SZ_DW(qp_2rst_in)] = {};

        MLX5_SET(qp_2rst_in, in, opcode, MLX5_CMD_OP_2RST_QP);
        MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn);
        mlx5_cmd_exec_in(mdev, qp_2rst, in);
}

#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2

int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
        u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
        u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
        struct mlx5i_priv *ipriv = priv->ppriv;
        void *addr_path;
        int ret = 0;
        void *qpc;

        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
                 MLX5_QP_ENHANCED_ULP_STATELESS_MODE);

        addr_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
        MLX5_SET(ads, addr_path, vhca_port_num, 1);
        MLX5_SET(ads, addr_path, grh, 1);

        MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
        ret = mlx5_cmd_exec_inout(priv->mdev, create_qp, in, out);
        if (ret)
                return ret;

        ipriv->qpn = MLX5_GET(create_qp_out, out, qpn);

        return 0;
}

void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};

        MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
        MLX5_SET(destroy_qp_in, in, qpn, qpn);
        mlx5_cmd_exec_in(mdev, destroy_qp, in);
}

int mlx5i_update_nic_rx(struct mlx5e_priv *priv)
{
        return mlx5e_refresh_tirs(priv, true, true);
}

int mlx5i_create_tis(struct mlx5_core_dev *mdev, u32 underlay_qpn, u32 *tisn)
{
        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
        void *tisc;

        tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

        MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);

        return mlx5e_create_tis(mdev, in, tisn);
}
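
/*
 * Added note: TX setup pairs an underlay UD QP with a TIS that carries its
 * QPN, so send traffic from the SQs is associated with the IPoIB QP;
 * mlx5i_cleanup_tx() below therefore destroys the TIS before the QP.
 */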
static int mlx5i_init_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;
        int err;

        err = mlx5i_create_underlay_qp(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create underlay QP failed, %d\n", err);
                return err;
        }

        err = mlx5i_create_tis(priv->mdev, ipriv->qpn, &priv->tisn[0][0]);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
                goto err_destroy_underlay_qp;
        }

        return 0;

err_destroy_underlay_qp:
        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
        return err;
}

static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
{
        struct mlx5i_priv *ipriv = priv->ppriv;

        mlx5e_destroy_tis(priv->mdev, priv->tisn[0][0]);
        mlx5i_destroy_underlay_qp(priv->mdev, ipriv->qpn);
}

static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
        struct ttc_params ttc_params = {};
        int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);

        if (!priv->fs.ns)
                return -EINVAL;

        err = mlx5e_arfs_create_tables(priv);
        if (err) {
                netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
                           err);
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

        mlx5e_set_ttc_basic_params(priv, &ttc_params);
        mlx5e_set_inner_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

        err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

        mlx5e_set_ttc_ft_params(&ttc_params);
        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

        err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
                goto err_destroy_inner_ttc_table;
        }

        return 0;

err_destroy_inner_ttc_table:
        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

        return err;
}

static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
        mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
        mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
        mlx5e_arfs_destroy_tables(priv);
}

static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        mlx5e_create_q_counters(priv);

        err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_destroy_q_counters;
        }

        err = mlx5e_create_indirect_rqt(priv);
        if (err)
                goto err_close_drop_rq;

        err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_rqts;

        err = mlx5e_create_indirect_tirs(priv, true);
        if (err)
                goto err_destroy_direct_rqts;

        err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
        if (err)
                goto err_destroy_indirect_tirs;

        err = mlx5i_create_flow_steering(priv);
        if (err)
                goto err_destroy_direct_tirs;

        return 0;

err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
        mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
        mlx5e_destroy_q_counters(priv);
        return err;
}

static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
        mlx5i_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
        mlx5e_destroy_indirect_tirs(priv);
        mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        mlx5e_close_drop_rq(&priv->drop_rq);
        mlx5e_destroy_q_counters(priv);
}
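
/*
 * Added note: mlx5i_init_rx() builds the receive pipeline bottom-up
 * (q counters, drop RQ, RQTs, TIRs, then flow steering), and
 * mlx5i_cleanup_rx() tears it down in exactly the reverse order,
 * mirroring the error-unwind labels above.
 */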

/* The stats groups order is opposite to the order of the update_stats() calls */
static mlx5e_stats_grp_t mlx5i_stats_grps[] = {
        &MLX5E_STATS_GRP(sw),
        &MLX5E_STATS_GRP(qcnt),
        &MLX5E_STATS_GRP(vnic_env),
        &MLX5E_STATS_GRP(vport),
        &MLX5E_STATS_GRP(802_3),
        &MLX5E_STATS_GRP(2863),
        &MLX5E_STATS_GRP(2819),
        &MLX5E_STATS_GRP(phy),
        &MLX5E_STATS_GRP(pcie),
        &MLX5E_STATS_GRP(per_prio),
        &MLX5E_STATS_GRP(pme),
        &MLX5E_STATS_GRP(channels),
        &MLX5E_STATS_GRP(per_port_buff_congest),
};

static unsigned int mlx5i_stats_grps_num(struct mlx5e_priv *priv)
{
        return ARRAY_SIZE(mlx5i_stats_grps);
}

static const struct mlx5e_profile mlx5i_nic_profile = {
        .init              = mlx5i_init,
        .cleanup           = mlx5i_cleanup,
        .init_tx           = mlx5i_init_tx,
        .cleanup_tx        = mlx5i_cleanup_tx,
        .init_rx           = mlx5i_init_rx,
        .cleanup_rx        = mlx5i_cleanup_rx,
        .enable            = NULL, /* mlx5i_enable */
        .disable           = NULL, /* mlx5i_disable */
        .update_rx         = mlx5i_update_nic_rx,
        .update_stats      = NULL, /* mlx5i_update_stats */
        .update_carrier    = NULL, /* no HW update in IB link */
        .rx_handlers.handle_rx_cqe       = mlx5i_handle_rx_cqe,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
        .max_tc            = MLX5I_MAX_NUM_TC,
        .rq_groups         = MLX5E_NUM_RQ_GROUPS(REGULAR),
        .stats_grps        = mlx5i_stats_grps,
        .stats_grps_num    = mlx5i_stats_grps_num,
};

/* mlx5i netdev NDOs */

static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5e_channels new_channels = {};
        struct mlx5e_params *params;
        int err = 0;

        mutex_lock(&priv->state_lock);

        params = &priv->channels.params;

        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                params->sw_mtu = new_mtu;
                netdev->mtu = params->sw_mtu;
                goto out;
        }

        new_channels.params = *params;
        new_channels.params.sw_mtu = new_mtu;

        err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
        if (err)
                goto out;

        netdev->mtu = new_channels.params.sw_mtu;

out:
        mutex_unlock(&priv->state_lock);
        return err;
}

int mlx5i_dev_init(struct net_device *dev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv = priv->ppriv;

        /* Set dev address using underlay QP: bytes 1..3 of the IPoIB
         * hardware address carry the QPN.
         */
        dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
        dev->dev_addr[2] = (ipriv->qpn >>  8) & 0xff;
        dev->dev_addr[3] = (ipriv->qpn) & 0xff;

        /* Add QPN to net-device mapping to HT */
        mlx5i_pkey_add_qpn(dev, ipriv->qpn);

        return 0;
}

int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);

        switch (cmd) {
        case SIOCSHWTSTAMP:
                return mlx5e_hwstamp_set(priv, ifr);
        case SIOCGHWTSTAMP:
                return mlx5e_hwstamp_get(priv, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

void mlx5i_dev_cleanup(struct net_device *dev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv = priv->ppriv;

        mlx5i_uninit_underlay_qp(priv);

        /* Delete QPN to net-device mapping from HT */
        mlx5i_pkey_del_qpn(dev, ipriv->qpn);
}
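
/*
 * Added note: ndo_open brings the underlay QP to RTS, attaches its QPN to
 * the RX flow tables, and only then opens and activates the channels;
 * ndo_close (and the error paths in mlx5i_open()) unwind in reverse order.
 */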
static int mlx5i_open(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;
        int err;

        mutex_lock(&epriv->state_lock);

        set_bit(MLX5E_STATE_OPENED, &epriv->state);

        err = mlx5i_init_underlay_qp(epriv);
        if (err) {
                mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
                goto err_clear_state_opened_flag;
        }

        err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qpn);
        if (err) {
                mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
                goto err_reset_qp;
        }

        err = mlx5e_open_channels(epriv, &epriv->channels);
        if (err)
                goto err_remove_fs_underlay_qp;

        epriv->profile->update_rx(epriv);
        mlx5e_activate_priv_channels(epriv);

        mutex_unlock(&epriv->state_lock);
        return 0;

err_remove_fs_underlay_qp:
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
err_reset_qp:
        mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
        clear_bit(MLX5E_STATE_OPENED, &epriv->state);
        mutex_unlock(&epriv->state_lock);
        return err;
}

static int mlx5i_close(struct net_device *netdev)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = epriv->ppriv;
        struct mlx5_core_dev *mdev = epriv->mdev;

        /* May already be CLOSED in case a previous configuration operation
         * (e.g. RX/TX queue size change) that involved close & open failed.
         */
        mutex_lock(&epriv->state_lock);

        if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
                goto unlock;

        clear_bit(MLX5E_STATE_OPENED, &epriv->state);

        netif_carrier_off(epriv->netdev);
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qpn);
        mlx5e_deactivate_priv_channels(epriv);
        mlx5e_close_channels(&epriv->channels);
        mlx5i_uninit_underlay_qp(epriv);
unlock:
        mutex_unlock(&epriv->state_lock);
        return 0;
}

/* IPoIB RDMA netdev callbacks */
static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid, int set_qkey,
                              u32 qkey)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = epriv->mdev;
        struct mlx5i_priv *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "attaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
                      gid->raw);
        err = mlx5_core_attach_mcg(mdev, gid, ipriv->qpn);
        if (err)
                mlx5_core_warn(mdev, "failed attaching QPN 0x%x, MGID %pI6\n",
                               ipriv->qpn, gid->raw);

        if (set_qkey) {
                mlx5_core_dbg(mdev, "%s setting qkey 0x%x\n",
                              netdev->name, qkey);
                ipriv->qkey = qkey;
        }

        return err;
}

static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
                              union ib_gid *gid, u16 lid)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
        struct mlx5_core_dev *mdev = epriv->mdev;
        struct mlx5i_priv *ipriv = epriv->ppriv;
        int err;

        mlx5_core_dbg(mdev, "detaching QPN 0x%x, MGID %pI6\n", ipriv->qpn,
                      gid->raw);

        err = mlx5_core_detach_mcg(mdev, gid, ipriv->qpn);
        if (err)
                mlx5_core_dbg(mdev, "failed detaching QPN 0x%x, MGID %pI6\n",
                              ipriv->qpn, gid->raw);

        return err;
}

static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
                      struct ib_ah *address, u32 dqpn)
{
        struct mlx5e_priv *epriv = mlx5i_epriv(dev);
        struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)];
        struct mlx5_ib_ah *mah = to_mah(address);
        struct mlx5i_priv *ipriv = epriv->ppriv;

        mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());

        return NETDEV_TX_OK;
}
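
/*
 * Added note: rdma_netdev "set_id" hook. The IPoIB ULP passes a P_Key
 * table index, which is cached here and applied to the QP context on the
 * next RST2INIT transition in mlx5i_init_underlay_qp().
 */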
static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
{
        struct mlx5i_priv *ipriv = netdev_priv(netdev);

        ipriv->pkey_index = (u16)id;
}

static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
                return -EOPNOTSUPP;

        if (!MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
                mlx5_core_warn(mdev, "IPoIB enhanced offloads are not supported\n");
                return -EOPNOTSUPP;
        }

        return 0;
}

static void mlx5_rdma_netdev_free(struct net_device *netdev)
{
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;

        mlx5e_detach_netdev(priv);
        profile->cleanup(priv);

        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
                mlx5e_destroy_mdev_resources(priv->mdev);
        }
}

static bool mlx5_is_sub_interface(struct mlx5_core_dev *mdev)
{
        return mdev->mlx5e_res.pdn != 0;
}

static const struct mlx5e_profile *mlx5_get_profile(struct mlx5_core_dev *mdev)
{
        if (mlx5_is_sub_interface(mdev))
                return mlx5i_pkey_get_profile();
        return &mlx5i_nic_profile;
}

static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
                              struct net_device *netdev, void *param)
{
        struct mlx5_core_dev *mdev = (struct mlx5_core_dev *)param;
        const struct mlx5e_profile *prof = mlx5_get_profile(mdev);
        struct mlx5i_priv *ipriv;
        struct mlx5e_priv *epriv;
        struct rdma_netdev *rn;
        int err;

        ipriv = netdev_priv(netdev);
        epriv = mlx5i_epriv(netdev);

        ipriv->sub_interface = mlx5_is_sub_interface(mdev);
        if (!ipriv->sub_interface) {
                err = mlx5i_pkey_qpn_ht_init(netdev);
                if (err) {
                        mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
                        return err;
                }

                /* This should only be called once per mdev */
                err = mlx5e_create_mdev_resources(mdev);
                if (err)
                        goto destroy_ht;
        }

        prof->init(mdev, netdev, prof, ipriv);

        err = mlx5e_attach_netdev(epriv);
        if (err)
                goto detach;
        netif_carrier_off(netdev);

        /* set rdma_netdev func pointers */
        rn = &ipriv->rn;
        rn->hca = ibdev;
        rn->send = mlx5i_xmit;
        rn->attach_mcast = mlx5i_attach_mcast;
        rn->detach_mcast = mlx5i_detach_mcast;
        rn->set_id = mlx5i_set_pkey_index;

        netdev->priv_destructor = mlx5_rdma_netdev_free;
        netdev->needs_free_netdev = 1;

        return 0;

detach:
        prof->cleanup(epriv);
        if (ipriv->sub_interface)
                return err;
        mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
        mlx5i_pkey_qpn_ht_cleanup(netdev);
        return err;
}

int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
                            struct ib_device *device,
                            struct rdma_netdev_alloc_params *params)
{
        int nch;
        int rc;

        rc = mlx5i_check_required_hca_cap(mdev);
        if (rc)
                return rc;

        nch = mlx5e_get_max_num_channels(mdev);

        *params = (struct rdma_netdev_alloc_params){
                .sizeof_priv = sizeof(struct mlx5i_priv) +
                               sizeof(struct mlx5e_priv),
                .txqs = nch * MLX5E_MAX_NUM_TC,
                .rxqs = nch,
                .param = mdev,
                .initialize_rdma_netdev = mlx5_rdma_setup_rn,
        };

        return 0;
}
EXPORT_SYMBOL(mlx5_rdma_rn_get_params);