/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H
#include "status.h"
#include "osdep.h"
#include "irdma.h"
#include "user.h"
#include "hmc.h"
#include "uda.h"
#include "ws.h"

/* Debug-trace category tags passed to the driver's debug print helpers. */
#define IRDMA_DEBUG_ERR		"ERR"
#define IRDMA_DEBUG_INIT	"INIT"
#define IRDMA_DEBUG_DEV		"DEV"
#define IRDMA_DEBUG_CM		"CM"
#define IRDMA_DEBUG_VERBS	"VERBS"
#define IRDMA_DEBUG_PUDA	"PUDA"
#define IRDMA_DEBUG_ILQ		"ILQ"
#define IRDMA_DEBUG_IEQ		"IEQ"
#define IRDMA_DEBUG_QP		"QP"
#define IRDMA_DEBUG_CQ		"CQ"
#define IRDMA_DEBUG_MR		"MR"
#define IRDMA_DEBUG_PBLE	"PBLE"
#define IRDMA_DEBUG_WQE		"WQE"
#define IRDMA_DEBUG_AEQ		"AEQ"
#define IRDMA_DEBUG_CQP		"CQP"
#define IRDMA_DEBUG_HMC		"HMC"
#define IRDMA_DEBUG_USER	"USER"
#define IRDMA_DEBUG_VIRT	"VIRT"
#define IRDMA_DEBUG_DCB		"DCB"
#define IRDMA_DEBUG_CQE		"CQE"
#define IRDMA_DEBUG_CLNT	"CLNT"
#define IRDMA_DEBUG_WS		"WS"
#define IRDMA_DEBUG_STATS	"STATS"

enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M,
	IRDMA_PAGE_SIZE_1G,
};

/* Header-control flags carried in irdma_terminate_hdr.hdrct. */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG = 0x80,
	DDP_HDR_FLAG = 0x40,
	RDMA_HDR_FLAG = 0x20,
};

/* Protocol layer that originated a TERMINATE message. */
enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP = 1,
	LAYER_MPA = 2,
};

/* Error-type field of a TERMINATE message; values are scoped per layer
 * (RDMAP_* for LAYER_RDMA, DDP_* for LAYER_DDP), hence the overlaps.
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP = 2,
	DDP_CATASTROPHIC = 0,
	DDP_TAGGED_BUF = 1,
	DDP_UNTAGGED_BUF = 2,
	DDP_LLP = 3,
};

/* RDMAP-layer terminate error codes. */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG = 0x00,
	RDMAP_INV_BOUNDS = 0x01,
	RDMAP_ACCESS = 0x02,
	RDMAP_UNASSOC_STAG = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_RDMAP_VER = 0x05,
	RDMAP_UNEXPECTED_OP = 0x06,
	RDMAP_CATASTROPHIC_LOCAL = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff,
};

/* DDP-layer terminate error codes. Values overlap because they are
 * interpreted in the context of the DDP error type (catastrophic,
 * tagged-buffer or untagged-buffer) selected above.
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL = 0x00,
	DDP_TAGGED_INV_STAG = 0x00,
	DDP_TAGGED_BOUNDS = 0x01,
	DDP_TAGGED_UNASSOC_STAG = 0x02,
	DDP_TAGGED_TO_WRAP = 0x03,
	DDP_TAGGED_INV_DDP_VER = 0x04,
	DDP_UNTAGGED_INV_QN = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
	DDP_UNTAGGED_INV_MO = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
	DDP_UNTAGGED_INV_DDP_VER = 0x06,
};

/* MPA-layer terminate error codes. */
enum irdma_term_mpa_errors {
	MPA_CLOSED = 0x01,
	MPA_CRC = 0x02,
	MPA_MARKER = 0x03,
	MPA_REQ_RSP = 0x04,
};

enum irdma_qp_event_type {
	IRDMA_QP_EVENT_CATASTROPHIC,
	IRDMA_QP_EVENT_ACCESS_ERR,
};

/* Indices into the 32-bit HW statistics arrays. */
enum irdma_hw_stats_index_32b {
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC		= 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE	= 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD	= 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC		= 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE	= 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG		= 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR		= 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR	= 8,
	IRDMA_HW_STAT_INDEX_MAX_32_GEN_1	= 9, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_RXVLANERR		= 9,
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED	= 10,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED	= 11,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT		= 12,
	IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */
};

/* Indices into the 64-bit HW statistics arrays. */
enum irdma_hw_stats_index_64b {
	IRDMA_HW_STAT_INDEX_IP4RXOCTS	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS	= 1,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS	= 2,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS	= 3,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS	= 4,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS	= 5,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS	= 6,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS	= 7,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS	= 8,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS	= 9,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS	= 10,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS	= 11,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS	= 12,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS	= 13,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS	= 14,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS	= 15,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS	= 16,
	IRDMA_HW_STAT_INDEX_TCPTXSEG	= 17,
	IRDMA_HW_STAT_INDEX_RDMARXRDS	= 18,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS	= 19,
	IRDMA_HW_STAT_INDEX_RDMARXWRS	= 20,
	IRDMA_HW_STAT_INDEX_RDMATXRDS	= 21,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS	= 22,
	IRDMA_HW_STAT_INDEX_RDMATXWRS	= 23,
	IRDMA_HW_STAT_INDEX_RDMAVBND	= 24,
	IRDMA_HW_STAT_INDEX_RDMAVINV	= 25,
	IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS	= 26,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS	= 27,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS	= 28,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS	= 29,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS	= 30,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS	= 31,
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32,
	IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */
};

/* Indices into irdma_sc_dev.feature_info[]. */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX       = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};

enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR     = 1,
	IRDMA_PRIO_STRICT	   = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};

enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE,
	IRDMA_PF_TYPE,
};

/* HMC resource-partitioning profiles (see hmc_profile users). */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT  = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL    = 3,
};

/* Quad-hash entry classification for connection steering. */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};

enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};

enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};

enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP,
};

struct irdma_sc_dev;
struct irdma_vsi_pestat;

/* DCQCN congestion-control tunables pushed to the device. */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};

/* Parameters for irdma_sc_cqp_init(); mirrored into irdma_sc_cqp. */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;
	u64 sq_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

/* On-the-wire TERMINATE message header. */
struct irdma_terminate_hdr {
	u8 layer_etype;	/* layer (irdma_term_layers) + error type */
	u8 error_code;
	u8 hdrct;	/* irdma_hdrct_flags */
	u8 rsvd;
};

struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};

struct irdma_dev_hw_stats_offsets {
	u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64];
};

/* Accumulated statistics values, indexed by the enums above. */
struct irdma_dev_hw_stats {
	u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64];
};

/* Layout of the statistics buffer gathered from the device; field order
 * matches the hardware DMA format — do not reorder.
 */
struct irdma_gather_stats {
	u32 rsvd1;
	u32 rxvlanerr;
	u64 ip4rxocts;
	u64 ip4rxpkts;
	u32 ip4rxtrunc;
	u32 ip4rxdiscard;
	u64 ip4rxfrags;
	u64 ip4rxmcocts;
	u64 ip4rxmcpkts;
	u64 ip6rxocts;
	u64 ip6rxpkts;
	u32 ip6rxtrunc;
	u32 ip6rxdiscard;
	u64 ip6rxfrags;
	u64 ip6rxmcocts;
	u64 ip6rxmcpkts;
	u64 ip4txocts;
	u64 ip4txpkts;
	u64 ip4txfrag;
	u64 ip4txmcocts;
	u64 ip4txmcpkts;
	u64 ip6txocts;
	u64 ip6txpkts;
	u64 ip6txfrags;
	u64 ip6txmcocts;
	u64 ip6txmcpkts;
	u32 ip6txnoroute;
	u32 ip4txnoroute;
	u64 tcprxsegs;
	u32 tcprxprotoerr;
	u32 tcprxopterr;
	u64 tcptxsegs;
	u32 rsvd2;
	u32 tcprtxseg;
	u64 udprxpkts;
	u64 udptxpkts;
	u64 rdmarxwrs;
	u64 rdmarxrds;
	u64 rdmarxsnds;
	u64 rdmatxwrs;
	u64 rdmatxrds;
	u64 rdmatxsnds;
	u64 rdmavbn;
	u64 rdmavinv;
	u64 rxnpecnmrkpkts;
	u32 rxrpcnphandled;
	u32 rxrpcnpignored;
	u32 txnpcnpsent;
	u32 rsvd3[88];
};

struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u8 hmc_fcn_index;
	u8 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;
	void *last_gather_stats_va;
};

/* Per-VSI statistics state; hw_stats accumulates deltas between gathers. */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct timer_list stats_timer;
	struct irdma_sc_vsi *vsi;
	struct irdma_dev_hw_stats last_hw_stats;
	spinlock_t lock; /* rdma stats lock */
};

struct irdma_hw {
	u8 __iomem *hw_addr;
	u8 __iomem *priv_hw_addr;
	struct device *device;
	struct irdma_hmc_info hmc;
};

/* Partial-FPDU (iWARP MPA) reassembly state and IEQ counters. */
struct irdma_pfpdu {
	struct list_head rxlist;
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8 marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};

struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;
};

struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};

/* Control QP (CQP): the admin queue used to post firmware commands. */
struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;
	struct irdma_sc_dev *dev;
	enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev,
						  struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};

/* Asynchronous event queue. */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;
	bool virtual_map:1;
};

/* Completion event queue; fans out to the CQs registered in reg_cq. */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};

/* Completion queue. */
struct irdma_sc_cq {
	struct irdma_cq_uk cq_uk;
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	void *pbl_list;
	void *back_cq;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u8 cq_type;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	bool ceqe_mask:1;
	bool virtual_map:1;
	bool check_overflow:1;
	bool ceq_id_valid:1;
	bool tph_en;
};

/* Queue pair. */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;
};

struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u8 hmc_fn_id;
	u8 stats_idx;
};

struct irdma_up_info {
	u8 map[8];
	u8 cnp_up_override;
	u8 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};

#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

/* Work-scheduler tree node attributes for the CQP WS-node command. */
struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;
	u8 tc;
	u8 weight;
};

/* Miscellaneous limits parsed from the HMC FPM query buffer. */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};

#define IRDMA_LEAF_DEFAULT_REL_BW	64
#define IRDMA_PARENT_DEFAULT_REL_BW	1

/* Per-user-priority QoS level; qplist holds the QPs assigned to it. */
struct irdma_qos {
	struct list_head qplist;
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u64 lan_qos_handle;
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};

#define IRDMA_INVALID_FCN_ID 0xff
/* Virtual station interface: per-port container for QPs, PUDA queues
 * (ILQ/IEQ), QoS levels and statistics.
 */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;
	u32 exception_lan_q;
	u16 mtu;
	u16 vm_id;
	u8 fcn_id;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_fcn_id_alloc:1;
	bool tc_change_pending:1;
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
						struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	u8 qos_rel_bw;
	u8 qos_prio_type;
};

/* Top-level device object: doorbells, HMC state, CQP/CCQ/CEQ/AEQ handles
 * and per-register/mask/shift tables that abstract HW generations.
 */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u8 __iomem *db_addr;
	u32 __iomem *wqe_alloc_db;
	u32 __iomem *cq_arm_db;
	u32 __iomem *aeq_alloc_db;
	u32 __iomem *cqp_db;
	u32 __iomem *cq_ack_db;
	u32 __iomem *ceq_itr_mask_db;
	u32 __iomem *aeq_itr_mask_db;
	u32 __iomem *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr;   /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u64 hw_shifts[IRDMA_MAX_SHIFTS];
	u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64];
	u64 feature_info[IRDMA_MAX_FEATURES];
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u16 num_vfs;
	u8 hmc_fn_id;
	u8 vf_id;
	bool vchnl_up:1;
	bool ceq_valid:1;
	u8 pci_rev;
	enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};

struct irdma_modify_cq_info {
	u64 cq_pa;
	struct irdma_cqe *cq_base;
	u32 cq_size;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool check_overflow;
	bool cq_resize:1;
};

struct irdma_create_qp_info {
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool mac_valid:1;
	bool force_lpb;
	u8 next_iwarp_state;
};

struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};

/* Decoded CQE from the control CQ (CCQ). */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error;
};

struct irdma_dcb_app_info {
	u8 priority;
	u8 selector;
	u16 prot_id;
};

struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};

/* L2 parameters (MTU, DCB/QoS maps) pushed down from the LAN driver. */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
};

struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	u16 vm_id;
	enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
						struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};

struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_fcn_id;
};

struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void __iomem *bar0;
	u8 hmc_fn_id;
};

struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_idx;
};

struct irdma_aeq_init_info {
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	u32 *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	bool virtual_map;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
};

struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};

/* UDP (RoCEv2) transport context for the QP host context. */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};

/* RoCE-specific QP attributes for the QP host context. */
struct irdma_roce_offload_info {
	u16 p_key;
	u16 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

/* iWARP-specific QP attributes for the QP host context. */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u16 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};

/* Offloaded TCP connection state for the QP host context (iWARP). */
struct irdma_tcp_offload_info {
	bool ipv4:1;
	bool no_nagle:1;
	bool insert_vlan_tag:1;
	bool time_stamp:1;
	bool drop_ooo_seg:1;
	bool avoid_stretch_ack:1;
	bool wscale:1;
	bool ignore_tcp_opt:1;
	bool ignore_tcp_uns_opt:1;
	u8 cwnd_inc_limit;
	u8 dup_ack_thresh;
	u8 ttl;
	u8 src_mac_addr_idx;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 syn_rst_handling;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 tcp_state;
	u8 snd_wscale;
	u8 rcv_wscale;
	u32 time_stamp_recent;
	u32 time_stamp_age;
	u32 snd_nxt;
	u32 snd_wnd;
	u32 rcv_nxt;
	u32 rcv_wnd;
	u32 snd_max;
	u32 snd_una;
	u32 srtt;
	u32 rtt_var;
	u32 ss_thresh;
	u32 cwnd;
	u32 snd_wl1;
	u32 snd_wl2;
	u32 max_snd_window;
	u8 rexmit_thresh;
	u32 local_ipaddr[4];
};

/* Aggregate QP host context: exactly one member of each union is valid,
 * selected by tcp_info_valid / iwarp_info_valid and the protocol in use.
 */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u8 stats_idx;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};

/* Decoded asynchronous event queue entry. */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u16 ae_id;
	u16 wqe_idx;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	u8 q2_data_written;
	u8 ae_src;
};

struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	u8 hmc_fcn_index;
};

struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;
	bool mw1_bind_dont_vldt_key:1;
};

/* Parameters for registering a non-shared memory region. */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
};

/* Parameters for a fast-register (FMR) work request. */
struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};

struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;
	bool dealloc_pbl:1;
};

struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};

struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};

struct irdma_upload_context_info {
	u64 buf_pa;
	u32 qp_id;
	u8 qp_type;
	bool freeze_qp:1;
	bool raw_format:1;
};

struct irdma_local_mac_entry_info {
	u8 mac_addr[6];
	u16 entry_idx;
};

struct irdma_add_arp_cache_entry_info {
	u8 mac_addr[ETH_ALEN];
	u32 reach_max;
	u16 arp_index;
	bool permanent;
};

/* Accelerated port-based VLAN table entry: add==true inserts the port. */
struct irdma_apbvt_info {
	u16 port;
	bool add;
};

/* Parameters for the CQP manage-quad-hash command (connection steering). */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};

struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;
	u8 push_page_type;
};

/* Minor/major flush codes applied to a QP's SQ and/or RQ. */
struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;
	bool rq:1;
	bool userflushcode:1;
	bool generate_ae:1;
};

struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Snapshot used by irdma_check_cqp_progress() to detect a stalled CQP. */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;
	u32 count;
};

/* Interrupt configuration callbacks supplied per HW generation. */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};

void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
					   bool check_overflow, bool post_sq);
enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
					    bool post_sq);
enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
						 struct irdma_ccq_cqe_info *info);
enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
					 struct irdma_ccq_init_info *info);

enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
					    bool post_sq);
enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
					 struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
					 struct irdma_aeq_init_info *info);
enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
					      struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
					   u16 *min_err);
enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
					 struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
						     struct irdma_ccq_cqe_info *cmpl_info);
enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp,
					      struct irdma_fast_reg_stag_info *info,
					      bool post_sq);
enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp,
					  struct irdma_create_qp_info *info,
					  u64 scratch, bool post_sq);
enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp,
					   u64 scratch, bool remove_hash_idx,
					   bool ignore_mw_bnd, bool post_sq);
enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
					      struct irdma_qp_flush_info *info,
					      u64 scratch, bool post_sq);
enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
					struct irdma_qp_init_info *info);
enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
					  struct irdma_modify_qp_info *info,
					  u64 scratch, bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);

void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);

/* CQ management */
enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
					   bool post_sq);
enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
					struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp,
							   u64 scratch, u8 hmc_fn_id,
							   bool post_sq, bool poll_registers);

void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);

/*
 * struct cqp_info - command-specific input parameters for one CQP request.
 * Only one union member is meaningful for a given request; the command id
 * stored alongside this struct (cqp_cmds_info.cqp_cmd) implies which one.
 * Each member carries a 'scratch' value (private data for the CQP WQE).
 */
struct cqp_info {
	union {
		/* QP create/modify/destroy */
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		/* CQ create/modify/destroy */
		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		/* STag / MR / MW commands */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		/* ARP cache add/delete */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		/* local MAC table add/delete/alloc */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		/* CEQ / AEQ create and destroy */
		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		/* QP flush and async-event generation */
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		/* HMC FPM values query/commit */
		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		/* address handle create/destroy */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		/* multicast group create/destroy/modify */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		/* statistics instance manage / gather */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		/* work scheduler node */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		/* user-priority map */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};

/*
 * One queued CQP command: list linkage, command id, and the
 * command-specific input block.
 */
struct cqp_cmds_info {
	struct list_head cqp_cmd_entry;	/* linkage on the pending-command list */
	u8 cqp_cmd;	/* command id; implies the valid cqp_info union member */
	u8 post_sq;	/* NOTE(review): presumably "post to CQP SQ now" - verify */
	struct cqp_info in;	/* command input parameters */
};

__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);

/**
 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: private data for CQP WQE
 *
 * Convenience wrapper around irdma_sc_cqp_get_next_send_wqe_idx() for
 * callers that do not need the WQE index it reports.
 *
 * Return: whatever irdma_sc_cqp_get_next_send_wqe_idx() returns for the
 * next CQP SQ WQE; the index output is discarded.
 */
static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
{
	u32 wqe_idx;

	return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
}
#endif /* IRDMA_TYPE_H */