/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_IB_IBTL_IBTL_TYPES_H
#define	_SYS_IB_IBTL_IBTL_TYPES_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * ibtl_types.h
 *
 * All common IBTL defined types. These are common data types
 * that are shared by the IBTI and IBCI interfaces; this file is included
 * only by ibti.h and ibci.h.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ib/ib_types.h>
#include <sys/ib/ibtl/ibtl_status.h>

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Define Internal IBTL handles
 */
typedef	struct	ibtl_clnt_s	*ibt_clnt_hdl_t;    /* ibt_attach() */
typedef	struct	ibtl_hca_s	*ibt_hca_hdl_t;	    /* ibt_open_hca() */
typedef	struct	ibtl_channel_s	*ibt_channel_hdl_t; /* alloc_rc|ud_channel() */
typedef	struct	ibtl_srq_s	*ibt_srq_hdl_t;	    /* ibt_alloc_srq() */
typedef	struct	ibtl_cq_s	*ibt_cq_hdl_t;	    /* ibt_alloc_cq() */
typedef	struct	ibcm_svc_info_s	*ibt_srv_hdl_t;	    /* ibt_register_service() */
typedef	struct	ibcm_svc_bind_s	*ibt_sbind_hdl_t;   /* ibt_bind_service() */

typedef	struct	ibc_ma_s	*ibt_ma_hdl_t;	    /* ibt_map_mem_area() */
typedef	struct	ibc_pd_s	*ibt_pd_hdl_t;	    /* ibt_alloc_pd() */
typedef	struct	ibc_sched_s	*ibt_sched_hdl_t;   /* ibt_alloc_cq_sched() */
typedef	struct	ibc_mr_s	*ibt_mr_hdl_t;	    /* ibt_register_mr() */
typedef	struct	ibc_mw_s	*ibt_mw_hdl_t;	    /* ibt_alloc_mw() */
typedef	struct	ibt_ud_dest_s	*ibt_ud_dest_hdl_t; /* UD dest handle */
typedef	struct	ibc_ah_s	*ibt_ah_hdl_t;	    /* ibt_alloc_ah() */
typedef struct	ibtl_eec_s	*ibt_eec_hdl_t;
typedef	struct	ibt_rd_dest_s	*ibt_rd_dest_hdl_t;	/* Reserved for */
							/* Future use */

/*
 * Some General Types.
 */
typedef uint32_t	ibt_lkey_t;		/* L_Key */
typedef uint32_t	ibt_rkey_t;		/* R_Key */
typedef uint64_t	ibt_wrid_t;		/* Client assigned WR ID */
typedef uint32_t	ibt_immed_t;		/* WR Immediate Data */
typedef uint64_t	ibt_atom_arg_t;		/* WR Atomic Operation arg */
typedef	uint_t		ibt_cq_handler_id_t;	/* Event handler ID */

/*
 * IBT selector type, used when looking up/requesting either an
 * MTU, Pkt lifetime, or Static rate.
 * The interpretation of IBT_BEST depends on the attribute being selected.
 */
typedef enum ibt_selector_e {
	IBT_GT		= 0,	/* Greater than */
	IBT_LT		= 1,	/* Less than */
	IBT_EQU		= 2,	/* Equal to */
	IBT_BEST	= 3	/* Best */
} ibt_selector_t;


/*
 * Static rate definitions.
 */
typedef enum ibt_srate_e {
	IBT_SRATE_NOT_SPECIFIED	= 0,
	IBT_SRATE_1X		= 2,
	IBT_SRATE_4X		= 3,
	IBT_SRATE_12X		= 4
} ibt_srate_t;

/*
 * Static rate request type.
 */
typedef struct ibt_srate_req_s {
	ibt_srate_t	r_srate;	/* Requested srate */
	ibt_selector_t	r_selector;	/* Qualifier for r_srate */
} ibt_srate_req_t;
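
/*
 * Usage sketch (illustrative only, not part of this interface): a client
 * asking for any static rate greater than 1X pairs a rate with a selector,
 * typically inside a larger path or channel attribute structure:
 *
 *	ibt_srate_req_t	rate_req;
 *
 *	rate_req.r_srate = IBT_SRATE_1X;
 *	rate_req.r_selector = IBT_GT;
 *
 * IBT_EQU would request exactly the supplied rate, and IBT_BEST the best
 * rate available.
 */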

/*
 * Packet Life Time Request Type.
 */
typedef struct ibt_pkt_lt_req_s {
	clock_t		p_pkt_lt;	/* Requested Packet Life Time */
	ibt_selector_t	p_selector;	/* Qualifier for p_pkt_lt */
} ibt_pkt_lt_req_t;

/*
 * Queue size struct.
 */
typedef struct ibt_queue_sizes_s {
	uint_t	qs_sq;		/* SendQ size. */
	uint_t	qs_rq;		/* RecvQ size. */
} ibt_queue_sizes_t;

/*
 * Channel sizes struct, used by functions that allocate/query RC or UD
 * channels.
 */
typedef struct ibt_chan_sizes_s {
	uint_t	cs_sq;		/* SendQ size. */
	uint_t	cs_rq;		/* ReceiveQ size. */
	uint_t	cs_sq_sgl;	/* Max SGL elements in a SQ WR. */
	uint_t	cs_rq_sgl;	/* Max SGL elements in a RQ WR. */
} ibt_chan_sizes_t;

/*
 * Shared Queue size struct.
 */
typedef struct ibt_srq_sizes_s {
	uint_t	srq_wr_sz;
	uint_t	srq_sgl_sz;
} ibt_srq_sizes_t;

/*
 * SRQ Modify Flags
 */
typedef enum ibt_srq_modify_flags_e {
	IBT_SRQ_SET_NOTHING		= 0,
	IBT_SRQ_SET_SIZE		= (1 << 1),
	IBT_SRQ_SET_LIMIT		= (1 << 2)
} ibt_srq_modify_flags_t;


/*
 * Execution flags, indicating whether the function should block or not.
 * Note: in some cases, e.g., a NULL rc_cm_handler, IBT_NONBLOCKING
 * will not have an effect, and the thread will block.
 * IBT_NOCALLBACKS is valid for ibt_close_rc_channel only.
 */
typedef enum ibt_execution_mode_e {
	IBT_BLOCKING	= 0,	/* Block */
	IBT_NONBLOCKING	= 1,	/* Return as soon as possible */
	IBT_NOCALLBACKS	= 2	/* cm_handler is not invoked after */
				/* ibt_close_rc_channel returns */
} ibt_execution_mode_t;
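
/*
 * Usage sketch (illustrative only; the exact ibt_close_rc_channel()
 * prototype is defined by the IBTI interface and is assumed here): a client
 * that must not receive further CM callbacks on a channel once the close
 * call returns passes IBT_NOCALLBACKS as the mode:
 *
 *	(void) ibt_close_rc_channel(chan_hdl, IBT_NOCALLBACKS,
 *	    NULL, 0, NULL, NULL, NULL);
 *
 * With IBT_BLOCKING the call returns only after the close protocol
 * completes; with IBT_NONBLOCKING it returns as soon as possible.
 */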

/*
 * Memory window alloc flags
 */
typedef enum ibt_mw_flags_e {
	IBT_MW_SLEEP		= 0,		/* Can block */
	IBT_MW_NOSLEEP		= (1 << 0),	/* Can't block */
	IBT_MW_USER_MAP		= (1 << 1),
	IBT_MW_DEFER_ALLOC	= (1 << 2),
	IBT_MW_TYPE_1		= (1 << 3),
	IBT_MW_TYPE_2		= (1 << 4)
} ibt_mw_flags_t;

/*
 * PD alloc flags
 */
typedef enum ibt_pd_flags_e {
	IBT_PD_NO_FLAGS		= 0,
	IBT_PD_USER_MAP		= (1 << 0),
	IBT_PD_DEFER_ALLOC	= (1 << 1)
} ibt_pd_flags_t;

/*
 * UD Dest alloc flags
 */
typedef enum ibt_ud_dest_flags_e {
	IBT_UD_DEST_NO_FLAGS	= 0,
	IBT_UD_DEST_USER_MAP	= (1 << 0),
	IBT_UD_DEST_DEFER_ALLOC	= (1 << 1)
} ibt_ud_dest_flags_t;

/*
 * SRQ alloc flags
 */
typedef enum ibt_srq_flags_e {
	IBT_SRQ_NO_FLAGS	= 0,
	IBT_SRQ_USER_MAP	= (1 << 0),
	IBT_SRQ_DEFER_ALLOC	= (1 << 1)
} ibt_srq_flags_t;

/*
 * L_Key alloc flags
 */
typedef enum ibt_lkey_flags_e {
	IBT_KEY_NO_FLAGS	= 0,
	IBT_KEY_REMOTE		= (1 << 0)
} ibt_lkey_flags_t;

/*
 *  RNR NAK retry counts.
 */
typedef enum ibt_rnr_retry_cnt_e {
	IBT_RNR_NO_RETRY	= 0x0,	/* Don't retry, fail on first timeout */
	IBT_RNR_RETRY_1		= 0x1,	/* Retry once */
	IBT_RNR_RETRY_2		= 0x2,	/* Retry twice */
	IBT_RNR_RETRY_3		= 0x3,	/* Retry three times */
	IBT_RNR_RETRY_4		= 0x4,	/* Retry four times */
	IBT_RNR_RETRY_5		= 0x5,	/* Retry five times */
	IBT_RNR_RETRY_6		= 0x6,	/* Retry six times */
	IBT_RNR_INFINITE_RETRY	= 0x7	/* Retry forever */
} ibt_rnr_retry_cnt_t;

/*
 * Valid values for RNR NAK timer fields, part of a channel's context.
 */
typedef enum ibt_rnr_nak_time_e {
	IBT_RNR_NAK_655ms	= 0x0,
	IBT_RNR_NAK_10us	= 0x1,
	IBT_RNR_NAK_20us	= 0x2,
	IBT_RNR_NAK_30us	= 0x3,
	IBT_RNR_NAK_40us	= 0x4,
	IBT_RNR_NAK_60us	= 0x5,
	IBT_RNR_NAK_80us	= 0x6,
	IBT_RNR_NAK_120us	= 0x7,
	IBT_RNR_NAK_160us	= 0x8,
	IBT_RNR_NAK_240us	= 0x9,
	IBT_RNR_NAK_320us	= 0xA,
	IBT_RNR_NAK_480us	= 0xB,
	IBT_RNR_NAK_640us	= 0xC,
	IBT_RNR_NAK_960us	= 0xD,
	IBT_RNR_NAK_1280us	= 0xE,
	IBT_RNR_NAK_1920us	= 0xF,
	IBT_RNR_NAK_2560us	= 0x10,
	IBT_RNR_NAK_3840us	= 0x11,
	IBT_RNR_NAK_5120us	= 0x12,
	IBT_RNR_NAK_7680us	= 0x13,
	IBT_RNR_NAK_10ms	= 0x14,
	IBT_RNR_NAK_15ms	= 0x15,
	IBT_RNR_NAK_20ms	= 0x16,
	IBT_RNR_NAK_31ms	= 0x17,
	IBT_RNR_NAK_41ms	= 0x18,
	IBT_RNR_NAK_61ms	= 0x19,
	IBT_RNR_NAK_82ms	= 0x1A,
	IBT_RNR_NAK_123ms	= 0x1B,
	IBT_RNR_NAK_164ms	= 0x1C,
	IBT_RNR_NAK_246ms	= 0x1D,
	IBT_RNR_NAK_328ms	= 0x1E,
	IBT_RNR_NAK_492ms	= 0x1F
} ibt_rnr_nak_time_t;

/*
 * The definition of HCA capabilities etc as a bitfield.
 */
typedef enum ibt_hca_flags_e {
	IBT_HCA_NO_FLAGS	= 0,

	IBT_HCA_RD		= 1 << 0,
	IBT_HCA_UD_MULTICAST	= 1 << 1,
	IBT_HCA_RAW_MULTICAST	= 1 << 2,

	IBT_HCA_ATOMICS_HCA	= 1 << 3,
	IBT_HCA_ATOMICS_GLOBAL	= 1 << 4,

	IBT_HCA_RESIZE_CHAN	= 1 << 5,	/* Is resize supported? */
	IBT_HCA_AUTO_PATH_MIG	= 1 << 6,	/* Is APM supported? */
	IBT_HCA_SQD_SQD_PORT	= 1 << 7,	/* Can change physical port */
						/* on transit from SQD to SQD */
	IBT_HCA_PKEY_CNTR	= 1 << 8,
	IBT_HCA_QKEY_CNTR	= 1 << 9,
	IBT_HCA_AH_PORT_CHECK	= 1 << 10,	/* HCA checks AH port match */
						/* in UD WRs */
	IBT_HCA_PORT_UP		= 1 << 11,	/* PortActive event supported */
	IBT_HCA_INIT_TYPE	= 1 << 12,	/* InitType supported */
	IBT_HCA_SI_GUID		= 1 << 13,	/* System Image GUID */
						/* supported */
	IBT_HCA_SHUTDOWN_PORT	= 1 << 14,	/* ShutdownPort supported */
	IBT_HCA_RNR_NAK		= 1 << 15,	/* RNR-NAK supported for RC */
	IBT_HCA_CURRENT_QP_STATE = 1 << 16,	/* Does modify_qp support */
						/* checking of current state? */
	IBT_HCA_SRQ 		= 1 << 17,	/* Shared Receive Queue */
	IBT_HCA_RESIZE_SRQ	= 1 << 18,	/* Is resize SRQ supported? */
	IBT_HCA_BASE_MEM_MGT	= 1 << 19,	/* Base memory mgt supported? */
	IBT_HCA_MULT_PAGE_SZ_MR	= 1 << 20,	/* Support of multiple page */
						/* sizes per memory region? */
	IBT_HCA_BLOCK_LIST	= 1 << 21,	/* Block list physical buffer */
						/* lists supported? */
	IBT_HCA_ZERO_BASED_VA	= 1 << 22,	/* Zero Based Virtual */
						/* Addresses supported? */
	IBT_HCA_LOCAL_INVAL_FENCE = 1 << 23,	/* Local invalidate fencing? */
	IBT_HCA_BASE_QUEUE_MGT	= 1 << 24,	/* Base Queue Mgt supported? */
	IBT_HCA_CKSUM_FULL	= 1 << 25,	/* Checksum offload supported */
	IBT_HCA_MEM_WIN_TYPE_2B	= 1 << 26,	/* Type 2B memory windows */
	IBT_HCA_PHYS_BUF_BLOCK	= 1 << 27	/* Block mode phys buf lists */
} ibt_hca_flags_t;

/*
 * The definition of HCA page size capabilities as a bitfield
 */
typedef enum ibt_page_sizes_e {
	IBT_PAGE_4K		= 0x1 << 2,
	IBT_PAGE_8K		= 0x1 << 3,
	IBT_PAGE_16K		= 0x1 << 4,
	IBT_PAGE_32K		= 0x1 << 5,
	IBT_PAGE_64K		= 0x1 << 6,
	IBT_PAGE_128K		= 0x1 << 7,
	IBT_PAGE_256K		= 0x1 << 8,
	IBT_PAGE_512K		= 0x1 << 9,
	IBT_PAGE_1M		= 0x1 << 10,
	IBT_PAGE_2M		= 0x1 << 11,
	IBT_PAGE_4M		= 0x1 << 12,
	IBT_PAGE_8M		= 0x1 << 13,
	IBT_PAGE_16M		= 0x1 << 14,
	IBT_PAGE_32M		= 0x1 << 15,
	IBT_PAGE_64M		= 0x1 << 16,
	IBT_PAGE_128M		= 0x1 << 17,
	IBT_PAGE_256M		= 0x1 << 18,
	IBT_PAGE_512M		= 0x1 << 19,
	IBT_PAGE_1G		= 0x1 << 20,
	IBT_PAGE_2G		= 0x1 << 21,
	IBT_PAGE_4G		= 0x1 << 22,
	IBT_PAGE_8G		= 0x1 << 23,
	IBT_PAGE_16G		= 0x1 << 24
} ibt_page_sizes_t;

/*
 * Memory Window Type.
 */
typedef enum ibt_mem_win_type_e {
	IBT_MEM_WIN_TYPE_NOT_DEFINED	= 0,
	IBT_MEM_WIN_TYPE_1		= (1 << 0),
	IBT_MEM_WIN_TYPE_2		= (1 << 1)
} ibt_mem_win_type_t;

/*
 * HCA attributes.
 * Contains all HCA static attributes.
 */
typedef struct ibt_hca_attr_s {
	ibt_hca_flags_t	hca_flags;		/* HCA capabilities etc */

	/* device/version inconsistency w/ NodeInfo and IOControllerProfile */
	uint32_t	hca_vendor_id:24;	/* 24 bit Vendor ID */
	uint16_t	hca_device_id;
	uint32_t	hca_version_id;

	uint_t		hca_max_chans;		/* Max Chans supported */
	uint_t		hca_max_chan_sz;	/* Max outstanding WRs on any */
						/* channel */

	uint_t		hca_max_sgl;		/* Max SGL entries per WR */

	uint_t		hca_max_cq;		/* Max num of CQs supported  */
	uint_t		hca_max_cq_sz;		/* Max capacity of each CQ */

	ibt_page_sizes_t	hca_page_sz;	/* Bit mask of page sizes */

	uint_t		hca_max_memr;		/* Max num of HCA mem regions */
	ib_memlen_t	hca_max_memr_len;	/* Largest block, in bytes of */
						/* mem that can be registered */
	uint_t		hca_max_mem_win;	/* Max Memory windows in HCA */

	uint_t		hca_max_rsc; 		/* Max Responder Resources of */
						/* this HCA for RDMAR/Atomics */
						/* with this HCA as target. */
	uint8_t		hca_max_rdma_in_chan;	/* Max incoming RDMA Reads/ */
						/* Atomics per chan, with */
						/* this HCA as target. */
	uint8_t		hca_max_rdma_out_chan;	/* Max RDMA Reads/Atomics out */
						/* per channel by this HCA */
	uint_t		hca_max_ipv6_chan;	/* Max IPV6 channels in HCA */
	uint_t		hca_max_ether_chan;	/* Max Ether channels in HCA */

	uint_t		hca_max_mcg_chans;	/* Max number of channels */
						/* that can join multicast */
						/* groups */
	uint_t		hca_max_mcg;		/* Max multicast groups */
	uint_t		hca_max_chan_per_mcg;	/* Max number of channels per */
						/* Multicast group in HCA */

	uint16_t	hca_max_partitions;	/* Max partitions in HCA */
	uint8_t		hca_nports;		/* Number of physical ports */
	ib_guid_t	hca_node_guid;		/* Node GUID */

	ib_time_t	hca_local_ack_delay;

	uint_t		hca_max_port_sgid_tbl_sz;
	uint16_t	hca_max_port_pkey_tbl_sz;
	uint_t		hca_max_pd;		/* Max# of Protection Domains */
	ib_guid_t	hca_si_guid;		/* Optional System Image GUID */
	uint_t		hca_hca_max_ci_priv_sz;
	uint_t		hca_chan_max_ci_priv_sz;
	uint_t		hca_cq_max_ci_priv_sz;
	uint_t		hca_pd_max_ci_priv_sz;
	uint_t		hca_mr_max_ci_priv_sz;
	uint_t		hca_mw_max_ci_priv_sz;
	uint_t		hca_ud_dest_max_ci_priv_sz;
	uint_t		hca_cq_sched_max_ci_priv_sz;
	uint_t		hca_max_ud_dest;
	uint_t		hca_opaque2;
	uint_t		hca_opaque3;
	uint_t		hca_opaque4;
	uint8_t		hca_opaque5;
	uint8_t		hca_opaque6;
	uint_t		hca_opaque7;
	uint_t		hca_opaque8;
	uint_t		hca_max_srqs;		/* Max SRQs supported */
	uint_t		hca_max_srqs_sz;	/* Max outstanding WRs on any */
						/* SRQ */
	uint_t		hca_max_srq_sgl;	/* Max SGL entries per SRQ WR */
	uint_t		hca_max_phys_buf_list_sz;
	size_t		hca_block_sz_lo;	/* Range of block sizes */
	size_t		hca_block_sz_hi;	/* supported by the HCA */
	uint_t		hca_max_cq_handlers;
	ibt_lkey_t	hca_reserved_lkey;
} ibt_hca_attr_t;
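
/*
 * Usage sketch (illustrative only; the ibt_query_hca() prototype is part
 * of the IBTI interface and is assumed here): clients normally fetch the
 * attributes once and test capability bits before relying on an optional
 * feature:
 *
 *	ibt_hca_attr_t	hca_attrs;
 *
 *	if (ibt_query_hca(hca_hdl, &hca_attrs) == IBT_SUCCESS) {
 *		if (hca_attrs.hca_flags & IBT_HCA_SRQ)
 *			srq_supported = B_TRUE;
 *		if ((hca_attrs.hca_page_sz & IBT_PAGE_4K) == 0)
 *			(choose a different registration page size)
 *	}
 */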

/*
 * HCA Port link states.
 */
typedef enum ibt_port_state_e {
	IBT_PORT_DOWN	= 1,
	IBT_PORT_INIT,
	IBT_PORT_ARM,
	IBT_PORT_ACTIVE
} ibt_port_state_t;

/*
 * HCA Port capabilities as a bitfield.
 */
typedef enum ibt_port_caps_e {
	IBT_PORT_CAP_NO_FLAGS		= 0,
	IBT_PORT_CAP_SM			= 1 << 0,	/* SM port */
	IBT_PORT_CAP_SM_DISABLED	= 1 << 1,
	IBT_PORT_CAP_SNMP_TUNNEL	= 1 << 2,	/* SNMP Tunneling */
	IBT_PORT_CAP_DM			= 1 << 3,	/* DM supported */
	IBT_PORT_CAP_VENDOR		= 1 << 4	/* Vendor Class */
} ibt_port_caps_t;


/*
 * HCA port attributes structure definition. The number of ports per HCA
 * can be found from the "ibt_hca_attr_t" structure.
 *
 * p_pkey_tbl is a pointer to an array of ib_pkey_t, members are
 * accessed as:
 *		hca_portinfo->p_pkey_tbl[i]
 *
 * Where 0 <= i < hca_portinfo->p_pkey_tbl_sz
 *
 * Similarly p_sgid_tbl is a pointer to an array of ib_gid_t.
 *
 * The Query Port function - ibt_query_hca_ports() allocates the memory
 * required for the ibt_hca_portinfo_t struct as well as the memory
 * required for the SGID and P_Key tables. The memory is freed by calling
 * ibt_free_portinfo().
 */
typedef struct ibt_hca_portinfo_s {
	ib_lid_t		p_opaque1;	/* Base LID of port */
	ib_qkey_cntr_t		p_qkey_violations; /* Bad Q_Key cnt */
	ib_pkey_cntr_t		p_pkey_violations; /* Optional bad P_Key cnt */
	uint8_t			p_sm_sl:4;	/* SM Service level */
	ib_lid_t		p_sm_lid;	/* SM LID */
	ibt_port_state_t	p_linkstate;	/* Port state */
	uint8_t			p_port_num;
	ib_mtu_t		p_mtu;		/* Max transfer unit - pkt */
	uint8_t			p_lmc:3;	/* Local mask control */
	ib_gid_t		*p_sgid_tbl;	/* SGID Table */
	uint_t			p_sgid_tbl_sz;	/* Size of SGID table */
	uint16_t		p_pkey_tbl_sz;	/* Size of P_Key table */
	uint16_t		p_def_pkey_ix;	/* default pkey index for TI */
	ib_pkey_t		*p_pkey_tbl;	/* P_Key table */
	uint8_t			p_max_vl;	/* Max num of virtual lanes */
	uint8_t			p_init_type_reply; /* Optional InitTypeReply */
	ib_time_t		p_subnet_timeout; /* Max Subnet Timeout */
	ibt_port_caps_t		p_capabilities;	/* Port Capabilities */
	uint32_t		p_msg_sz;	/* Max message size */
} ibt_hca_portinfo_t;
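
/*
 * Usage sketch for the query/free pairing described above (illustrative
 * only; the ibt_query_hca_ports()/ibt_free_portinfo() prototypes are part
 * of the IBTI interface and are assumed here, with a port number of 0
 * taken to mean "all ports"):
 *
 *	ibt_hca_portinfo_t	*pinfop;
 *	uint_t			nports, size, i;
 *
 *	if (ibt_query_hca_ports(hca_hdl, 0, &pinfop, &nports, &size) ==
 *	    IBT_SUCCESS) {
 *		for (i = 0; i < pinfop->p_pkey_tbl_sz; i++)
 *			(inspect pinfop->p_pkey_tbl[i])
 *		ibt_free_portinfo(pinfop, size);
 *	}
 */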

/*
 * Modify HCA port attributes flags, specifies which HCA port
 * attributes to modify.
 */
typedef enum ibt_port_modify_flags_e {
	IBT_PORT_NO_FLAGS	= 0,

	IBT_PORT_RESET_QKEY	= 1 << 0,	/* Reset Q_Key violation */
						/* counter */
	IBT_PORT_RESET_SM	= 1 << 1,	/* SM */
	IBT_PORT_SET_SM		= 1 << 2,
	IBT_PORT_RESET_SNMP	= 1 << 3,	/* SNMP Tunneling */
	IBT_PORT_SET_SNMP	= 1 << 4,
	IBT_PORT_RESET_DEVMGT	= 1 << 5,	/* Device Management */
	IBT_PORT_SET_DEVMGT	= 1 << 6,
	IBT_PORT_RESET_VENDOR	= 1 << 7,	/* Vendor Class */
	IBT_PORT_SET_VENDOR	= 1 << 8,
	IBT_PORT_SHUTDOWN	= 1 << 9,	/* Shut down the port */
	IBT_PORT_SET_INIT_TYPE	= 1 << 10	/* InitTypeReply value */
} ibt_port_modify_flags_t;

/*
 * Modify HCA port InitType bit definitions.
 */
#define	IBT_PINIT_NO_LOAD		0x1
#define	IBT_PINIT_PRESERVE_CONTENT	0x2
#define	IBT_PINIT_PRESERVE_PRESENCE	0x4
#define	IBT_PINIT_NO_RESUSCITATE	0x8


/*
 * Address vector definition.
 */
typedef struct ibt_adds_vect_s {
	ib_gid_t	av_dgid;	/* IPV6 dest GID in GRH */
	ib_gid_t	av_sgid;	/* SGID */
	ibt_srate_t	av_srate;	/* Max static rate */
	uint8_t		av_srvl:4;	/* Service level in LRH */
	uint_t		av_flow:20;	/* 20 bit Flow Label */
	uint8_t		av_tclass;	/* Traffic Class */
	uint8_t		av_hop;		/* Hop Limit */
	uint8_t		av_port_num;	/* Port number for UD */
	boolean_t	av_opaque1;
	ib_lid_t	av_opaque2;
	ib_path_bits_t	av_opaque3;
	uint32_t	av_opaque4;
} ibt_adds_vect_t;

typedef struct ibt_cep_path_s {
	ibt_adds_vect_t	cep_adds_vect;		/* Address Vector */
	uint16_t	cep_pkey_ix;		/* P_Key Index */
	uint8_t		cep_hca_port_num;	/* Port number for connected */
						/* channels.  A value of 0 */
						/* indicates an invalid path */
	ib_time_t	cep_cm_opaque1;
} ibt_cep_path_t;

/*
 * Channel Migration State.
 */
typedef enum ibt_cep_cmstate_e {
	IBT_STATE_NOT_SUPPORTED	= 0,
	IBT_STATE_MIGRATED	= 1,
	IBT_STATE_REARMED	= 2,
	IBT_STATE_ARMED		= 3
} ibt_cep_cmstate_t;

/*
 * Transport service type
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_tran_srv_t;

#define	IBT_RC_SRV		0
#define	IBT_UC_SRV		1
#define	IBT_RD_SRV		2
#define	IBT_UD_SRV		3
#define	IBT_RAWIP_SRV		4
#define	IBT_RAWETHER_SRV	5

/*
 * Channel (QP/EEC) state definitions.
 */
typedef enum ibt_cep_state_e {
	IBT_STATE_RESET	= 0,		/* Reset */
	IBT_STATE_INIT,			/* Initialized */
	IBT_STATE_RTR,			/* Ready to Receive */
	IBT_STATE_RTS,			/* Ready to Send */
	IBT_STATE_SQD,			/* Send Queue Drained */
	IBT_STATE_SQE,			/* Send Queue Error */
	IBT_STATE_ERROR,		/* Error */
	IBT_STATE_SQDRAIN,		/* Send Queue Draining */
	IBT_STATE_NUM			/* Number of states */
} ibt_cep_state_t;


/*
 * Channel Attribute flags.
 */
typedef enum ibt_attr_flags_e {
	IBT_ALL_SIGNALED	= 0,	/* All sends signaled */
	IBT_WR_SIGNALED		= 1,	/* Signaled on a WR basis */
	IBT_FAST_REG_RES_LKEY	= (1 << 1)
} ibt_attr_flags_t;

/*
 * Channel End Point (CEP) Control Flags.
 */
typedef enum ibt_cep_flags_e {
	IBT_CEP_NO_FLAGS	= 0,		/* Enable Nothing */
	IBT_CEP_RDMA_RD		= (1 << 0),	/* Enable incoming RDMA RD's */
						/* RC & RD only */
	IBT_CEP_RDMA_WR		= (1 << 1),	/* Enable incoming RDMA WR's */
						/* RC & RD only */
	IBT_CEP_ATOMIC		= (1 << 2)	/* Enable incoming Atomics, */
						/* RC & RD only */
} ibt_cep_flags_t;

/*
 * Channel Modify Flags
 *
 * These flags specify which attributes in either ibt_rc_chan_modify_attr_t or
 * ibt_ud_chan_modify_attr_t should be modified on an ibt_modify_rc_channel()
 * or ibt_modify_ud_channel() call.
 *
 *
 *	Flag			Comments
 *	----			--------
 *	IBT_CEP_SET_RDMA_R	Modify RDMA reads as indicated by the control
 *				ibt_cep_flags_t attribute (RC & RD only).
 *
 *				  IBT_CEP_RDMA_RD = 0 - Disable RDMA reads.
 *				  IBT_CEP_RDMA_RD = 1 - Enable RDMA reads.
 *
 *	IBT_CEP_SET_RDMA_W	Modify RDMA writes as indicated by the control
 *				ibt_cep_flags_t attribute (RC, UC & RD only).
 *
 *				  IBT_CEP_RDMA_WR = 0 - Disable RDMA writes.
 *				  IBT_CEP_RDMA_WR = 1 - Enable RDMA writes.
 *
 *	IBT_CEP_SET_ATOMIC	Modify atomic operations as indicated by the
 *				control ibt_cep_flags_t attribute
 *				(RC & RD only).
 *
 *				  IBT_CEP_ATOMIC = 0 - Disable atomics.
 *				  IBT_CEP_ATOMIC = 1 - Enable atomics.
 *
 *	IBT_CEP_SET_SQ_SIZE	Resize the maximum outstanding Work Requests
 *				on Send Queue.
 *
 *	IBT_CEP_SET_RQ_SIZE	Resize the maximum outstanding Work Requests
 *				on Receive Queue.
 *
 *	IBT_CEP_SET_ALT_PATH 	Modify Alternate Path Address Vector and HCA
 *				Port number (RC & UC only).
 *
 *	IBT_CEP_SET_ADDS_VECT	Modify Primary Path Address Vector information.
 *				(RC & UC Only).
 *
 *	IBT_CEP_SET_PORT	Modify Primary physical Port (RC, UC & UD only).
 *
 *	IBT_CEP_SET_RETRY	Modify Retry Count (RC only). This limits the
 *				number of times a requester can retry a request
 *				due to a Local ACK timeout or NAK-Sequence
 *				Error.
 *
 *	IBT_CEP_SET_RNR_NAK_RETRY	Modify RNR Retry Count (RC only). The
 *					RNR NAK retry counter limits the number
 *					of times a requester can retry a request
 *					that was RNR NAK'ed.
 *
 *	IBT_CEP_SET_MIN_RNR_NAK	Minimum RNR NAK timer field value
 *				(RC & RD only).
 *
 *	IBT_CEP_SET_QKEY	Modify Q_Key (UD & RD only).
 *
 *	IBT_CEP_SET_RDMARA_OUT	Modify Initiator depth, Number of outstanding
 *				RDMA Read/atomic operations at destination
 *				(RC Only).
 *
 *	IBT_CEP_SET_RDMARA_IN	Modify Responder Resources, Number of local
 *				RDMA Read/ atomic responder resources (RC Only).
 *
 *	IBT_CEP_SET_SQD_EVENT	Cause the SQD async event (only for RTS => SQD).
 *
 */
typedef enum ibt_cep_modify_flags_e {
	IBT_CEP_SET_NOTHING		= 0,
	IBT_CEP_SET_SQ_SIZE		= (1 << 1),
	IBT_CEP_SET_RQ_SIZE		= (1 << 2),

	IBT_CEP_SET_RDMA_R		= (1 << 3),
	IBT_CEP_SET_RDMA_W		= (1 << 4),
	IBT_CEP_SET_ATOMIC		= (1 << 5),

	IBT_CEP_SET_ALT_PATH		= (1 << 6),	/* Alternate Path */

	IBT_CEP_SET_ADDS_VECT		= (1 << 7),
	IBT_CEP_SET_PORT		= (1 << 8),
	IBT_CEP_SET_OPAQUE5		= (1 << 9),
	IBT_CEP_SET_RETRY		= (1 << 10),
	IBT_CEP_SET_RNR_NAK_RETRY 	= (1 << 11),
	IBT_CEP_SET_MIN_RNR_NAK		= (1 << 12),

	IBT_CEP_SET_QKEY		= (1 << 13),
	IBT_CEP_SET_RDMARA_OUT		= (1 << 14),
	IBT_CEP_SET_RDMARA_IN		= (1 << 15),

	IBT_CEP_SET_OPAQUE1		= (1 << 16),
	IBT_CEP_SET_OPAQUE2		= (1 << 17),
	IBT_CEP_SET_OPAQUE3		= (1 << 18),
	IBT_CEP_SET_OPAQUE4		= (1 << 19),
	IBT_CEP_SET_SQD_EVENT		= (1 << 20),
	IBT_CEP_SET_OPAQUE6		= (1 << 21),
	IBT_CEP_SET_OPAQUE7		= (1 << 22),
	IBT_CEP_SET_OPAQUE8		= (1 << 23)
} ibt_cep_modify_flags_t;
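
/*
 * The flags are OR-ed together to name exactly the attributes a modify call
 * should touch.  Hedged sketch (ibt_modify_rc_channel() and
 * ibt_rc_chan_modify_attr_t are defined by the IBTI interface, not here;
 * the member name used below is an assumption for illustration only):
 *
 *	flags = IBT_CEP_SET_RDMA_R | IBT_CEP_SET_RDMA_W;
 *	modify_attrs.rc_control = IBT_CEP_RDMA_RD | IBT_CEP_RDMA_WR;
 *	(void) ibt_modify_rc_channel(chan_hdl, flags, &modify_attrs, NULL);
 */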

/*
 * CQ notify types.
 */
typedef enum ibt_cq_notify_flags_e {
	IBT_NEXT_COMPLETION	= 1,
	IBT_NEXT_SOLICITED	= 2
} ibt_cq_notify_flags_t;

/*
 * CQ types shared across TI and CI.
 */
typedef enum ibt_cq_flags_e {
	IBT_CQ_NO_FLAGS			= 0,
	IBT_CQ_HANDLER_IN_THREAD	= 1 << 0,	/* A thread calls the */
							/* CQ handler */
	IBT_CQ_USER_MAP			= 1 << 1,
	IBT_CQ_DEFER_ALLOC		= 1 << 2
} ibt_cq_flags_t;

/*
 * CQ scheduling hint flags shared across TI and CI.
 */
typedef enum ibt_cq_sched_flags_e {
	IBT_CQS_NO_FLAGS	= 0,
	IBT_CQS_WARM_CACHE	= 1 << 0, /* run on same CPU */
	IBT_CQS_AFFINITY	= 1 << 1,
	IBT_CQS_SCHED_GROUP	= 1 << 2,
	IBT_CQS_USER_MAP	= 1 << 3,
	IBT_CQS_DEFER_ALLOC	= 1 << 4
} ibt_cq_sched_flags_t;

/*
 * Attributes when creating a Completion Queue.
 *
 * Note:
 *	The IBT_CQ_HANDLER_IN_THREAD cq_flags bit should be ignored by the CI.
 */
typedef struct ibt_cq_attr_s {
	uint_t			cq_size;
	ibt_sched_hdl_t		cq_sched;	/* 0 = no hint, */
						/* other = cq_sched value */
	ibt_cq_flags_t		cq_flags;
} ibt_cq_attr_t;
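
/*
 * Allocation sketch (illustrative only; the ibt_alloc_cq() prototype is
 * part of the IBTI interface and is assumed here).  The CI may round
 * cq_size up, so the actual size is returned separately:
 *
 *	ibt_cq_attr_t	cq_attr;
 *	ibt_cq_hdl_t	cq_hdl;
 *	uint_t		real_size;
 *
 *	cq_attr.cq_size = 128;
 *	cq_attr.cq_sched = NULL;
 *	cq_attr.cq_flags = IBT_CQ_NO_FLAGS;
 *	(void) ibt_alloc_cq(hca_hdl, &cq_attr, &cq_hdl, &real_size);
 */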

/*
 * Memory Management
 */

/* Memory management flags */
typedef enum ibt_mr_flags_e {
	IBT_MR_SLEEP			= 0,
	IBT_MR_NOSLEEP			= (1 << 1),
	IBT_MR_NONCOHERENT		= (1 << 2),
	IBT_MR_PHYS_IOVA		= (1 << 3),  /* ibt_(re)register_buf */

	/* Access control flags */
	IBT_MR_ENABLE_WINDOW_BIND	= (1 << 4),
	IBT_MR_ENABLE_LOCAL_WRITE	= (1 << 5),
	IBT_MR_ENABLE_REMOTE_READ	= (1 << 6),
	IBT_MR_ENABLE_REMOTE_WRITE	= (1 << 7),
	IBT_MR_ENABLE_REMOTE_ATOMIC	= (1 << 8),

	/* Reregister flags */
	IBT_MR_CHANGE_TRANSLATION	= (1 << 9),
	IBT_MR_CHANGE_ACCESS		= (1 << 10),
	IBT_MR_CHANGE_PD		= (1 << 11),

	/* Additional registration flags */
	IBT_MR_ZBVA			= (1 << 12),

	/* Additional physical registration flags */
	IBT_MR_CONSUMER_KEY		= (1 << 13)	/* Consumer owns key */
							/* portion of keys */
} ibt_mr_flags_t;


/* Memory Region attribute flags */
typedef enum ibt_mr_attr_flags_e {
	/* Access control flags */
	IBT_MR_WINDOW_BIND		= (1 << 0),
	IBT_MR_LOCAL_WRITE		= (1 << 1),
	IBT_MR_REMOTE_READ		= (1 << 2),
	IBT_MR_REMOTE_WRITE		= (1 << 3),
	IBT_MR_REMOTE_ATOMIC		= (1 << 4),
	IBT_MR_ZERO_BASED_VA		= (1 << 5),
	IBT_MR_CONSUMER_OWNED_KEY	= (1 << 6),
	IBT_MR_SHARED			= (1 << 7)
} ibt_mr_attr_flags_t;

/* Memory region physical descriptor. */
typedef struct ibt_phys_buf_s {
	union {
		uint64_t	_p_ll;		/* 64 bit DMA address */
		uint32_t	_p_la[2];	/* 2 x 32 bit address */
	} _phys_buf;
	size_t	p_size;
} ibt_phys_buf_t;

#define	p_laddr		_phys_buf._p_ll
#ifdef	_LONG_LONG_HTOL
#define	p_notused	_phys_buf._p_la[0]
#define	p_addr		_phys_buf._p_la[1]
#else
#define	p_addr		_phys_buf._p_la[0]
#define	p_notused	_phys_buf._p_la[1]
#endif


/* Memory region descriptor. */
typedef struct ibt_mr_desc_s {
	ib_vaddr_t	md_vaddr;	/* IB virtual address of memory */
	ibt_lkey_t	md_lkey;
	ibt_rkey_t	md_rkey;
	boolean_t	md_sync_required;
} ibt_mr_desc_t;

/* Physical Memory region descriptor. */
typedef struct ibt_pmr_desc_s {
	ib_vaddr_t	pmd_iova;	/* Returned I/O Virtual Address */
	ibt_lkey_t	pmd_lkey;
	ibt_rkey_t	pmd_rkey;
	uint_t 		pmd_phys_buf_list_sz;	/* Allocated Phys buf sz */
	boolean_t	pmd_sync_required;
} ibt_pmr_desc_t;

/* Memory region protection bounds. */
typedef struct ibt_mr_prot_bounds_s {
	ib_vaddr_t	pb_addr;	/* Beginning address */
	size_t		pb_len;		/* Length of protected region */
} ibt_mr_prot_bounds_t;

/* Memory Region (Re)Register attributes */
typedef struct ibt_mr_attr_s {
	ib_vaddr_t	mr_vaddr;	/* Virtual address to register */
	ib_memlen_t	mr_len;		/* Length of region to register */
	struct as	*mr_as;		/* A pointer to an address space */
					/* structure. This parameter should */
					/* be set to NULL, which implies */
					/* kernel address space. */
	ibt_mr_flags_t	mr_flags;
} ibt_mr_attr_t;
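
/*
 * Registration sketch (illustrative only; the ibt_register_mr() prototype
 * is part of the IBTI interface and is assumed here; kvaddr/len stand for
 * any kernel buffer):
 *
 *	ibt_mr_attr_t	mr_attr;
 *	ibt_mr_hdl_t	mr_hdl;
 *	ibt_mr_desc_t	mr_desc;
 *
 *	mr_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)kvaddr;
 *	mr_attr.mr_len = len;
 *	mr_attr.mr_as = NULL;
 *	mr_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *	(void) ibt_register_mr(hca_hdl, pd_hdl, &mr_attr, &mr_hdl, &mr_desc);
 *
 * The returned ibt_mr_desc_t supplies the L_Key/R_Key to place in work
 * requests.
 */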

/* Physical Memory Region (Re)Register */
typedef struct ibt_pmr_attr_s {
	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
					/* client for the first byte of the */
					/* region */
	ib_memlen_t	pmr_len;	/* Length of region to register */
	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
					/* IOVA within the 1st physical */
					/* buffer */
	ibt_mr_flags_t	pmr_flags;
	ibt_lkey_t	pmr_lkey;	/* Reregister only */
	ibt_rkey_t	pmr_rkey;	/* Reregister only */
	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
	uint_t		pmr_num_buf;	/* Num of entries in the mr_buf_list */
	ibt_phys_buf_t	*pmr_buf_list;	/* List of physical buffers accessed */
					/* as an array */
} ibt_pmr_attr_t;


/*
 * Memory Region (Re)Register attributes - used by ibt_register_shared_mr(),
 * ibt_register_buf() and ibt_reregister_buf().
 */
typedef struct ibt_smr_attr_s {
	ib_vaddr_t		mr_vaddr;
	ibt_mr_flags_t		mr_flags;
	uint8_t			mr_key;		/* Only for physical */
						/* ibt_(re)register_buf() */
	ibt_lkey_t		mr_lkey;	/* Only for physical */
	ibt_rkey_t		mr_rkey;	/* ibt_reregister_buf() */
} ibt_smr_attr_t;

/*
 * key states.
 */
typedef enum ibt_key_state_e {
	IBT_KEY_INVALID	= 0,
	IBT_KEY_FREE,
	IBT_KEY_VALID
} ibt_key_state_t;

/* Memory region query attributes */
typedef struct ibt_mr_query_attr_s {
	ibt_lkey_t		mr_lkey;
	ibt_rkey_t		mr_rkey;
	ibt_mr_prot_bounds_t	mr_lbounds;	/* Actual local CI protection */
						/* bounds */
	ibt_mr_prot_bounds_t	mr_rbounds;	/* Actual remote CI */
						/* protection bounds */
	ibt_mr_attr_flags_t	mr_attr_flags;	/* Access rights etc. */
	ibt_pd_hdl_t		mr_pd;		/* Protection domain */
	boolean_t		mr_sync_required;
	ibt_key_state_t		mr_lkey_state;
	uint_t			mr_phys_buf_list_sz;
} ibt_mr_query_attr_t;

/* Memory window query attributes */
typedef struct ibt_mw_query_attr_s {
	ibt_pd_hdl_t		mw_pd;
	ibt_mem_win_type_t	mw_type;
	ibt_rkey_t		mw_rkey;
	ibt_key_state_t		mw_state;
} ibt_mw_query_attr_t;


#define	IBT_SYNC_READ	0x1	/* Make memory changes visible to incoming */
				/* RDMA reads */

#define	IBT_SYNC_WRITE	0x2	/* Make the effects of an incoming RDMA write */
				/* visible to the consumer */

/* Memory region sync args */
typedef struct ibt_mr_sync_s {
	ibt_mr_hdl_t	ms_handle;
	ib_vaddr_t	ms_vaddr;
	ib_memlen_t	ms_len;
	uint32_t	ms_flags;	/* IBT_SYNC_READ or IBT_SYNC_WRITE */
} ibt_mr_sync_t;
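
/*
 * Sync sketch (illustrative only; the ibt_sync_mr() prototype is part of
 * the IBTI interface and is assumed here).  A sync is needed only when the
 * registration/query reported md_sync_required/mr_sync_required as B_TRUE:
 *
 *	ibt_mr_sync_t	ms;
 *
 *	ms.ms_handle = mr_hdl;
 *	ms.ms_vaddr = mr_attr.mr_vaddr;
 *	ms.ms_len = mr_attr.mr_len;
 *	ms.ms_flags = IBT_SYNC_READ;
 *	(void) ibt_sync_mr(hca_hdl, &ms, 1);
 */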

/*
 * Flags for Virtual Address to HCA Physical Address translation.
 */
typedef enum ibt_va_flags_e {
	IBT_VA_NO_FLAGS		= 0
} ibt_va_flags_t;


/*  Address Translation parameters */
typedef struct ibt_va_attr_s {
	ib_vaddr_t	va_vaddr;	/* Virtual address to register */
	ib_memlen_t	va_len;		/* Length of region to register */
	struct as	*va_as;		/* A pointer to an address space */
					/* structure. */
	ibt_va_flags_t	va_flags;
} ibt_va_attr_t;

/*
 * WORK REQUEST AND WORK REQUEST COMPLETION DEFINITIONS.
 */

/*
 * Work Request and Work Request Completion types - These types are used
 *   to indicate the type of work requests posted to a work queue
 *   or the type of completion received.  Immediate Data is indicated via
 *   ibt_wr_flags_t or ibt_wc_flags_t.
 *
 *   IBT_WRC_RECV and IBT_WRC_RECV_RDMAWI are only used as opcodes in the
 *   work completions.
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_wrc_opcode_t;

#define	IBT_WRC_SEND		1	/* Send */
#define	IBT_WRC_RDMAR		2	/* RDMA Read */
#define	IBT_WRC_RDMAW		3	/* RDMA Write */
#define	IBT_WRC_CSWAP		4	/* Compare & Swap Atomic */
#define	IBT_WRC_FADD		5	/* Fetch & Add Atomic */
#define	IBT_WRC_BIND		6	/* Bind Memory Window */
#define	IBT_WRC_RECV		7	/* Receive */
#define	IBT_WRC_RECV_RDMAWI	8	/* Received RDMA Write w/ Immediate */
#define	IBT_WRC_FAST_REG_PMR	9	/* Fast Register Physical mem region */
#define	IBT_WRC_LOCAL_INVALIDATE 10


/*
 * Work Request Completion flags - These flags indicate what type
 *   of data is present in the Work Request Completion structure
 *
 *   IBT_WC_GRH_PRESENT 	- indicates that a Global Route Header was
 *				  received and inserted into the first 40
 *				  bytes of the buffer pointed to by the recv
 *				  SGL.
 *
 *   IBT_WC_IMMED_DATA_PRESENT	- indicates that the received request
 *				  contained immediate data.
 */
typedef uint8_t ibt_wc_flags_t;

#define	IBT_WC_NO_FLAGS			0
#define	IBT_WC_GRH_PRESENT		(1 << 0)
#define	IBT_WC_IMMED_DATA_PRESENT	(1 << 1)
#define	IBT_WC_RKEY_INVALIDATED		(1 << 2)
#define	IBT_WC_CKSUM_OK			(1 << 3)


/*
 * Work Request Completion - This structure encapsulates the information
 *   necessary to define a work request completion.
 *
 *   wc_id 		- contains the work request ID of the completing WR.
 *   wc_bytes_xfer 	- indicates the number of bytes transferred in the
 *			  request.
 *   wc_flags 		- Work Request Completion Flags, see ibt_wc_flags_t
 *   wc_immed_data 	- Immediate Data.
 *   wc_freed_rc	- indicates the freed resource count. Always valid
 *			  regardless of the wc_status.
 *   wc_type 		- indicates the type of WR completion
 *			  (see ibt_wrc_opcode_t above).
 *   wc_status		- indicates request completion status.
 *   wc_sl		- Service Level
 *   wc_ethertype	- Ethertype, RawEther only.
 */
typedef struct ibt_wc_s {
	ibt_wrid_t		wc_id;		/* Work Request Id */
	uint64_t		wc_fma_ena;	/* fault management err data */
	ib_msglen_t		wc_bytes_xfer;	/* Number of Bytes */
						/* Transferred */
	ibt_wc_flags_t		wc_flags;	/* WR Completion Flags */
	ibt_wrc_opcode_t	wc_type;	/* Operation Type */
	uint16_t		wc_cksum;	/* payload checksum */
	ibt_immed_t		wc_immed_data;	/* Immediate Data */
	uint32_t		wc_freed_rc;	/* Freed Resource Count */
	ibt_wc_status_t		wc_status;	/* Completion Status */
	uint8_t			wc_sl:4;	/* Remote SL */
	uint16_t		wc_ethertype;	/* Ethertype Field - RE */
	ib_lid_t		wc_opaque1;
	uint16_t		wc_opaque2;
	ib_qpn_t		wc_qpn;		/* Source QPN Datagram only */
	ib_eecn_t		wc_opaque3;
	ib_qpn_t		wc_local_qpn;
	ibt_rkey_t		wc_rkey;
	ib_path_bits_t		wc_opaque4;
} ibt_wc_t;
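
/*
 * Completion handling sketch (illustrative only; the ibt_poll_cq()
 * prototype is part of the IBTI interface and is assumed here):
 *
 *	ibt_wc_t	wc[8];
 *	uint_t		polled, i;
 *
 *	while (ibt_poll_cq(cq_hdl, wc, 8, &polled) == IBT_SUCCESS) {
 *		for (i = 0; i < polled; i++) {
 *			if (wc[i].wc_status != IBT_WC_SUCCESS)
 *				(recover, using wc[i].wc_id to find the WR)
 *			else if (wc[i].wc_type == IBT_WRC_RECV)
 *				(consume wc[i].wc_bytes_xfer bytes)
 *		}
 *	}
 */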


/*
 * WR Flags. Common for both RC and UD
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_wr_flags_t;

#define	IBT_WR_NO_FLAGS		0
#define	IBT_WR_SEND_IMMED	(1 << 0)	/* Immediate Data Indicator */
#define	IBT_WR_SEND_SIGNAL	(1 << 1)	/* Signaled, if set */
#define	IBT_WR_SEND_FENCE	(1 << 2)	/* Fence Indicator */
#define	IBT_WR_SEND_SOLICIT	(1 << 3)	/* Solicited Event Indicator */
#define	IBT_WR_SEND_REMOTE_INVAL	(1 << 4) /* Remote Invalidate */
#define	IBT_WR_SEND_CKSUM	(1 << 5)	/* Checksum offload Indicator */

/*
 * Access control flags for Bind Memory Window operation,
 * applicable for RC/UC/RD only.
 *
 * If IBT_WR_BIND_WRITE or IBT_WR_BIND_ATOMIC is desired then
 * the Memory Region must have Local Write access enabled.
 */
typedef enum ibt_bind_flags_e {
	IBT_WR_BIND_READ	= (1 << 0),	/* enable remote read */
	IBT_WR_BIND_WRITE	= (1 << 1),	/* enable remote write */
	IBT_WR_BIND_ATOMIC	= (1 << 2),	/* enable remote atomics */
	IBT_WR_BIND_ZBVA	= (1 << 3)	/* Zero Based Virtual Address */
} ibt_bind_flags_t;

/*
 * Data Segment for scatter-gather list
 *
 * SGL consists of an array of data segments and the length of the SGL.
 */
typedef struct ibt_wr_ds_s {
	ib_vaddr_t	ds_va;		/* Virtual Address */
	ibt_lkey_t	ds_key;		/* L_Key */
	ib_msglen_t	ds_len;		/* Length of DS */
} ibt_wr_ds_t;

/*
 * Bind Memory Window WR
 *
 * WR ID from ibt_send_wr_t applies here too, SWG_0038 errata.
 */
typedef struct ibt_wr_bind_s {
	ibt_bind_flags_t	bind_flags;
	ibt_rkey_t		bind_rkey;		/* Mem Window's R_key */
	ibt_lkey_t		bind_lkey;		/* Mem Region's L_Key */
	ibt_rkey_t		bind_rkey_out;		/* OUT: new R_Key */
	ibt_mr_hdl_t		bind_ibt_mr_hdl;	/* Mem Region handle */
	ibt_mw_hdl_t		bind_ibt_mw_hdl;	/* Mem Window handle */
	ib_vaddr_t		bind_va;		/* Virtual Address */
	ib_memlen_t		bind_len;		/* Length of Window */
} ibt_wr_bind_t;

/*
 * Atomic WR
 *
 * Operation type (compare & swap or fetch & add) in ibt_wrc_opcode_t.
 *
 * A copy of the original contents of the remote memory will be stored
 * in the local data segment described by wr_sgl within ibt_send_wr_t,
 * and wr_nds should be set to 1.
 *
 * Atomic operation operands:
 *   Compare & Swap Operation:
 *	atom_arg1 - Compare Operand
 *	atom_arg2 - Swap Operand
 *
 *   Fetch & Add Operation:
 *	atom_arg1 - Add Operand
 *	atom_arg2 - ignored
 */
typedef struct ibt_wr_atomic_s {
	ib_vaddr_t	atom_raddr;	/* Remote address. */
	ibt_atom_arg_t	atom_arg1;	/* operand #1 */
	ibt_atom_arg_t	atom_arg2;	/* operand #2 */
	ibt_rkey_t	atom_rkey;	/* R_Key. */
} ibt_wr_atomic_t;

/*
 * RDMA WR
 * Immediate Data indicator in ibt_wr_flags_t.
 */
typedef struct ibt_wr_rdma_s {
	ib_vaddr_t	rdma_raddr;	/* Remote address. */
	ibt_rkey_t	rdma_rkey;	/* R_Key. */
	ibt_immed_t	rdma_immed;	/* Immediate Data */
} ibt_wr_rdma_t;

/*
 * Fast Register Physical Memory Region Work Request.
 */
typedef struct ibt_wr_reg_pmr_s {
	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
					/* client for the first byte of the */
					/* region */
	ib_memlen_t	pmr_len;	/* Length of region to register */
	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
					/* IOVA within the 1st physical */
					/* buffer */
	ibt_mr_hdl_t	pmr_mr_hdl;
	ibt_phys_buf_t	*pmr_buf_list;	/* List of physical buffers accessed */
					/* as an array */
	uint_t		pmr_num_buf;	/* Num of entries in the mr_buf_list */
	ibt_lkey_t	pmr_lkey;
	ibt_rkey_t	pmr_rkey;
	ibt_mr_flags_t	pmr_flags;
	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
} ibt_wr_reg_pmr_t;

/*
 * Local Invalidate.
 */
typedef struct ibt_wr_li_s {
	ibt_mr_hdl_t	li_mr_hdl;	/* Null for MW invalidates */
	ibt_mw_hdl_t	li_mw_hdl;	/* Null for MR invalidates */
	ibt_lkey_t	li_lkey;	/* Ignore for MW invalidates */
	ibt_rkey_t	li_rkey;
} ibt_wr_li_t;

/*
 * Reserved For Future Use.
 * Raw IPv6 Send WR
 */
typedef struct ibt_wr_ripv6_s {
	ib_lid_t	rip_dlid;	/* DLID */
	ib_path_bits_t  rip_slid_bits;	/* SLID path bits, SWG_0033 errata */
	uint8_t		rip_sl:4;	/* SL */
	ibt_srate_t	rip_rate;	/* Max Static Rate, SWG_0007 errata */
} ibt_wr_ripv6_t;

/*
 * Reserved For Future Use.
 * Raw Ethertype Send WR
 */
typedef struct ibt_wr_reth_s {
	ib_ethertype_t  reth_type;	/* Ethertype */
	ib_lid_t	reth_dlid;	/* DLID */
	ib_path_bits_t	reth_slid_bits;	/* SLID path bits, SWG_0033 errata */
	uint8_t		reth_sl:4;	/* SL */
	ibt_srate_t	reth_rate;	/* Max Static Rate, SWG_0007 errata */
} ibt_wr_reth_t;

/*
 * Reserved For Future Use.
 * RD Send WR, Operation type in ibt_wrc_opcode_t.
 */
typedef struct ibt_wr_rd_s {
	ibt_rd_dest_hdl_t	rdwr_dest_hdl;
	union {
	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND */
	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
						/* IBT_WRC_RDMAW */
	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
	    ibt_wr_atomic_t	*atomic;	/* IBT_WRC_FADD */
						/* IBT_WRC_CSWAP */
	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} rdwr;
} ibt_wr_rd_t;

/*
 * Reserved For Future Use.
 * UC Send WR, Operation type in ibt_wrc_opcode_t, the only valid
 * ones are:
 *		IBT_WRC_SEND
 *		IBT_WRC_RDMAW
 *		IBT_WRC_BIND
 */
typedef struct ibt_wr_uc_s {
	union {
	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND */
	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAW */
	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} ucwr;
} ibt_wr_uc_t;

/*
 * RC Send WR, Operation type in ibt_wrc_opcode_t.
 */
typedef struct ibt_wr_rc_s {
	union {
	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND w/ immediate */
	    ibt_rkey_t		send_inval;	/* IBT_WRC_SEND w/ invalidate */
	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
						/* IBT_WRC_RDMAW */
	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
	    ibt_wr_atomic_t	*atomic;	/* IBT_WRC_CSWAP */
						/* IBT_WRC_FADD */
	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} rcwr;
} ibt_wr_rc_t;

/*
 * UD Send WR, the only valid Operation is IBT_WRC_SEND.
 */
typedef struct ibt_wr_ud_s {
	ibt_immed_t		udwr_immed;
	ibt_ud_dest_hdl_t	udwr_dest;
} ibt_wr_ud_t;

/*
 * Send Work Request (WR) attributes structure.
 *
 * Operation type in ibt_wrc_opcode_t.
 * Immediate Data indicator in ibt_wr_flags_t.
 */
typedef struct ibt_send_wr_s {
	ibt_wrid_t		wr_id;		/* WR ID */
	ibt_wr_flags_t		wr_flags;	/* Work Request Flags. */
	ibt_tran_srv_t		wr_trans;	/* Transport Type. */
	ibt_wrc_opcode_t	wr_opcode;	/* Operation Type. */
	uint8_t			wr_rsvd;	/* maybe later */
	uint32_t		wr_nds;		/* Number of data segments */
						/* pointed to by wr_sgl */
	ibt_wr_ds_t		*wr_sgl;	/* SGL */
	union {
		ibt_wr_ud_t	ud;
		ibt_wr_rc_t	rc;
		ibt_wr_rd_t	rd;	/* Reserved For Future Use */
		ibt_wr_uc_t	uc;	/* Reserved For Future Use */
		ibt_wr_reth_t	reth;	/* Reserved For Future Use */
		ibt_wr_ripv6_t	ripv6;	/* Reserved For Future Use */
	} wr;				/* operation specific */
} ibt_send_wr_t;
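
/*
 * Post sketch (illustrative only; the ibt_post_send() prototype is part of
 * the IBTI interface and is assumed here, and my_context/len/mr_desc stand
 * for client data): a single signaled RC Send with one data segment:
 *
 *	ibt_send_wr_t	wr;
 *	ibt_wr_ds_t	sgl;
 *
 *	sgl.ds_va = mr_desc.md_vaddr;
 *	sgl.ds_key = mr_desc.md_lkey;
 *	sgl.ds_len = len;
 *
 *	wr.wr_id = (ibt_wrid_t)(uintptr_t)my_context;
 *	wr.wr_flags = IBT_WR_SEND_SIGNAL;
 *	wr.wr_trans = IBT_RC_SRV;
 *	wr.wr_opcode = IBT_WRC_SEND;
 *	wr.wr_nds = 1;
 *	wr.wr_sgl = &sgl;
 *	(void) ibt_post_send(chan_hdl, &wr, 1, NULL);
 */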

/*
 * Receive Work Request (WR) attributes structure.
 */
typedef struct ibt_recv_wr_s {
	ibt_wrid_t		wr_id;		/* WR ID */
	uint32_t		wr_nds;		/* number of data segments */
						/* pointed to by wr_sgl */
	ibt_wr_ds_t		*wr_sgl;	/* SGL */
} ibt_recv_wr_t;
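
/*
 * The receive side mirrors the send side (illustrative only; the
 * ibt_post_recv() prototype is part of the IBTI interface and is assumed
 * here, reusing an ibt_wr_ds_t set up as in the send sketch above).  Note
 * that on UD channels the first 40 bytes of the posted buffer are reserved
 * for the GRH (see IBT_WC_GRH_PRESENT above):
 *
 *	ibt_recv_wr_t	rwr;
 *
 *	rwr.wr_id = (ibt_wrid_t)(uintptr_t)my_buf;
 *	rwr.wr_nds = 1;
 *	rwr.wr_sgl = &sgl;
 *	(void) ibt_post_recv(chan_hdl, &rwr, 1, NULL);
 */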


/*
 * Asynchronous Events and Errors.
 *
 *  IBT_EVENT_PATH_MIGRATED	- Connection migrated to alternate path.
 *  IBT_EVENT_SQD		- Send queue has drained.
 *  IBT_EVENT_COM_EST		- First packet arrived on receive WQ while the
 *				  QP is in RTR state (before receiving RTU).
 *				  This event is not given to the client,
 *				  but instead, is given to CM.
 *  IBT_ERROR_CATASTROPHIC_CHAN	- An error on the channel, or on its
 *				  associated SRQ (if any), prevents reporting
 *				  of completions.
 *  IBT_ERROR_INVALID_REQUEST_CHAN - Detection of a transport opcode violation
 *				     at the responder.
 *  IBT_ERROR_ACCESS_VIOLATION_CHAN - Detection of a request access violation
 *				      at the responder.
 *  IBT_ERROR_PATH_MIGRATE_REQ	- Incoming path migration req not accepted.
 *  IBT_ERROR_CQ		- CQ protection error or CQ overrun.
 *  IBT_EVENT_PORT_UP		- HCA port/link available.
 *  IBT_ERROR_PORT_DOWN		- HCA port/link unavailable.
 *  IBT_ERROR_LOCAL_CATASTROPHIC - HCA Local Catastrophic (all QPs in error).
 *  IBT_EVENT_LIMIT_REACHED_SRQ - Shared Receive Queue Limit is reached. Number
 *				  of SRQ WQEs is less than the SRQ limit.
 *  IBT_EVENT_EMPTY_CHAN	- Channel in Error state associated with an
 *				  SRQ is empty (last WQE reached).
 *  IBT_ERROR_CATASTROPHIC_SRQ	- SRQ error, prevents reporting of
 *                                completions.
 *
 * Here are codes that are not used in calls to ibc_async_handler, but
 * are used by IBTL to inform IBT clients of a significant event.
 *
 *  IBT_HCA_ATTACH_EVENT	- New HCA available.
 *  IBT_HCA_DETACH_EVENT	- HCA is requesting not to be used.
 *
 * ERRORs on a channel indicate that the channel has entered error state.
 * EVENTs on a channel indicate that the channel has not changed state.
 *
 */
typedef enum ibt_async_code_e {
	IBT_EVENT_PATH_MIGRATED			= 0x000001,
	IBT_EVENT_SQD				= 0x000002,
	IBT_EVENT_COM_EST			= 0x000004,
	IBT_ERROR_CATASTROPHIC_CHAN		= 0x000008,
	IBT_ERROR_INVALID_REQUEST_CHAN		= 0x000010,
	IBT_ERROR_ACCESS_VIOLATION_CHAN		= 0x000020,
	IBT_ERROR_PATH_MIGRATE_REQ		= 0x000040,

	IBT_ERROR_CQ				= 0x000080,

	IBT_EVENT_PORT_UP			= 0x000100,
	IBT_ERROR_PORT_DOWN			= 0x000200,
	IBT_ERROR_LOCAL_CATASTROPHIC		= 0x000400,

	IBT_HCA_ATTACH_EVENT			= 0x000800,
	IBT_HCA_DETACH_EVENT			= 0x001000,
	IBT_ASYNC_OPAQUE1			= 0x002000,
	IBT_ASYNC_OPAQUE2			= 0x004000,
	IBT_ASYNC_OPAQUE3			= 0x008000,
	IBT_ASYNC_OPAQUE4			= 0x010000,
	IBT_EVENT_LIMIT_REACHED_SRQ		= 0x020000,
	IBT_EVENT_EMPTY_CHAN			= 0x040000,
	IBT_ERROR_CATASTROPHIC_SRQ		= 0x080000
} ibt_async_code_t;
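
/*
 * A client's async handler (registered via ibt_attach(); the handler and
 * event-argument types are defined by the IBTI interface, not in this
 * file) typically switches on the code, e.g.:
 *
 *	switch (code) {
 *	case IBT_EVENT_PORT_UP:
 *		(resume traffic on the affected port)
 *		break;
 *	case IBT_ERROR_CATASTROPHIC_CHAN:
 *		(tear down and re-create the channel)
 *		break;
 *	case IBT_HCA_DETACH_EVENT:
 *		(release all HCA resources, then close the HCA)
 *		break;
 *	default:
 *		break;
 *	}
 */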


/*
 * ibt_ci_data_in() and ibt_ci_data_out() flags.
 */
typedef enum ibt_ci_data_flags_e {
	IBT_CI_NO_FLAGS		= 0,
	IBT_CI_COMPLETE_ALLOC	= (1 << 0)
} ibt_ci_data_flags_t;

/*
 * Used by ibt_ci_data_in() and ibt_ci_data_out() to identify the type of
 * handle for which mapping data is being obtained.
 */
typedef enum ibt_object_type_e {
	IBT_HDL_HCA	=	1,
	IBT_HDL_CHANNEL,
	IBT_HDL_CQ,
	IBT_HDL_PD,
	IBT_HDL_MR,
	IBT_HDL_MW,
	IBT_HDL_UD_DEST,
	IBT_HDL_SCHED,
	IBT_HDL_OPAQUE1,
	IBT_HDL_OPAQUE2,
	IBT_HDL_SRQ
} ibt_object_type_t;

/*
 * Memory error handler data structures; code, and payload data.
 */
typedef enum ibt_mem_code_s {
	IBT_MEM_AREA	= 0x1,
	IBT_MEM_REGION	= 0x2
} ibt_mem_code_t;

typedef struct ibt_mem_data_s {
	uint64_t	ev_fma_ena;	/* FMA Error data */
	ibt_mr_hdl_t	ev_mr_hdl;	/* MR handle */
	ibt_ma_hdl_t	ev_ma_hdl;	/* MA handle */
} ibt_mem_data_t;

/*
 * Special case failure type.
 */
typedef enum ibt_failure_type_e {
	IBT_FAILURE_STANDARD	= 0,
	IBT_FAILURE_CI,
	IBT_FAILURE_IBMF,
	IBT_FAILURE_IBTL,
	IBT_FAILURE_IBCM,
	IBT_FAILURE_IBDM
} ibt_failure_type_t;

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_IB_IBTL_IBTL_TYPES_H */