1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #ifndef	_SYS_IB_IBTL_IBTL_TYPES_H
27 #define	_SYS_IB_IBTL_IBTL_TYPES_H
28 
29 /*
30  * ibtl_types.h
31  *
32  * All common IBTL-defined types.  These are common data types
33  * shared by the IBTI and IBCI interfaces; this file is included only
34  * by ibti.h and ibci.h.
35  */
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/ib/ib_types.h>
39 #include <sys/ib/ibtl/ibtl_status.h>
40 #include <sys/socket.h>
41 #include <sys/byteorder.h>
42 
43 
44 #ifdef	__cplusplus
45 extern "C" {
46 #endif
47 
48 /*
49  * Endian Macros
50  *    h2b - host endian to big endian protocol
51  *    b2h - big endian protocol to host endian
52  *    h2l - host endian to little endian protocol
53  *    l2h - little endian protocol to host endian
54  */
55 #if defined(_LITTLE_ENDIAN)
56 #define	h2b16(x)	(htons(x))
57 #define	h2b32(x)	(htonl(x))
58 #define	h2b64(x)	(htonll(x))
59 #define	b2h16(x)	(ntohs(x))
60 #define	b2h32(x)	(ntohl(x))
61 #define	b2h64(x)	(ntohll(x))
62 
63 #define	h2l16(x)	(x)
64 #define	h2l32(x)	(x)
65 #define	h2l64(x)	(x)
66 #define	l2h16(x)	(x)
67 #define	l2h32(x)	(x)
68 #define	l2h64(x)	(x)
69 
70 #elif defined(_BIG_ENDIAN)
71 #define	h2b16(x)	(x)
72 #define	h2b32(x)	(x)
73 #define	h2b64(x)	(x)
74 #define	b2h16(x)	(x)
75 #define	b2h32(x)	(x)
76 #define	b2h64(x)	(x)
77 
78 #define	h2l16(x)	(ddi_swap16(x))
79 #define	h2l32(x)	(ddi_swap32(x))
80 #define	h2l64(x)	(ddi_swap64(x))
81 #define	l2h16(x)	(ddi_swap16(x))
82 #define	l2h32(x)	(ddi_swap32(x))
83 #define	l2h64(x)	(ddi_swap64(x))
84 
85 #else
86 #error	"what endian is this machine?"
87 #endif
88 
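/*
 * Illustrative sketch (not part of this header): converting a 16-bit LID
 * and a 32-bit Q_Key between host order and the big-endian order used on
 * the IB wire, using the macros above.  The function and variable names
 * are hypothetical.
 *
 *	static void
 *	example_endian_usage(uint16_t host_lid, uint32_t wire_qkey)
 *	{
 *		uint16_t wire_lid = h2b16(host_lid);
 *		uint32_t host_qkey = b2h32(wire_qkey);
 *
 *		(void) wire_lid;
 *		(void) host_qkey;
 *	}
 */
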
89 /*
90  * Define Internal IBTL handles
91  */
92 typedef	struct	ibtl_clnt_s	*ibt_clnt_hdl_t;    /* ibt_attach() */
93 typedef	struct	ibtl_hca_s	*ibt_hca_hdl_t;	    /* ibt_open_hca() */
94 typedef	struct	ibtl_channel_s	*ibt_channel_hdl_t; /* alloc_rc|ud_channel() */
95 typedef	struct	ibtl_srq_s	*ibt_srq_hdl_t;	    /* ibt_alloc_srq() */
96 typedef	struct	ibtl_cq_s	*ibt_cq_hdl_t;	    /* ibt_alloc_cq() */
97 typedef	struct	ibcm_svc_info_s	*ibt_srv_hdl_t;	    /* ibt_register_service() */
98 typedef	struct	ibcm_svc_bind_s	*ibt_sbind_hdl_t;   /* ibt_bind_service() */
99 
100 typedef	struct	ibc_fmr_pool_s	*ibt_fmr_pool_hdl_t; /* ibt_create_fmr_pool() */
101 typedef	struct	ibc_ma_s	*ibt_ma_hdl_t;	    /* ibt_map_mem_area() */
102 typedef	struct	ibc_pd_s	*ibt_pd_hdl_t;	    /* ibt_alloc_pd() */
103 typedef	struct	ibc_sched_s	*ibt_sched_hdl_t;   /* ibt_alloc_cq_sched() */
104 typedef	struct	ibc_mr_s	*ibt_mr_hdl_t;	    /* ibt_register_mr() */
105 typedef	struct	ibc_mw_s	*ibt_mw_hdl_t;	    /* ibt_alloc_mw() */
106 typedef	struct	ibt_ud_dest_s	*ibt_ud_dest_hdl_t; /* UD dest handle */
107 typedef	struct	ibc_ah_s	*ibt_ah_hdl_t;	    /* ibt_alloc_ah() */
108 typedef struct	ibtl_eec_s	*ibt_eec_hdl_t;
109 typedef	struct	ibt_rd_dest_s	*ibt_rd_dest_hdl_t;	/* Reserved for */
110 							/* Future use */
111 typedef struct  ibc_mem_alloc_s *ibt_mem_alloc_hdl_t; /* ibt_alloc_io_mem() */
112 typedef struct	ibc_mi_s	*ibt_mi_hdl_t;		/* ibt_map_mem_iov() */
113 
114 /*
115  * Some General Types.
116  */
117 typedef uint32_t	ibt_lkey_t;		/* L_Key */
118 typedef uint32_t	ibt_rkey_t;		/* R_Key */
119 typedef uint64_t	ibt_wrid_t;		/* Client assigned WR ID */
120 typedef uint32_t	ibt_immed_t;		/* WR Immediate Data */
121 typedef uint64_t	ibt_atom_arg_t;		/* WR Atomic Operation arg */
122 typedef	uint_t		ibt_cq_handler_id_t;	/* Event handler ID */
123 
124 /*
125  * IBT selector type, used when looking up/requesting either an
126  * MTU, Pkt lifetime, or Static rate.
127  * The interpretation of IBT_BEST depends on the attribute being selected.
128  */
129 typedef enum ibt_selector_e {
130 	IBT_GT		= 0,	/* Greater than */
131 	IBT_LT		= 1,	/* Less than */
132 	IBT_EQU		= 2,	/* Equal to */
133 	IBT_BEST	= 3	/* Best */
134 } ibt_selector_t;
135 
136 
137 /*
138  * Static rate definitions.
139  */
140 typedef enum ibt_srate_e {
141 	IBT_SRATE_NOT_SPECIFIED	= 0,
142 	IBT_SRATE_2		= 2,	/*  1X SDR i.e. 2.5 Gbps */
143 	IBT_SRATE_10		= 3,	/*  4X SDR or 1X QDR i.e. 10 Gbps */
144 	IBT_SRATE_30		= 4,	/* 12X SDR i.e. 30 Gbps */
145 
146 	IBT_SRATE_5		= 5,	/*  1X DDR i.e.  5 Gbps */
147 	IBT_SRATE_20		= 6,	/*  4X DDR or 8X SDR i.e. 20 Gbps */
148 	IBT_SRATE_40		= 7,	/*  8X DDR or 4X QDR i.e. 40 Gbps */
149 
150 	IBT_SRATE_60		= 8,	/* 12X DDR i.e. 60 Gbps */
151 	IBT_SRATE_80		= 9,	/*  8X QDR i.e. 80 Gbps */
152 	IBT_SRATE_120		= 10	/* 12X QDR i.e. 120 Gbps */
153 } ibt_srate_t;
154 
155 /* Retain old definitions to be compatible with older bits. */
156 #define	IBT_SRATE_1X	IBT_SRATE_2
157 #define	IBT_SRATE_4X	IBT_SRATE_10
158 #define	IBT_SRATE_12X	IBT_SRATE_30
159 
160 /*
161  * Static rate request type.
162  */
163 typedef struct ibt_srate_req_s {
164 	ibt_srate_t	r_srate;	/* Requested srate */
165 	ibt_selector_t	r_selector;	/* Qualifier for r_srate */
166 } ibt_srate_req_t;
167 
168 /*
169  * Packet Life Time Request Type.
170  */
171 typedef struct ibt_pkt_lt_req_s {
172 	clock_t		p_pkt_lt;	/* Requested Packet Life Time */
173 	ibt_selector_t	p_selector;	/* Qualifier for p_pkt_lt */
174 } ibt_pkt_lt_req_t;
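
/*
 * Illustrative sketch (hypothetical names): requesting the best static
 * rate available and a packet lifetime of at least a given value, using
 * the selector types above.
 *
 *	static void
 *	example_fill_requests(ibt_srate_req_t *sr, ibt_pkt_lt_req_t *plt,
 *	    clock_t min_lt)
 *	{
 *		sr->r_srate = IBT_SRATE_NOT_SPECIFIED;
 *		sr->r_selector = IBT_BEST;
 *
 *		plt->p_pkt_lt = min_lt;
 *		plt->p_selector = IBT_GT;
 *	}
 */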
175 
176 /*
177  * Queue size struct.
178  */
179 typedef struct ibt_queue_sizes_s {
180 	uint_t	qs_sq;		/* SendQ size. */
181 	uint_t	qs_rq;		/* RecvQ size. */
182 } ibt_queue_sizes_t;
183 
184 /*
185  * Channel sizes struct, used by functions that allocate/query RC or UD
186  * channels.
187  */
188 typedef struct ibt_chan_sizes_s {
189 	uint_t	cs_sq;		/* SendQ size. */
190 	uint_t	cs_rq;		/* ReceiveQ size. */
191 	uint_t	cs_sq_sgl;	/* Max SGL elements in a SQ WR. */
192 	uint_t	cs_rq_sgl;	/* Max SGL elements in a RQ WR. */
193 	uint_t  cs_inline;	/* Max inline payload size */
194 } ibt_chan_sizes_t;
195 
196 /*
197  * Shared Receive Queue (SRQ) size struct.
198  */
199 typedef struct ibt_srq_sizes_s {
200 	uint_t	srq_wr_sz;
201 	uint_t	srq_sgl_sz;
202 } ibt_srq_sizes_t;
203 
204 /*
205  * SRQ Modify Flags
206  */
207 typedef enum ibt_srq_modify_flags_e {
208 	IBT_SRQ_SET_NOTHING		= 0,
209 	IBT_SRQ_SET_SIZE		= (1 << 1),
210 	IBT_SRQ_SET_LIMIT		= (1 << 2)
211 } ibt_srq_modify_flags_t;
212 
213 
214 /*
215  * Execution flags, indicating whether the function should block or not.
216  * Note: in some cases, e.g., a NULL rc_cm_handler, IBT_NONBLOCKING
217  * will not have an effect, and the thread will block.
218  * IBT_NOCALLBACKS is valid for ibt_close_rc_channel only.
219  */
220 typedef enum ibt_execution_mode_e {
221 	IBT_BLOCKING	= 0,	/* Block */
222 	IBT_NONBLOCKING	= 1,	/* Return as soon as possible */
223 	IBT_NOCALLBACKS	= 2	/* cm_handler is not invoked after */
224 				/* ibt_close_rc_channel returns */
225 } ibt_execution_mode_t;
226 
227 /*
228  * Memory window alloc flags
229  */
230 typedef enum ibt_mw_flags_e {
231 	IBT_MW_SLEEP		= 0,		/* Can block */
232 	IBT_MW_NOSLEEP		= (1 << 0),	/* Can't block */
233 	IBT_MW_USER_MAP		= (1 << 1),
234 	IBT_MW_DEFER_ALLOC	= (1 << 2),
235 	IBT_MW_TYPE_1		= (1 << 3),
236 	IBT_MW_TYPE_2		= (1 << 4)
237 } ibt_mw_flags_t;
238 
239 /*
240  * PD alloc flags
241  */
242 typedef enum ibt_pd_flags_e {
243 	IBT_PD_NO_FLAGS		= 0,
244 	IBT_PD_USER_MAP		= (1 << 0),
245 	IBT_PD_DEFER_ALLOC	= (1 << 1)
246 } ibt_pd_flags_t;
247 
248 /*
249  * UD Dest alloc flags
250  */
251 typedef enum ibt_ud_dest_flags_e {
252 	IBT_UD_DEST_NO_FLAGS	= 0,
253 	IBT_UD_DEST_USER_MAP	= (1 << 0),
254 	IBT_UD_DEST_DEFER_ALLOC	= (1 << 1)
255 } ibt_ud_dest_flags_t;
256 
257 /*
258  * SRQ alloc flags
259  */
260 typedef enum ibt_srq_flags_e {
261 	IBT_SRQ_NO_FLAGS	= 0,
262 	IBT_SRQ_USER_MAP	= (1 << 0),
263 	IBT_SRQ_DEFER_ALLOC	= (1 << 1)
264 } ibt_srq_flags_t;
265 
266 /*
267  * ibt_alloc_lkey() alloc flags
268  */
269 typedef enum ibt_lkey_flags_e {
270 	IBT_KEY_NO_FLAGS	= 0,
271 	IBT_KEY_REMOTE		= (1 << 0)
272 } ibt_lkey_flags_t;
273 
274 /*
275  *  RNR NAK retry counts.
276  */
277 typedef enum ibt_rnr_retry_cnt_e {
278 	IBT_RNR_NO_RETRY	= 0x0,	/* Don't retry, fail on first timeout */
279 	IBT_RNR_RETRY_1		= 0x1,	/* Retry once */
280 	IBT_RNR_RETRY_2		= 0x2,	/* Retry twice */
281 	IBT_RNR_RETRY_3		= 0x3,	/* Retry three times */
282 	IBT_RNR_RETRY_4		= 0x4,	/* Retry four times */
283 	IBT_RNR_RETRY_5		= 0x5,	/* Retry five times */
284 	IBT_RNR_RETRY_6		= 0x6,	/* Retry six times */
285 	IBT_RNR_INFINITE_RETRY	= 0x7	/* Retry forever */
286 } ibt_rnr_retry_cnt_t;
287 
288 /*
289  * Valid values for RNR NAK timer fields, part of a channel's context.
290  */
291 typedef enum ibt_rnr_nak_time_e {
292 	IBT_RNR_NAK_655ms	= 0x0,
293 	IBT_RNR_NAK_10us	= 0x1,
294 	IBT_RNR_NAK_20us	= 0x2,
295 	IBT_RNR_NAK_30us	= 0x3,
296 	IBT_RNR_NAK_40us	= 0x4,
297 	IBT_RNR_NAK_60us	= 0x5,
298 	IBT_RNR_NAK_80us	= 0x6,
299 	IBT_RNR_NAK_120us	= 0x7,
300 	IBT_RNR_NAK_160us	= 0x8,
301 	IBT_RNR_NAK_240us	= 0x9,
302 	IBT_RNR_NAK_320us	= 0xA,
303 	IBT_RNR_NAK_480us	= 0xB,
304 	IBT_RNR_NAK_640us	= 0xC,
305 	IBT_RNR_NAK_960us	= 0xD,
306 	IBT_RNR_NAK_1280us	= 0xE,
307 	IBT_RNR_NAK_1920us	= 0xF,
308 	IBT_RNR_NAK_2560us	= 0x10,
309 	IBT_RNR_NAK_3840us	= 0x11,
310 	IBT_RNR_NAK_5120us	= 0x12,
311 	IBT_RNR_NAK_7680us	= 0x13,
312 	IBT_RNR_NAK_10ms	= 0x14,
313 	IBT_RNR_NAK_15ms	= 0x15,
314 	IBT_RNR_NAK_20ms	= 0x16,
315 	IBT_RNR_NAK_31ms	= 0x17,
316 	IBT_RNR_NAK_41ms	= 0x18,
317 	IBT_RNR_NAK_61ms	= 0x19,
318 	IBT_RNR_NAK_82ms	= 0x1A,
319 	IBT_RNR_NAK_123ms	= 0x1B,
320 	IBT_RNR_NAK_164ms	= 0x1C,
321 	IBT_RNR_NAK_246ms	= 0x1D,
322 	IBT_RNR_NAK_328ms	= 0x1E,
323 	IBT_RNR_NAK_492ms	= 0x1F
324 } ibt_rnr_nak_time_t;
325 
326 /*
327  * The definition of HCA capabilities etc. as a bitfield.
328  */
329 typedef enum ibt_hca_flags_e {
330 	IBT_HCA_NO_FLAGS	= 0,
331 
332 	IBT_HCA_RD		= 1 << 0,
333 	IBT_HCA_UD_MULTICAST	= 1 << 1,
334 	IBT_HCA_RAW_MULTICAST	= 1 << 2,
335 
336 	IBT_HCA_ATOMICS_HCA	= 1 << 3,
337 	IBT_HCA_ATOMICS_GLOBAL	= 1 << 4,
338 
339 	IBT_HCA_RESIZE_CHAN	= 1 << 5,	/* Is resize supported? */
340 	IBT_HCA_AUTO_PATH_MIG	= 1 << 6,	/* Is APM supported? */
341 	IBT_HCA_SQD_SQD_PORT	= 1 << 7,	/* Can change physical port */
342 						/* on transit from SQD to SQD */
343 	IBT_HCA_PKEY_CNTR	= 1 << 8,
344 	IBT_HCA_QKEY_CNTR	= 1 << 9,
345 	IBT_HCA_AH_PORT_CHECK	= 1 << 10,	/* HCA checks AH port match */
346 						/* in UD WRs */
347 	IBT_HCA_PORT_UP		= 1 << 11,	/* PortActive event supported */
348 	IBT_HCA_INIT_TYPE	= 1 << 12,	/* InitType supported */
349 	IBT_HCA_SI_GUID		= 1 << 13,	/* System Image GUID */
350 						/* supported */
351 	IBT_HCA_SHUTDOWN_PORT	= 1 << 14,	/* ShutdownPort supported */
352 	IBT_HCA_RNR_NAK		= 1 << 15,	/* RNR-NAK supported for RC */
353 	IBT_HCA_CURRENT_QP_STATE = 1 << 16,	/* Does modify_qp support */
354 						/* checking of current state? */
355 	IBT_HCA_SRQ 		= 1 << 17,	/* Shared Receive Queue (RC) */
356 	IBT_HCA_RC_SRQ 		= IBT_HCA_SRQ,
357 	IBT_HCA_RESIZE_SRQ	= 1 << 18,	/* Is resize SRQ supported? */
358 	IBT_HCA_UD_SRQ		= 1 << 19,	/* UD with SRQ */
359 
360 	IBT_HCA_MULT_PAGE_SZ_MR	= 1 << 20,	/* Support of multiple page */
361 						/* sizes per memory region? */
362 	IBT_HCA_BLOCK_LIST	= 1 << 21,	/* Block list physical buffer */
363 						/* lists supported? */
364 	IBT_HCA_ZERO_BASED_VA	= 1 << 22,	/* Zero Based Virtual */
365 						/* Addresses supported? */
366 	IBT_HCA_LOCAL_INVAL_FENCE = 1 << 23,	/* Local invalidate fencing? */
367 	IBT_HCA_BASE_QUEUE_MGT	= 1 << 24,	/* Base Queue Mgt supported? */
368 	IBT_HCA_CKSUM_FULL	= 1 << 25,	/* Checksum offload supported */
369 	IBT_HCA_MEM_WIN_TYPE_2B	= 1 << 26,	/* Type 2B memory windows */
370 	IBT_HCA_PHYS_BUF_BLOCK	= 1 << 27,	/* Block mode phys buf lists */
371 	IBT_HCA_FMR		= 1 << 28,	/* FMR Support */
372 	IBT_HCA_WQE_SIZE_INFO	= 1 << 29,	/* detailed WQE size info */
373 	IBT_HCA_SQD_STATE	= 1 << 30	/* SQD QP state */
374 } ibt_hca_flags_t;
375 
376 typedef enum ibt_hca_flags2_e {
377 	IBT_HCA2_NO_FLAGS	= 0,
378 
379 	IBT_HCA2_UC		= 1 << 1,	/* Unreliable Connected */
380 	IBT_HCA2_UC_SRQ		= 1 << 2,	/* UC with SRQ */
381 	IBT_HCA2_RES_LKEY	= 1 << 3,	/* Reserved L_Key */
382 	IBT_HCA2_PORT_CHANGE	= 1 << 4,	/* Port Change event */
383 	IBT_HCA2_IP_CLASS	= 1 << 5,	/* IP Classification flags */
384 	IBT_HCA2_RSS_TPL_ALG	= 1 << 6,	/* RSS: Toeplitz algorithm */
385 	IBT_HCA2_RSS_XOR_ALG	= 1 << 7,	/* RSS: XOR algorithm */
386 	IBT_HCA2_XRC		= 1 << 8,	/* Extended RC (XRC) */
387 	IBT_HCA2_XRC_SRQ_RESIZE	= 1 << 9,	/* resize XRC SRQ */
388 	IBT_HCA2_MEM_MGT_EXT	= 1 << 10 /* FMR-WR, send-inv, local-inv */
389 } ibt_hca_flags2_t;
390 
391 /*
392  * The definition of HCA page size capabilities as a bitfield
393  */
394 typedef enum ibt_page_sizes_e {
395 	IBT_PAGE_4K		= 0x1 << 2,
396 	IBT_PAGE_8K		= 0x1 << 3,
397 	IBT_PAGE_16K		= 0x1 << 4,
398 	IBT_PAGE_32K		= 0x1 << 5,
399 	IBT_PAGE_64K		= 0x1 << 6,
400 	IBT_PAGE_128K		= 0x1 << 7,
401 	IBT_PAGE_256K		= 0x1 << 8,
402 	IBT_PAGE_512K		= 0x1 << 9,
403 	IBT_PAGE_1M		= 0x1 << 10,
404 	IBT_PAGE_2M		= 0x1 << 11,
405 	IBT_PAGE_4M		= 0x1 << 12,
406 	IBT_PAGE_8M		= 0x1 << 13,
407 	IBT_PAGE_16M		= 0x1 << 14,
408 	IBT_PAGE_32M		= 0x1 << 15,
409 	IBT_PAGE_64M		= 0x1 << 16,
410 	IBT_PAGE_128M		= 0x1 << 17,
411 	IBT_PAGE_256M		= 0x1 << 18,
412 	IBT_PAGE_512M		= 0x1 << 19,
413 	IBT_PAGE_1G		= 0x1 << 20,
414 	IBT_PAGE_2G		= 0x1 << 21,
415 	IBT_PAGE_4G		= 0x1 << 22,
416 	IBT_PAGE_8G		= 0x1 << 23,
417 	IBT_PAGE_16G		= 0x1 << 24
418 } ibt_page_sizes_t;
419 
420 /*
421  * Memory Window Type.
422  */
423 typedef enum ibt_mem_win_type_e {
424 	IBT_MEM_WIN_TYPE_NOT_DEFINED	= 0,
425 	IBT_MEM_WIN_TYPE_1		= (1 << 0),
426 	IBT_MEM_WIN_TYPE_2		= (1 << 1)
427 } ibt_mem_win_type_t;
428 
429 /*
430  * HCA attributes.
431  * Contains all HCA static attributes.
432  */
433 typedef struct ibt_hca_attr_s {
434 	ibt_hca_flags_t	hca_flags;		/* HCA capabilities etc */
435 	ibt_hca_flags2_t hca_flags2;
436 
437 	/* device/version inconsistency w/ NodeInfo and IOControllerProfile */
438 	uint32_t	hca_vendor_id:24;	/* 24 bit Vendor ID */
439 	uint16_t	hca_device_id;
440 	uint32_t	hca_version_id;
441 
442 	uint_t		hca_max_chans;		/* Max Chans supported */
443 	uint_t		hca_max_chan_sz;	/* Max outstanding WRs on any */
444 						/* channel */
445 
446 	uint_t		hca_max_sgl;		/* Max SGL entries per WR */
447 
448 	uint_t		hca_max_cq;		/* Max num of CQs supported  */
449 	uint_t		hca_max_cq_sz;		/* Max capacity of each CQ */
450 
451 	ibt_page_sizes_t	hca_page_sz;	/* Bit mask of page sizes */
452 
453 	uint_t		hca_max_memr;		/* Max num of HCA mem regions */
454 	ib_memlen_t	hca_max_memr_len;	/* Largest block of memory, in */
455 						/* bytes, that can be registered */
456 	uint_t		hca_max_mem_win;	/* Max Memory windows in HCA */
457 
458 	uint_t		hca_max_rsc; 		/* Max Responder Resources of */
459 						/* this HCA for RDMAR/Atomics */
460 						/* with this HCA as target. */
461 	uint8_t		hca_max_rdma_in_chan;	/* Max RDMA Reads/Atomics in */
462 						/* per chan, this HCA as target */
463 	uint8_t		hca_max_rdma_out_chan;	/* Max RDMA Reads/Atomics out */
464 						/* per channel by this HCA */
465 	uint_t		hca_max_ipv6_chan;	/* Max IPV6 channels in HCA */
466 	uint_t		hca_max_ether_chan;	/* Max Ether channels in HCA */
467 
468 	uint_t		hca_max_mcg_chans;	/* Max number of channels */
469 						/* that can join multicast */
470 						/* groups */
471 	uint_t		hca_max_mcg;		/* Max multicast groups */
472 	uint_t		hca_max_chan_per_mcg;	/* Max number of channels per */
473 						/* Multicast group in HCA */
474 
475 	uint16_t	hca_max_partitions;	/* Max partitions in HCA */
476 	uint8_t		hca_nports;		/* Number of physical ports */
477 	ib_guid_t	hca_node_guid;		/* Node GUID */
478 
479 	ib_time_t	hca_local_ack_delay;
480 
481 	uint_t		hca_max_port_sgid_tbl_sz;
482 	uint16_t	hca_max_port_pkey_tbl_sz;
483 	uint_t		hca_max_pd;		/* Max# of Protection Domains */
484 	ib_guid_t	hca_si_guid;		/* Optional System Image GUID */
485 	uint_t		hca_hca_max_ci_priv_sz;
486 	uint_t		hca_chan_max_ci_priv_sz;
487 	uint_t		hca_cq_max_ci_priv_sz;
488 	uint_t		hca_pd_max_ci_priv_sz;
489 	uint_t		hca_mr_max_ci_priv_sz;
490 	uint_t		hca_mw_max_ci_priv_sz;
491 	uint_t		hca_ud_dest_max_ci_priv_sz;
492 	uint_t		hca_cq_sched_max_ci_priv_sz;
493 	uint_t		hca_max_ud_dest;
494 	uint_t		hca_opaque2;
495 	uint_t		hca_opaque3;
496 	uint_t		hca_opaque4;
497 	uint8_t		hca_opaque5;
498 	uint8_t		hca_opaque6;
499 	uint8_t		hca_rss_max_log2_table;	/* max RSS log2 table size */
500 	uint_t		hca_opaque7;
501 	uint_t		hca_opaque8;
502 	uint_t		hca_max_srqs;		/* Max SRQs supported */
503 	uint_t		hca_max_srqs_sz;	/* Max outstanding WRs on any */
504 						/* SRQ */
505 	uint_t		hca_max_srq_sgl;	/* Max SGL entries per SRQ WR */
506 	uint_t		hca_max_phys_buf_list_sz;
507 	size_t		hca_block_sz_lo;	/* Range of block sizes */
508 	size_t		hca_block_sz_hi;	/* supported by the HCA */
509 	uint_t		hca_max_cq_handlers;
510 	ibt_lkey_t	hca_reserved_lkey;	/* Reserved L_Key value */
511 	uint_t		hca_max_fmrs;		/* Max FMR Supported */
512 	uint_t		hca_opaque9;
513 
514 	uint_t		hca_max_lso_size;
515 	uint_t		hca_max_lso_hdr_size;
516 	uint_t		hca_max_inline_size;
517 
518 	uint_t		hca_max_cq_mod_count;	/* CQ notify moderation */
519 	uint_t		hca_max_cq_mod_usec;
520 
521 	uint32_t	hca_fw_major_version;	/* firmware version */
522 	uint16_t	hca_fw_minor_version;
523 	uint16_t	hca_fw_micro_version;
524 
525 	uint_t		hca_max_xrc_domains;	/* XRC items */
526 	uint_t		hca_max_xrc_srqs;
527 	uint_t		hca_max_xrc_srq_size;
528 	uint_t		hca_max_xrc_srq_sgl;
529 
530 	/* detailed WQE size info */
531 	uint_t		hca_ud_send_inline_sz;	/* inline size in bytes */
532 	uint_t		hca_conn_send_inline_sz;
533 	uint_t		hca_conn_rdmaw_inline_overhead;
534 	uint_t		hca_recv_sgl_sz;	/* detailed SGL sizes */
535 	uint_t		hca_ud_send_sgl_sz;
536 	uint_t		hca_conn_send_sgl_sz;
537 	uint_t		hca_conn_rdma_sgl_overhead;
538 } ibt_hca_attr_t;
539 
540 /*
541  * HCA Port link states.
542  */
543 typedef enum ibt_port_state_e {
544 	IBT_PORT_DOWN	= 1,
545 	IBT_PORT_INIT,
546 	IBT_PORT_ARM,
547 	IBT_PORT_ACTIVE
548 } ibt_port_state_t;
549 
550 /*
551  * HCA Port capabilities as a bitfield.
552  */
553 typedef enum ibt_port_caps_e {
554 	IBT_PORT_CAP_NO_FLAGS		= 0,
555 	IBT_PORT_CAP_SM			= 1 << 0,	/* SM port */
556 	IBT_PORT_CAP_SM_DISABLED	= 1 << 1,
557 	IBT_PORT_CAP_SNMP_TUNNEL	= 1 << 2,	/* SNMP Tunneling */
558 	IBT_PORT_CAP_DM			= 1 << 3,	/* DM supported */
559 	IBT_PORT_CAP_VENDOR		= 1 << 4,	/* Vendor Class */
560 	IBT_PORT_CAP_CLNT_REREG		= 1 << 5	/* Client Rereg */
561 } ibt_port_caps_t;
562 
563 
564 /* LinkWidth fields from PortInfo */
565 typedef uint8_t ib_link_width_t;
566 
567 /*
568  * When reading LinkWidthSupported and LinkWidthEnabled, these
569  * values will be OR-ed together. See IB spec 14.2.5.6 for allowed
570  * combinations. For LinkWidthActive, only one will be returned.
571  */
572 #define	IBT_LINK_WIDTH_1X	(1)
573 #define	IBT_LINK_WIDTH_4X	(2)
574 #define	IBT_LINK_WIDTH_8X	(4)
575 #define	IBT_LINK_WIDTH_12X	(8)
576 
577 /* LinkSpeed fields from PortInfo */
578 typedef uint8_t ib_link_speed_t;
579 
580 /*
581  * When reading LinkSpeedSupported and LinkSpeedEnabled, these
582  * values will be OR-ed together. See IB spec 14.2.5.6 for allowed
583  * combinations. For LinkSpeedActive, only one will be returned.
584  */
585 #define	IBT_LINK_SPEED_SDR	(1)
586 #define	IBT_LINK_SPEED_DDR	(2)
587 #define	IBT_LINK_SPEED_QDR	(4)
588 
589 /* PortPhysicalState field from PortInfo */
590 typedef uint8_t ib_port_phys_state_t;
591 
592 #define	IBT_PORT_PHYS_STATE_SLEEP	(1)
593 #define	IBT_PORT_PHYS_STATE_POLLING	(2)
594 #define	IBT_PORT_PHYS_STATE_DISABLED	(3)
595 #define	IBT_PORT_PHYS_STATE_TRAINING	(4)
596 #define	IBT_PORT_PHYS_STATE_UP		(5)
597 #define	IBT_PORT_PHYS_STATE_RECOVERY	(6)
598 #define	IBT_PORT_PHYS_STATE_TEST	(7)
599 
600 /*
601  * HCA port attributes structure definition. The number of ports per HCA
602  * can be found in the "ibt_hca_attr_t" structure.
603  *
604  * p_pkey_tbl is a pointer to an array of ib_pkey_t; members are
605  * accessed as:
606  *		hca_portinfo->p_pkey_tbl[i]
607  *
608  * Where 0 <= i < hca_portinfo->p_pkey_tbl_sz
609  *
610  * Similarly p_sgid_tbl is a pointer to an array of ib_gid_t.
611  *
612  * The Query Port function, ibt_query_hca_ports(), allocates the memory
613  * required for the ibt_hca_portinfo_t struct as well as the memory
614  * required for the SGID and P_Key tables. The memory is freed by calling
615  * ibt_free_portinfo().
616  */
617 typedef struct ibt_hca_portinfo_s {
618 	ib_lid_t		p_opaque1;	/* Base LID of port */
619 	ib_qkey_cntr_t		p_qkey_violations; /* Bad Q_Key cnt */
620 	ib_pkey_cntr_t		p_pkey_violations; /* Optional bad P_Key cnt */
621 	uint8_t			p_sm_sl:4;	/* SM Service level */
622 	ib_port_phys_state_t	p_phys_state;
623 	ib_lid_t		p_sm_lid;	/* SM LID */
624 	ibt_port_state_t	p_linkstate;	/* Port state */
625 	uint8_t			p_port_num;
626 	ib_link_width_t		p_width_supported;
627 	ib_link_width_t		p_width_enabled;
628 	ib_link_width_t		p_width_active;
629 	ib_mtu_t		p_mtu;		/* Max transfer unit - pkt */
630 	uint8_t			p_lmc:3;	/* LID Mask Control */
631 	ib_link_speed_t		p_speed_supported;
632 	ib_link_speed_t		p_speed_enabled;
633 	ib_link_speed_t		p_speed_active;
634 	ib_gid_t		*p_sgid_tbl;	/* SGID Table */
635 	uint_t			p_sgid_tbl_sz;	/* Size of SGID table */
636 	uint16_t		p_pkey_tbl_sz;	/* Size of P_Key table */
637 	uint16_t		p_def_pkey_ix;	/* default pkey index for TI */
638 	ib_pkey_t		*p_pkey_tbl;	/* P_Key table */
639 	uint8_t			p_max_vl;	/* Max num of virtual lanes */
640 	uint8_t			p_init_type_reply; /* Optional InitTypeReply */
641 	ib_time_t		p_subnet_timeout; /* Max Subnet Timeout */
642 	ibt_port_caps_t		p_capabilities;	/* Port Capabilities */
643 	uint32_t		p_msg_sz;	/* Max message size */
644 } ibt_hca_portinfo_t;
645 
646 /*
647  * Modify HCA port attributes flags, specifies which HCA port
648  * attributes to modify.
649  */
650 typedef enum ibt_port_modify_flags_e {
651 	IBT_PORT_NO_FLAGS	= 0,
652 
653 	IBT_PORT_RESET_QKEY	= 1 << 0,	/* Reset Q_Key violation */
654 						/* counter */
655 	IBT_PORT_RESET_SM	= 1 << 1,	/* SM */
656 	IBT_PORT_SET_SM		= 1 << 2,
657 	IBT_PORT_RESET_SNMP	= 1 << 3,	/* SNMP Tunneling */
658 	IBT_PORT_SET_SNMP	= 1 << 4,
659 	IBT_PORT_RESET_DEVMGT	= 1 << 5,	/* Device Management */
660 	IBT_PORT_SET_DEVMGT	= 1 << 6,
661 	IBT_PORT_RESET_VENDOR	= 1 << 7,	/* Vendor Class */
662 	IBT_PORT_SET_VENDOR	= 1 << 8,
663 	IBT_PORT_SHUTDOWN	= 1 << 9,	/* Shut down the port */
664 	IBT_PORT_SET_INIT_TYPE	= 1 << 10	/* InitTypeReply value */
665 } ibt_port_modify_flags_t;
666 
667 /*
668  * Modify HCA port InitType bit definitions, applicable only if
669  * IBT_PORT_SET_INIT_TYPE modify flag (ibt_port_modify_flags_t) is set.
670  */
671 #define	IBT_PINIT_NO_LOAD		0x1
672 #define	IBT_PINIT_PRESERVE_CONTENT	0x2
673 #define	IBT_PINIT_PRESERVE_PRESENCE	0x4
674 #define	IBT_PINIT_NO_RESUSCITATE	0x8
675 
676 
677 /*
678  * Address vector definition.
679  */
680 typedef struct ibt_adds_vect_s {
681 	ib_gid_t	av_dgid;	/* IPV6 dest GID in GRH */
682 	ib_gid_t	av_sgid;	/* SGID */
683 	ibt_srate_t	av_srate;	/* Max static rate */
684 	uint8_t		av_srvl:4;	/* Service level in LRH */
685 	uint_t		av_flow:20;	/* 20 bit Flow Label */
686 	uint8_t		av_tclass;	/* Traffic Class */
687 	uint8_t		av_hop;		/* Hop Limit */
688 	uint8_t		av_port_num;	/* Port number for UD */
689 	boolean_t	av_opaque1;
690 	ib_lid_t	av_opaque2;
691 	ib_path_bits_t	av_opaque3;
692 	uint32_t	av_opaque4;
693 } ibt_adds_vect_t;
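
/*
 * Illustrative sketch (hypothetical names): minimal fill-in of an address
 * vector for a UD destination on the local subnet.  Real consumers would
 * normally obtain these values from path records rather than hard-coding
 * them.
 *
 *	static void
 *	example_fill_av(ibt_adds_vect_t *av, ib_gid_t dgid, ib_gid_t sgid,
 *	    uint8_t port)
 *	{
 *		bzero(av, sizeof (*av));
 *		av->av_dgid = dgid;
 *		av->av_sgid = sgid;
 *		av->av_srate = IBT_SRATE_NOT_SPECIFIED;
 *		av->av_srvl = 0;
 *		av->av_port_num = port;
 *	}
 */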
694 
695 typedef struct ibt_cep_path_s {
696 	ibt_adds_vect_t	cep_adds_vect;		/* Address Vector */
697 	uint16_t	cep_pkey_ix;		/* P_Key Index */
698 	uint8_t		cep_hca_port_num;	/* Port number for connected */
699 						/* channels.  A value of 0 */
700 						/* indicates an invalid path */
701 	ib_time_t	cep_cm_opaque1;
702 } ibt_cep_path_t;
703 
704 /*
705  * Define Receive Side Scaling types for IP over IB.
706  */
707 typedef enum ibt_rss_flags_e {
708 	IBT_RSS_ALG_TPL		= (1 << 0),	/* RSS: Toeplitz hash */
709 	IBT_RSS_ALG_XOR		= (1 << 1),	/* RSS: XOR hash */
710 	IBT_RSS_HASH_IPV4	= (1 << 2),	/* RSS: hash IPv4 headers */
711 	IBT_RSS_HASH_IPV6	= (1 << 3),	/* RSS: hash IPv6 headers */
712 	IBT_RSS_HASH_TCP_IPV4	= (1 << 4),	/* RSS: hash TCP/IPv4 hdrs */
713 	IBT_RSS_HASH_TCP_IPV6	= (1 << 5)	/* RSS: hash TCP/IPv6 hdrs */
714 } ibt_rss_flags_t;
715 
716 typedef struct ibt_rss_attr_s {
717 	ibt_rss_flags_t	rss_flags;		/* RSS: flags */
718 	uint_t		rss_log2_table;		/* RSS: log2 table size */
719 	ib_qpn_t	rss_base_qpn;		/* RSS: base QPN */
720 	ib_qpn_t	rss_def_qpn;		/* RSS: default QPN */
721 	uint8_t		rss_toe_key[40];	/* RSS: Toeplitz hash key */
722 } ibt_rss_attr_t;
723 
724 /*
725  * Channel Migration State.
726  */
727 typedef enum ibt_cep_cmstate_e {
728 	IBT_STATE_NOT_SUPPORTED	= 0,
729 	IBT_STATE_MIGRATED	= 1,
730 	IBT_STATE_REARMED	= 2,
731 	IBT_STATE_ARMED		= 3
732 } ibt_cep_cmstate_t;
733 
734 /*
735  * Transport service type
736  *
737  * NOTE: this was converted from an enum to a uint8_t to save space.
738  */
739 typedef uint8_t ibt_tran_srv_t;
740 
741 #define	IBT_RC_SRV		0
742 #define	IBT_UC_SRV		1
743 #define	IBT_RD_SRV		2
744 #define	IBT_UD_SRV		3
745 #define	IBT_RAWIP_SRV		4
746 #define	IBT_RAWETHER_SRV	5
747 
748 /*
749  * Channel (QP/EEC) state definitions.
750  */
751 typedef enum ibt_cep_state_e {
752 	IBT_STATE_RESET	= 0,		/* Reset */
753 	IBT_STATE_INIT,			/* Initialized */
754 	IBT_STATE_RTR,			/* Ready to Receive */
755 	IBT_STATE_RTS,			/* Ready to Send */
756 	IBT_STATE_SQD,			/* Send Queue Drained */
757 	IBT_STATE_SQE,			/* Send Queue Error */
758 	IBT_STATE_ERROR,		/* Error */
759 	IBT_STATE_SQDRAIN,		/* Send Queue Draining */
760 	IBT_STATE_NUM			/* Number of states */
761 } ibt_cep_state_t;
762 
763 
764 /*
765  * Channel Attribute flags.
766  */
767 typedef enum ibt_attr_flags_e {
768 	IBT_ALL_SIGNALED	= 0,	/* All sends signaled */
769 	IBT_WR_SIGNALED		= 1,	/* Signaled on a WR basis */
770 	IBT_FAST_REG_RES_LKEY	= (1 << 1),
771 	IBT_USES_LSO		= (1 << 2)
772 } ibt_attr_flags_t;
773 
774 /*
775  * Channel End Point (CEP) Control Flags.
776  */
777 typedef enum ibt_cep_flags_e {
778 	IBT_CEP_NO_FLAGS	= 0,		/* Enable Nothing */
779 	IBT_CEP_RDMA_RD		= (1 << 0),	/* Enable incoming RDMA RD's */
780 						/* RC & RD only */
781 	IBT_CEP_RDMA_WR		= (1 << 1),	/* Enable incoming RDMA WR's */
782 						/* RC & RD only */
783 	IBT_CEP_ATOMIC		= (1 << 2)	/* Enable incoming Atomics, */
784 						/* RC & RD only */
785 } ibt_cep_flags_t;
786 
787 /*
788  * Channel Modify Flags
789  */
790 typedef enum ibt_cep_modify_flags_e {
791 	IBT_CEP_SET_NOTHING		= 0,
792 	IBT_CEP_SET_SQ_SIZE		= (1 << 1),
793 	IBT_CEP_SET_RQ_SIZE		= (1 << 2),
794 
795 	IBT_CEP_SET_RDMA_R		= (1 << 3),
796 	IBT_CEP_SET_RDMA_W		= (1 << 4),
797 	IBT_CEP_SET_ATOMIC		= (1 << 5),
798 
799 	IBT_CEP_SET_ALT_PATH		= (1 << 6),	/* Alternate Path */
800 
801 	IBT_CEP_SET_ADDS_VECT		= (1 << 7),
802 	IBT_CEP_SET_PORT		= (1 << 8),
803 	IBT_CEP_SET_OPAQUE5		= (1 << 9),
804 	IBT_CEP_SET_RETRY		= (1 << 10),
805 	IBT_CEP_SET_RNR_NAK_RETRY 	= (1 << 11),
806 	IBT_CEP_SET_MIN_RNR_NAK		= (1 << 12),
807 
808 	IBT_CEP_SET_QKEY		= (1 << 13),
809 	IBT_CEP_SET_RDMARA_OUT		= (1 << 14),
810 	IBT_CEP_SET_RDMARA_IN		= (1 << 15),
811 
812 	IBT_CEP_SET_OPAQUE1		= (1 << 16),
813 	IBT_CEP_SET_OPAQUE2		= (1 << 17),
814 	IBT_CEP_SET_OPAQUE3		= (1 << 18),
815 	IBT_CEP_SET_OPAQUE4		= (1 << 19),
816 	IBT_CEP_SET_SQD_EVENT		= (1 << 20),
817 	IBT_CEP_SET_OPAQUE6		= (1 << 21),
818 	IBT_CEP_SET_OPAQUE7		= (1 << 22),
819 	IBT_CEP_SET_OPAQUE8		= (1 << 23),
820 	IBT_CEP_SET_RSS			= (1 << 24)
821 } ibt_cep_modify_flags_t;
822 
823 /*
824  * CQ notify types.
825  */
826 typedef enum ibt_cq_notify_flags_e {
827 	IBT_NEXT_COMPLETION	= 1,
828 	IBT_NEXT_SOLICITED	= 2
829 } ibt_cq_notify_flags_t;
830 
831 /*
832  * CQ types shared across TI and CI.
833  */
834 typedef enum ibt_cq_flags_e {
835 	IBT_CQ_NO_FLAGS			= 0,
836 	IBT_CQ_HANDLER_IN_THREAD	= 1 << 0,	/* A thread calls the */
837 							/* CQ handler */
838 	IBT_CQ_USER_MAP			= 1 << 1,
839 	IBT_CQ_DEFER_ALLOC		= 1 << 2
840 } ibt_cq_flags_t;
841 
842 /*
843  * CQ types shared across TI and CI.
844  */
845 typedef enum ibt_cq_sched_flags_e {
846 	IBT_CQS_NO_FLAGS	= 0,
847 	IBT_CQS_WARM_CACHE	= 1 << 0, /* run on same CPU */
848 	IBT_CQS_AFFINITY	= 1 << 1,
849 	IBT_CQS_SCHED_GROUP	= 1 << 2,
850 	IBT_CQS_USER_MAP	= 1 << 3,
851 	IBT_CQS_DEFER_ALLOC	= 1 << 4
852 } ibt_cq_sched_flags_t;
853 
854 /*
855  * Attributes when creating a Completion Queue.
856  *
857  * Note:
858  *	The IBT_CQ_HANDLER_IN_THREAD cq_flags bit should be ignored by the CI.
859  */
860 typedef struct ibt_cq_attr_s {
861 	uint_t			cq_size;
862 	ibt_sched_hdl_t		cq_sched;	/* 0 = no hint, */
863 						/* other = cq_sched value */
864 	ibt_cq_flags_t		cq_flags;
865 } ibt_cq_attr_t;
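
/*
 * Illustrative sketch (hypothetical names): filling a CQ attribute
 * structure before handing it to the CQ allocation interface noted
 * above (ibt_alloc_cq()).  No scheduling hint is requested.
 *
 *	static void
 *	example_fill_cq_attr(ibt_cq_attr_t *attrp, uint_t nentries)
 *	{
 *		attrp->cq_size = nentries;
 *		attrp->cq_sched = NULL;
 *		attrp->cq_flags = IBT_CQ_NO_FLAGS;
 *	}
 */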
866 
867 /*
868  * Memory Management
869  */
870 
871 /* Memory management flags */
872 typedef enum ibt_mr_flags_e {
873 	IBT_MR_SLEEP			= 0,
874 	IBT_MR_NOSLEEP			= (1 << 1),
875 	IBT_MR_NONCOHERENT		= (1 << 2),
876 	IBT_MR_PHYS_IOVA		= (1 << 3),  /* ibt_(re)register_buf */
877 
878 	/* Access control flags */
879 	IBT_MR_ENABLE_WINDOW_BIND	= (1 << 4),
880 	IBT_MR_ENABLE_LOCAL_WRITE	= (1 << 5),
881 	IBT_MR_ENABLE_REMOTE_READ	= (1 << 6),
882 	IBT_MR_ENABLE_REMOTE_WRITE	= (1 << 7),
883 	IBT_MR_ENABLE_REMOTE_ATOMIC	= (1 << 8),
884 
885 	/* Reregister flags */
886 	IBT_MR_CHANGE_TRANSLATION	= (1 << 9),
887 	IBT_MR_CHANGE_ACCESS		= (1 << 10),
888 	IBT_MR_CHANGE_PD		= (1 << 11),
889 
890 	/* Additional registration flags */
891 	IBT_MR_ZBVA			= (1 << 12),
892 
893 	/* Additional physical registration flags */
894 	IBT_MR_CONSUMER_KEY		= (1 << 13),	/* Consumer owns key */
895 							/* portion of keys */
896 	IBT_MR_DISABLE_RO		= (1 << 14)
897 } ibt_mr_flags_t;
898 
899 
900 /* Memory Region attribute flags */
901 typedef enum ibt_mr_attr_flags_e {
902 	/* Access control flags */
903 	IBT_MR_WINDOW_BIND		= (1 << 0),
904 	IBT_MR_LOCAL_WRITE		= (1 << 1),
905 	IBT_MR_REMOTE_READ		= (1 << 2),
906 	IBT_MR_REMOTE_WRITE		= (1 << 3),
907 	IBT_MR_REMOTE_ATOMIC		= (1 << 4),
908 	IBT_MR_ZERO_BASED_VA		= (1 << 5),
909 	IBT_MR_CONSUMER_OWNED_KEY	= (1 << 6),
910 	IBT_MR_SHARED			= (1 << 7),
911 	IBT_MR_FMR			= (1 << 8),
912 	IBT_MR_RO_DISABLED		= (1 << 9)
913 } ibt_mr_attr_flags_t;
914 
915 /* Memory region physical descriptor. */
916 typedef struct ibt_phys_buf_s {
917 	union {
918 		uint64_t	_p_ll;		/* 64 bit DMA address */
919 		uint32_t	_p_la[2];	/* 2 x 32 bit address */
920 	} _phys_buf;
921 	size_t	p_size;
922 } ibt_phys_buf_t;
923 
924 /* version of above for uniform buffer size */
925 typedef struct ib_phys_addr_t {
926 	union {
927 		uint64_t	_p_ll;		/* 64 bit DMA address */
928 		uint32_t	_p_la[2];	/* 2 x 32 bit address */
929 	} _phys_buf;
930 } ibt_phys_addr_t;
931 
932 #define	p_laddr		_phys_buf._p_ll
933 #ifdef	_LONG_LONG_HTOL
934 #define	p_notused	_phys_buf._p_la[0]
935 #define	p_addr		_phys_buf._p_la[1]
936 #else
937 #define	p_addr		_phys_buf._p_la[0]
938 #define	p_notused	_phys_buf._p_la[1]
939 #endif
940 
941 
942 /* Memory region descriptor. */
943 typedef struct ibt_mr_desc_s {
944 	ib_vaddr_t	md_vaddr;	/* IB virtual addr of memory */
945 	ibt_lkey_t	md_lkey;
946 	ibt_rkey_t	md_rkey;
947 	boolean_t	md_sync_required;
948 } ibt_mr_desc_t;
949 
950 /* Physical Memory region descriptor. */
951 typedef struct ibt_pmr_desc_s {
952 	ib_vaddr_t	pmd_iova;	/* Returned I/O Virtual Address */
953 	ibt_lkey_t	pmd_lkey;
954 	ibt_rkey_t	pmd_rkey;
955 	uint_t 		pmd_phys_buf_list_sz;	/* Allocated Phys buf sz */
956 	boolean_t	pmd_sync_required;
957 } ibt_pmr_desc_t;
958 
959 /* Memory region protection bounds. */
960 typedef struct ibt_mr_prot_bounds_s {
961 	ib_vaddr_t	pb_addr;	/* Beginning address */
962 	size_t		pb_len;		/* Length of protected region */
963 } ibt_mr_prot_bounds_t;
964 
965 /* Memory Region (Re)Register attributes */
966 typedef struct ibt_mr_attr_s {
967 	ib_vaddr_t	mr_vaddr;	/* Virtual address to register */
968 	ib_memlen_t	mr_len;		/* Length of region to register */
969 	struct as	*mr_as;		/* A pointer to an address space */
970 					/* structure. This parameter should */
971 					/* be set to NULL, which implies */
972 					/* kernel address space. */
973 	ibt_mr_flags_t	mr_flags;
974 } ibt_mr_attr_t;
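
/*
 * Illustrative sketch (hypothetical names): describing a kernel virtual
 * buffer for registration (e.g., via ibt_register_mr()).  mr_as is left
 * NULL to indicate kernel address space, as described above.
 *
 *	static void
 *	example_fill_mr_attr(ibt_mr_attr_t *mrp, caddr_t kva, size_t len)
 *	{
 *		mrp->mr_vaddr = (ib_vaddr_t)(uintptr_t)kva;
 *		mrp->mr_len = (ib_memlen_t)len;
 *		mrp->mr_as = NULL;
 *		mrp->mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *	}
 */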
975 
976 /* Physical Memory Region (Re)Register */
977 typedef struct ibt_pmr_attr_s {
978 	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
979 					/* client for the first byte of the */
980 					/* region */
981 	ib_memlen_t	pmr_len;	/* Length of region to register */
982 	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
983 					/* IOVA within the 1st physical */
984 					/* buffer */
985 	ibt_ma_hdl_t	pmr_ma;		/* Memory handle used to obtain the */
986 					/* pmr_buf_list */
987 	ibt_phys_addr_t	*pmr_addr_list;	/* List of physical buffers accessed */
988 					/* as an array */
989 	size_t		pmr_buf_sz;
990 	uint_t		pmr_num_buf;	/* Num of entries in the pmr_buf_list */
991 	ibt_lkey_t	pmr_lkey;	/* Reregister only */
992 	ibt_rkey_t	pmr_rkey;	/* Reregister only */
993 	ibt_mr_flags_t	pmr_flags;
994 	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
995 } ibt_pmr_attr_t;
996 
997 /* addr/length pair */
998 typedef struct ibt_iov_s {
999 	caddr_t	iov_addr;	/* Beginning address */
1000 	size_t	iov_len;	/* Length */
1001 } ibt_iov_t;
1002 
1003 /* Map memory IOV */
1004 typedef enum ibt_iov_flags_e {
1005 	IBT_IOV_SLEEP		= 0,
1006 	IBT_IOV_NOSLEEP		= (1 << 0),
1007 	IBT_IOV_BUF		= (1 << 1),
1008 	IBT_IOV_RECV		= (1 << 2)
1009 } ibt_iov_flags_t;
1010 
1011 typedef struct ibt_iov_attr_s {
1012 	struct as		*iov_as;
1013 	ibt_iov_t		*iov;
1014 	struct buf		*iov_buf;
1015 	uint32_t		iov_list_len;
1016 	uint32_t		iov_wr_nds;
1017 	ib_msglen_t		iov_lso_hdr_sz;
1018 	ibt_iov_flags_t		iov_flags;
1019 } ibt_iov_attr_t;
1020 
1021 /*
1022  * Memory Region (Re)Register attributes - used by ibt_register_shared_mr(),
1023  * ibt_register_buf() and ibt_reregister_buf().
1024  */
1025 typedef struct ibt_smr_attr_s {
1026 	ib_vaddr_t		mr_vaddr;
1027 	ibt_mr_flags_t		mr_flags;
1028 	uint8_t			mr_key;		/* Only for physical */
1029 						/* ibt_(re)register_buf() */
1030 	ibt_lkey_t		mr_lkey;	/* Only for physical */
1031 	ibt_rkey_t		mr_rkey;	/* ibt_reregister_buf() */
1032 } ibt_smr_attr_t;
1033 
1034 /*
1035  * key states.
1036  */
1037 typedef enum ibt_key_state_e {
1038 	IBT_KEY_INVALID	= 0,
1039 	IBT_KEY_FREE,
1040 	IBT_KEY_VALID
1041 } ibt_key_state_t;
1042 
1043 /* Memory region query attributes */
1044 typedef struct ibt_mr_query_attr_s {
1045 	ibt_lkey_t		mr_lkey;
1046 	ibt_rkey_t		mr_rkey;
1047 	ibt_mr_prot_bounds_t	mr_lbounds;	/* Actual local CI protection */
1048 						/* bounds */
1049 	ibt_mr_prot_bounds_t	mr_rbounds;	/* Actual remote CI */
1050 						/* protection bounds */
1051 	ibt_mr_attr_flags_t	mr_attr_flags;	/* Access rights etc. */
1052 	ibt_pd_hdl_t		mr_pd;		/* Protection domain */
1053 	boolean_t		mr_sync_required;
1054 	ibt_key_state_t		mr_lkey_state;
1055 	uint_t			mr_phys_buf_list_sz;
1056 } ibt_mr_query_attr_t;
1057 
1058 /* Memory window query attributes */
1059 typedef struct ibt_mw_query_attr_s {
1060 	ibt_pd_hdl_t		mw_pd;
1061 	ibt_mem_win_type_t	mw_type;
1062 	ibt_rkey_t		mw_rkey;
1063 	ibt_key_state_t		mw_state;
1064 } ibt_mw_query_attr_t;
1065 
1066 
1067 /* Memory Region Sync Flags. */
1068 #define	IBT_SYNC_READ	0x1	/* Make memory changes visible to incoming */
1069 				/* RDMA reads */
1070 
1071 #define	IBT_SYNC_WRITE	0x2	/* Make the effects of an incoming RDMA write */
1072 				/* visible to the consumer */
1073 
1074 /* Memory region sync args */
1075 typedef struct ibt_mr_sync_s {
1076 	ibt_mr_hdl_t	ms_handle;
1077 	ib_vaddr_t	ms_vaddr;
1078 	ib_memlen_t	ms_len;
1079 	uint32_t	ms_flags;	/* IBT_SYNC_READ or  IBT_SYNC_WRITE */
1080 } ibt_mr_sync_t;
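
/*
 * Illustrative sketch (hypothetical names): describing a region that was
 * just written by an incoming RDMA write so its contents can be made
 * visible to the consumer (IBT_SYNC_WRITE), using the flags above.
 *
 *	static void
 *	example_fill_mr_sync(ibt_mr_sync_t *msp, ibt_mr_hdl_t mr,
 *	    ib_vaddr_t va, ib_memlen_t len)
 *	{
 *		msp->ms_handle = mr;
 *		msp->ms_vaddr = va;
 *		msp->ms_len = len;
 *		msp->ms_flags = IBT_SYNC_WRITE;
 *	}
 */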
1081 
1082 /*
1083  * Flags for Virtual Address to HCA Physical Address translation.
1084  */
1085 typedef enum ibt_va_flags_e {
1086 	IBT_VA_SLEEP		= 0,
1087 	IBT_VA_NOSLEEP		= (1 << 0),
1088 	IBT_VA_NONCOHERENT	= (1 << 1),
1089 	IBT_VA_FMR		= (1 << 2),
1090 	IBT_VA_BLOCK_MODE	= (1 << 3),
1091 	IBT_VA_BUF		= (1 << 4)
1092 } ibt_va_flags_t;
1093 
1094 
1095 /*  Address Translation parameters */
1096 typedef struct ibt_va_attr_s {
1097 	ib_vaddr_t	va_vaddr;	/* Virtual address to register */
1098 	ib_memlen_t	va_len;		/* Length of region to register */
1099 	struct as	*va_as;		/* A pointer to an address space */
1100 					/* structure. */
1101 	size_t		va_phys_buf_min;	/* block mode only */
1102 	size_t		va_phys_buf_max;	/* block mode only */
1103 	ibt_va_flags_t	va_flags;
1104 	struct buf	*va_buf;
1105 } ibt_va_attr_t;
1106 
1107 
1108 /*
1109  * Fast Memory Registration (FMR) support.
1110  */
1111 
1112 /* FMR flush function handler. */
1113 typedef void (*ibt_fmr_flush_handler_t)(ibt_fmr_pool_hdl_t fmr_pool,
1114     void *fmr_func_arg);
1115 
1116 /* FMR Pool create attributes. */
1117 typedef struct ibt_fmr_pool_attr_s {
1118 	uint_t			fmr_max_pages_per_fmr;
1119 	uint_t			fmr_pool_size;
1120 	uint_t			fmr_dirty_watermark;
1121 	size_t			fmr_page_sz;
1122 	boolean_t		fmr_cache;
1123 	ibt_mr_flags_t		fmr_flags;
1124 	ibt_fmr_flush_handler_t	fmr_func_hdlr;
1125 	void			*fmr_func_arg;
1126 } ibt_fmr_pool_attr_t;
1127 
1128 
1129 /*
1130  * WORK REQUEST AND WORK REQUEST COMPLETION DEFINITIONS.
1131  */
1132 
1133 /*
1134  * Work Request and Work Request Completion types - These types are used
1135  *   to indicate the type of work requests posted to a work queue
1136  *   or the type of completion received.  Immediate Data is indicated via
1137  *   ibt_wr_flags_t or ibt_wc_flags_t.
1138  *
1139  *   IBT_WRC_RECV and IBT_WRC_RECV_RDMAWI are only used as opcodes in the
1140  *   work completions.
1141  *
1142  * NOTE: this was converted from an enum to a uint8_t to save space.
1143  */
1144 typedef uint8_t ibt_wrc_opcode_t;
1145 
1146 #define	IBT_WRC_SEND		1	/* Send */
1147 #define	IBT_WRC_RDMAR		2	/* RDMA Read */
1148 #define	IBT_WRC_RDMAW		3	/* RDMA Write */
1149 #define	IBT_WRC_CSWAP		4	/* Compare & Swap Atomic */
1150 #define	IBT_WRC_FADD		5	/* Fetch & Add Atomic */
1151 #define	IBT_WRC_BIND		6	/* Bind Memory Window */
1152 #define	IBT_WRC_RECV		7	/* Receive */
1153 #define	IBT_WRC_RECV_RDMAWI	8	/* Received RDMA Write w/ Immediate */
1154 #define	IBT_WRC_FAST_REG_PMR	9	/* Fast Register Physical mem region */
1155 #define	IBT_WRC_LOCAL_INVALIDATE 10
1156 #define	IBT_WRC_SEND_LSO	11
1157 
1158 
1159 /*
1160  * Work Request Completion flags - These flags indicate what type
1161  *   of data is present in the Work Request Completion structure
1162  */
1163 typedef uint8_t ibt_wc_flags_t;
1164 
1165 #define	IBT_WC_NO_FLAGS			0
1166 #define	IBT_WC_GRH_PRESENT		(1 << 0)
1167 #define	IBT_WC_IMMED_DATA_PRESENT	(1 << 1)
1168 #define	IBT_WC_RKEY_INVALIDATED		(1 << 2)
1169 #define	IBT_WC_CKSUM_OK			(1 << 3)
1170 
1171 /* IPoIB flags for wc_detail field */
1172 #define	IBT_WC_DETAIL_ALL_FLAGS_MASK	(0x0FC00000)
1173 #define	IBT_WC_DETAIL_IPV4		(1 << 22)
1174 #define	IBT_WC_DETAIL_IPV4_FRAG		(1 << 23)
1175 #define	IBT_WC_DETAIL_IPV6		(1 << 24)
1176 #define	IBT_WC_DETAIL_IPV4_OPT		(1 << 25)
1177 #define	IBT_WC_DETAIL_TCP		(1 << 26)
1178 #define	IBT_WC_DETAIL_UDP		(1 << 27)
1179 
1180 #define	IBT_WC_DETAIL_RSS_MATCH_MASK	(0x003F0000)
1181 #define	IBT_WC_DETAIL_RSS_TCP_IPV6	(1 << 18)
1182 #define	IBT_WC_DETAIL_RSS_IPV6		(1 << 19)
1183 #define	IBT_WC_DETAIL_RSS_TCP_IPV4	(1 << 20)
1184 #define	IBT_WC_DETAIL_RSS_IPV4		(1 << 21)
1185 
1186 /*
1187  * Work Request Completion - This structure encapsulates the information
1188  *   necessary to define a work request completion.
1189  */
1190 typedef struct ibt_wc_s {
1191 	ibt_wrid_t		wc_id;		/* Work Request Id */
1192 	uint64_t		wc_fma_ena;	/* fault management err data */
1193 	ib_msglen_t		wc_bytes_xfer;	/* Number of Bytes */
1194 						/* Transferred */
1195 	ibt_wc_flags_t		wc_flags;	/* WR Completion Flags */
1196 	ibt_wrc_opcode_t	wc_type;	/* Operation Type */
1197 	uint16_t		wc_cksum;	/* payload checksum */
1198 	ibt_immed_t		wc_immed_data;	/* Immediate Data */
1199 	uint32_t		wc_res_hash;	/* RD: Freed Res, RSS: hash */
1200 	ibt_wc_status_t		wc_status;	/* Completion Status */
1201 	uint8_t			wc_sl:4;	/* Remote SL */
1202 	uint16_t		wc_ethertype;	/* Ethertype Field - RE */
1203 	ib_lid_t		wc_opaque1;
1204 	uint16_t		wc_opaque2;
1205 	ib_qpn_t		wc_qpn;		/* Source QPN Datagram only */
1206 	uint32_t		wc_detail;	/* RD: EECN, UD: IPoIB flags */
1207 	ib_qpn_t		wc_local_qpn;
1208 	ibt_rkey_t		wc_rkey;
1209 	ib_path_bits_t		wc_opaque4;
1210 } ibt_wc_t;
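
/*
 * Illustrative sketch (hypothetical names): inspecting a polled work
 * completion for a successful receive.  IBT_WC_SUCCESS is assumed to be
 * the success code defined in ibtl_status.h.
 *
 *	static boolean_t
 *	example_recv_ok(const ibt_wc_t *wc)
 *	{
 *		if (wc->wc_status != IBT_WC_SUCCESS)
 *			return (B_FALSE);
 *		if (wc->wc_type != IBT_WRC_RECV &&
 *		    wc->wc_type != IBT_WRC_RECV_RDMAWI)
 *			return (B_FALSE);
 *		return (B_TRUE);
 *	}
 */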
1211 
1212 /*
1213  * WR Flags. Common for both RC and UD
1214  *
1215  * NOTE: this was converted from an enum to a uint8_t to save space.
1216  */
1217 typedef uint8_t ibt_wr_flags_t;
1218 
1219 #define	IBT_WR_NO_FLAGS		0
1220 #define	IBT_WR_SEND_IMMED	(1 << 0)	/* Immediate Data Indicator */
1221 #define	IBT_WR_SEND_SIGNAL	(1 << 1)	/* Signaled, if set */
1222 #define	IBT_WR_SEND_FENCE	(1 << 2)	/* Fence Indicator */
1223 #define	IBT_WR_SEND_SOLICIT	(1 << 3)	/* Solicited Event Indicator */
1224 #define	IBT_WR_SEND_REMOTE_INVAL	(1 << 4) /* Remote Invalidate */
1225 #define	IBT_WR_SEND_CKSUM	(1 << 5)	/* Checksum offload Indicator */
1226 #define	IBT_WR_SEND_INLINE	(1 << 6)	/* INLINE required (no lkey) */
1227 
1228 /*
1229  * Access control flags for Bind Memory Window operation,
1230  * applicable for RC/UC/RD only.
1231  *
1232  * If IBT_WR_BIND_WRITE or IBT_WR_BIND_ATOMIC is desired, then
1233  * the Memory Region must have Local Write access enabled.
1234  */
1235 typedef enum ibt_bind_flags_e {
1236 	IBT_WR_BIND_READ	= (1 << 0),	/* enable remote read */
1237 	IBT_WR_BIND_WRITE	= (1 << 1),	/* enable remote write */
1238 	IBT_WR_BIND_ATOMIC	= (1 << 2),	/* enable remote atomics */
1239 	IBT_WR_BIND_ZBVA	= (1 << 3)	/* Zero Based Virtual Address */
1240 } ibt_bind_flags_t;
1241 
1242 /*
1243  * Data Segment for scatter-gather list
1244  *
1245  * SGL consists of an array of data segments and the length of the SGL.
1246  */
1247 typedef struct ibt_wr_ds_s {
1248 	ib_vaddr_t	ds_va;		/* Virtual Address */
1249 	ibt_lkey_t	ds_key;		/* L_Key */
1250 	ib_msglen_t	ds_len;		/* Length of DS */
1251 } ibt_wr_ds_t;
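
/*
 * Illustrative sketch (hypothetical names): building a single-element
 * scatter-gather list from a registered region's ibt_mr_desc_t.
 *
 *	static void
 *	example_fill_sgl(ibt_wr_ds_t *ds, const ibt_mr_desc_t *md,
 *	    ib_msglen_t len)
 *	{
 *		ds->ds_va = md->md_vaddr;
 *		ds->ds_key = md->md_lkey;
 *		ds->ds_len = len;
 *	}
 */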
1252 
1253 /*
1254  * Bind Memory Window WR
1255  *
1256  * WR ID from ibt_send_wr_t applies here too, SWG_0038 errata.
1257  */
1258 typedef struct ibt_wr_bind_s {
1259 	ibt_bind_flags_t	bind_flags;
1260 	ibt_rkey_t		bind_rkey;		/* Mem Window's R_key */
1261 	ibt_lkey_t		bind_lkey;		/* Mem Region's L_Key */
1262 	ibt_rkey_t		bind_rkey_out;		/* OUT: new R_Key */
1263 	ibt_mr_hdl_t		bind_ibt_mr_hdl;	/* Mem Region handle */
1264 	ibt_mw_hdl_t		bind_ibt_mw_hdl;	/* Mem Window handle */
1265 	ib_vaddr_t		bind_va;		/* Virtual Address */
1266 	ib_memlen_t		bind_len;		/* Length of Window */
1267 } ibt_wr_bind_t;
1268 
1269 /*
1270  * Atomic WR
1271  *
1272  * Operation type (compare & swap or fetch & add) in ibt_wrc_opcode_t.
1273  *
1274  * A copy of the original contents of the remote memory will be stored
1275  * in the local data segment described by wr_sgl within ibt_send_wr_t,
1276  * and wr_nds should be set to 1.
1277  *
1278  * Atomic operation operands:
1279  *   Compare & Swap Operation:
1280  *	atom_arg1 - Compare Operand
1281  *	atom_arg2 - Swap Operand
1282  *
1283  *   Fetch & Add Operation:
1284  *	atom_arg1 - Add Operand
1285  *	atom_arg2 - ignored
1286  */
1287 typedef struct ibt_wr_atomic_s {
1288 	ib_vaddr_t	atom_raddr;	/* Remote address. */
1289 	ibt_atom_arg_t	atom_arg1;	/* operand #1 */
1290 	ibt_atom_arg_t	atom_arg2;	/* operand #2 */
1291 	ibt_rkey_t	atom_rkey;	/* R_Key. */
1292 } ibt_wr_atomic_t;
1293 
1294 /*
1295  * RDMA WR
1296  * Immediate Data indicator in ibt_wr_flags_t.
1297  */
1298 typedef struct ibt_wr_rdma_s {
1299 	ib_vaddr_t	rdma_raddr;	/* Remote address. */
1300 	ibt_rkey_t	rdma_rkey;	/* R_Key. */
1301 	ibt_immed_t	rdma_immed;	/* Immediate Data */
1302 } ibt_wr_rdma_t;
1303 
1304 /*
1305  * Fast Register Physical Memory Region Work Request.
1306  */
1307 typedef struct ibt_wr_reg_pmr_s {
1308 	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
1309 					/* client for the first byte of the */
1310 					/* region */
1311 	ib_memlen_t	pmr_len;	/* Length of region to register */
1312 	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
1313 					/* IOVA within the 1st physical */
1314 					/* buffer */
1315 	ibt_mr_hdl_t	pmr_mr_hdl;
1316 	ibt_phys_addr_t	*pmr_addr_list; /* List of physical buffers accessed */
1317 					/* as an array */
1318 	size_t		pmr_buf_sz;	/* size of uniform size PBEs */
1319 	uint_t		pmr_num_buf;	/* Num of entries in the pmr_buf_list */
1320 	ibt_lkey_t	pmr_lkey;
1321 	ibt_rkey_t	pmr_rkey;
1322 	ibt_mr_flags_t	pmr_flags;
1323 	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
1324 } ibt_wr_reg_pmr_t;
1325 
1326 /* phys reg function or WR */
1327 typedef union ibt_reg_req_u {
1328 	ibt_pmr_attr_t		fn_arg;
1329 	ibt_wr_reg_pmr_t	wr;
1330 } ibt_reg_req_t;
1331 
1332 /*
1333  * Local Invalidate.
1334  */
1335 typedef struct ibt_wr_li_s {
1336 	ibt_mr_hdl_t	li_mr_hdl;	/* Null for MW invalidates */
1337 	ibt_mw_hdl_t	li_mw_hdl;	/* Null for MR invalidates */
1338 	ibt_lkey_t	li_lkey;	/* Ignore for MW invalidates */
1339 	ibt_rkey_t	li_rkey;
1340 } ibt_wr_li_t;
1341 
1342 /*
1343  * Reserved For Future Use.
1344  * Raw IPv6 Send WR
1345  */
1346 typedef struct ibt_wr_ripv6_s {
1347 	ib_lid_t	rip_dlid;	/* DLID */
1348 	ib_path_bits_t  rip_slid_bits;	/* SLID path bits, SWG_0033 errata */
1349 	uint8_t		rip_sl:4;	/* SL */
1350 	ibt_srate_t	rip_rate;	/* Max Static Rate, SWG_0007 errata */
1351 } ibt_wr_ripv6_t;
1352 
1353 /*
1354  * Reserved For Future Use.
1355  * Raw Ethertype Send WR
1356  */
1357 typedef struct ibt_wr_reth_s {
1358 	ib_ethertype_t  reth_type;	/* Ethertype */
1359 	ib_lid_t	reth_dlid;	/* DLID */
1360 	ib_path_bits_t	reth_slid_bits;	/* SLID path bits, SWG_0033 errata */
1361 	uint8_t		reth_sl:4;	/* SL */
1362 	ibt_srate_t	reth_rate;	/* Max Static Rate, SWG_0007 errata */
1363 } ibt_wr_reth_t;
1364 
1365 /*
1366  * Reserved For Future Use.
1367  * RD Send WR, Operation type in ibt_wrc_opcode_t.
1368  */
1369 typedef struct ibt_wr_rd_s {
1370 	ibt_rd_dest_hdl_t	rdwr_dest_hdl;
1371 	union {
1372 	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND */
1373 	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
1374 						/* IBT_WRC_RDMAW */
1375 	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
1376 	    ibt_wr_atomic_t	*atomic;	/* IBT_WRC_FADD */
1377 						/* IBT_WRC_CSWAP */
1378 	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
1379 	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
1380 	} rdwr;
1381 } ibt_wr_rd_t;
1382 
1383 /*
1384  * Reserved For Future Use.
1385  * UC Send WR, Operation type in ibt_wrc_opcode_t, the only valid
1386  * ones are:
1387  *		IBT_WRC_SEND
1388  *		IBT_WRC_RDMAW
1389  *		IBT_WRC_BIND
1390  */
1391 typedef struct ibt_wr_uc_s {
1392 	union {
1393 	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND */
1394 	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAW */
1395 	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
1396 	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
1397 	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
1398 	} ucwr;
1399 } ibt_wr_uc_t;
1400 
1401 /*
1402  * RC Send WR, Operation type in ibt_wrc_opcode_t.
1403  */
1404 typedef struct ibt_wr_rc_s {
1405 	union {
1406 	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND w/ immediate */
1407 	    ibt_rkey_t		send_inval;	/* IBT_WRC_SEND w/ invalidate */
1408 	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
1409 						/* IBT_WRC_RDMAW */
1410 	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
1411 	    ibt_wr_atomic_t	*atomic;	/* IBT_WRC_CSWAP */
1412 						/* IBT_WRC_FADD */
1413 	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
1414 	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
1415 	} rcwr;
1416 } ibt_wr_rc_t;
1417 
1418 /*
1419  * UD Send WR, the only valid Operation is IBT_WRC_SEND.
1420  */
1421 typedef struct ibt_wr_ud_s {
1422 	ibt_immed_t		udwr_immed;
1423 	ibt_ud_dest_hdl_t	udwr_dest;
1424 } ibt_wr_ud_t;
1425 
1426 /* LSO variant */
1427 typedef struct ibt_wr_lso_s {
1428 	ibt_ud_dest_hdl_t	lso_ud_dest;
1429 	uint8_t			*lso_hdr;
1430 	ib_msglen_t		lso_hdr_sz;
1431 	ib_msglen_t		lso_mss;
1432 } ibt_wr_lso_t;
1433 
1434 /*
1435  * Send Work Request (WR) attributes structure.
1436  *
1437  * Operation type in ibt_wrc_opcode_t.
1438  * Immediate Data indicator in ibt_wr_flags_t.
1439  */
1440 typedef struct ibt_send_wr_s {
1441 	ibt_wrid_t		wr_id;		/* WR ID */
1442 	ibt_wr_flags_t		wr_flags;	/* Work Request Flags. */
1443 	ibt_tran_srv_t		wr_trans;	/* Transport Type. */
1444 	ibt_wrc_opcode_t	wr_opcode;	/* Operation Type. */
1445 	uint8_t			wr_rsvd;	/* maybe later */
1446 	uint32_t		wr_nds;		/* Number of data segments */
1447 						/* pointed to by wr_sgl */
1448 	ibt_wr_ds_t		*wr_sgl;	/* SGL */
1449 	union {
1450 		ibt_wr_ud_t	ud;
1451 		ibt_wr_rc_t	rc;
1452 		ibt_wr_rd_t	rd;	/* Reserved For Future Use */
1453 		ibt_wr_uc_t	uc;	/* Reserved For Future Use */
1454 		ibt_wr_reth_t	reth;	/* Reserved For Future Use */
1455 		ibt_wr_ripv6_t	ripv6;	/* Reserved For Future Use */
1456 		ibt_wr_lso_t	ud_lso;
1457 	} wr;				/* operation specific */
1458 } ibt_send_wr_t;
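
/*
 * Illustrative sketch (hypothetical names): filling a signaled RC RDMA
 * write request that carries one data segment.  Posting the request to a
 * channel is done through the send-posting interface of the IBTI/IBCI,
 * which is outside the scope of this header.
 *
 *	static void
 *	example_fill_rdma_write(ibt_send_wr_t *wr, ibt_wrid_t id,
 *	    ibt_wr_ds_t *sgl, ib_vaddr_t raddr, ibt_rkey_t rkey)
 *	{
 *		bzero(wr, sizeof (*wr));
 *		wr->wr_id = id;
 *		wr->wr_flags = IBT_WR_SEND_SIGNAL;
 *		wr->wr_trans = IBT_RC_SRV;
 *		wr->wr_opcode = IBT_WRC_RDMAW;
 *		wr->wr_nds = 1;
 *		wr->wr_sgl = sgl;
 *		wr->wr.rc.rcwr.rdma.rdma_raddr = raddr;
 *		wr->wr.rc.rcwr.rdma.rdma_rkey = rkey;
 *	}
 */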
1459 
1460 /*
1461  * Receive Work Request (WR) attributes structure.
1462  */
1463 typedef struct ibt_recv_wr_s {
1464 	ibt_wrid_t		wr_id;		/* WR ID */
1465 	uint32_t		wr_nds;		/* number of data segments */
1466 						/* pointed to by wr_sgl */
1467 	ibt_wr_ds_t		*wr_sgl;	/* SGL */
1468 } ibt_recv_wr_t;
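
/*
 * Illustrative sketch (hypothetical names): a matching receive request
 * with a single data segment.
 *
 *	static void
 *	example_fill_recv(ibt_recv_wr_t *wr, ibt_wrid_t id, ibt_wr_ds_t *sgl)
 *	{
 *		wr->wr_id = id;
 *		wr->wr_nds = 1;
 *		wr->wr_sgl = sgl;
 *	}
 */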
1469 
1470 typedef union ibt_all_wr_u {
1471 	ibt_send_wr_t	send;
1472 	ibt_recv_wr_t	recv;
1473 } ibt_all_wr_t;
1474 
1475 
1476 /*
1477  * Asynchronous Events and Errors.
1478  *
1479  * The following codes are not used in calls to ibc_async_handler, but
1480  * are used by IBTL to inform IBT clients of a significant event.
1481  *
1482  *  IBT_HCA_ATTACH_EVENT	- New HCA available.
1483  *  IBT_HCA_DETACH_EVENT	- HCA is requesting not to be used.
1484  *
1485  * ERRORs on a channel indicate that the channel has entered error state.
1486  * EVENTs on a channel indicate that the channel has not changed state.
1487  *
1488  */
1489 typedef enum ibt_async_code_e {
1490 	IBT_EVENT_PATH_MIGRATED			= 0x000001,
1491 	IBT_EVENT_SQD				= 0x000002,
1492 	IBT_EVENT_COM_EST			= 0x000004,
1493 	IBT_ERROR_CATASTROPHIC_CHAN		= 0x000008,
1494 	IBT_ERROR_INVALID_REQUEST_CHAN		= 0x000010,
1495 	IBT_ERROR_ACCESS_VIOLATION_CHAN		= 0x000020,
1496 	IBT_ERROR_PATH_MIGRATE_REQ		= 0x000040,
1497 
1498 	IBT_ERROR_CQ				= 0x000080,
1499 
1500 	IBT_EVENT_PORT_UP			= 0x000100,
1501 	IBT_ERROR_PORT_DOWN			= 0x000200,
1502 	IBT_ERROR_LOCAL_CATASTROPHIC		= 0x000400,
1503 
1504 	IBT_HCA_ATTACH_EVENT			= 0x000800,
1505 	IBT_HCA_DETACH_EVENT			= 0x001000,
1506 	IBT_ASYNC_OPAQUE1			= 0x002000,
1507 	IBT_ASYNC_OPAQUE2			= 0x004000,
1508 	IBT_ASYNC_OPAQUE3			= 0x008000,
1509 	IBT_ASYNC_OPAQUE4			= 0x010000,
1510 	IBT_EVENT_LIMIT_REACHED_SRQ		= 0x020000,
1511 	IBT_EVENT_EMPTY_CHAN			= 0x040000,
1512 	IBT_ERROR_CATASTROPHIC_SRQ		= 0x080000,
1513 
1514 	IBT_PORT_CHANGE_EVENT			= 0x100000,
1515 	IBT_CLNT_REREG_EVENT			= 0x200000
1516 } ibt_async_code_t;
1517 
1518 #define	IBT_PORT_EVENTS (IBT_EVENT_PORT_UP|IBT_PORT_CHANGE_EVENT|\
1519     IBT_ERROR_PORT_DOWN|IBT_CLNT_REREG_EVENT)
1520 
1521 typedef enum ibt_port_change_e {
1522 	IBT_PORT_CHANGE_SGID		= 0x000001, /* SGID table */
1523 	IBT_PORT_CHANGE_PKEY		= 0x000002, /* P_Key table */
1524 	IBT_PORT_CHANGE_SM_LID		= 0x000004, /* Master SM LID */
1525 	IBT_PORT_CHANGE_SM_SL		= 0x000008, /* Master SM SL */
1526 	IBT_PORT_CHANGE_SUB_TIMEOUT	= 0x000010, /* Subnet Timeout */
1527 	IBT_PORT_CHANGE_SM_FLAG		= 0x000020, /* IsSMDisabled bit */
1528 	IBT_PORT_CHANGE_REREG		= 0x000040  /* IsClientReregSupport */
1529 } ibt_port_change_t;
1530 
1531 /*
1532  * ibt_ci_data_in() and ibt_ci_data_out() flags.
1533  */
1534 typedef enum ibt_ci_data_flags_e {
1535 	IBT_CI_NO_FLAGS		= 0,
1536 	IBT_CI_COMPLETE_ALLOC	= (1 << 0)
1537 } ibt_ci_data_flags_t;
1538 
1539 /*
1540  * Used by ibt_ci_data_in() and ibt_ci_data_out(); identifies the type of
1541  * handle for which mapping data is being obtained.
1542  */
1543 typedef enum ibt_object_type_e {
1544 	IBT_HDL_HCA	=	1,
1545 	IBT_HDL_CHANNEL,
1546 	IBT_HDL_CQ,
1547 	IBT_HDL_PD,
1548 	IBT_HDL_MR,
1549 	IBT_HDL_MW,
1550 	IBT_HDL_UD_DEST,
1551 	IBT_HDL_SCHED,
1552 	IBT_HDL_OPAQUE1,
1553 	IBT_HDL_OPAQUE2,
1554 	IBT_HDL_SRQ
1555 } ibt_object_type_t;
1556 
1557 /*
1558  * Standard information for ibt_ci_data_in() for memory regions.
1559  *
1560  * IBT_MR_DATA_IN_IF_VERSION is the value used in the mr_rev member.
1561  * mr_func is the callback handler.  mr_arg1 and mr_arg2 are its arguments.
1562  */
1563 #define	IBT_MR_DATA_IN_IF_VERSION	1
1564 typedef struct ibt_mr_data_in_s {
1565 	uint_t	mr_rev;
1566 	void	(*mr_func)(void *, void *);
1567 	void	*mr_arg1;
1568 	void	*mr_arg2;
1569 } ibt_mr_data_in_t;
1570 
1571 /*
1572  * Memory error handler data structures; code, and payload data.
1573  */
1574 typedef enum ibt_mem_code_s {
1575 	IBT_MEM_AREA	= 0x1,
1576 	IBT_MEM_REGION	= 0x2
1577 } ibt_mem_code_t;
1578 
1579 typedef struct ibt_mem_data_s {
1580 	uint64_t	ev_fma_ena;	/* FMA Error data */
1581 	ibt_mr_hdl_t	ev_mr_hdl;	/* MR handle */
1582 	ibt_ma_hdl_t	ev_ma_hdl;	/* MA handle */
1583 } ibt_mem_data_t;
1584 
1585 /*
1586  * Special case failure type.
1587  */
1588 typedef enum ibt_failure_type_e {
1589 	IBT_FAILURE_STANDARD	= 0,
1590 	IBT_FAILURE_CI,
1591 	IBT_FAILURE_IBMF,
1592 	IBT_FAILURE_IBTL,
1593 	IBT_FAILURE_IBCM,
1594 	IBT_FAILURE_IBDM,
1595 	IBT_FAILURE_IBSM
1596 } ibt_failure_type_t;
1597 
1598 /*
1599  * RDMA IP CM service Annex definitions
1600  */
1601 typedef struct ibt_ip_addr_s {
1602 	sa_family_t family;		/* AF_INET or AF_INET6 */
1603 	union {
1604 		in_addr_t	ip4addr;
1605 		in6_addr_t	ip6addr;
1606 	} un;
1607 	uint32_t	ip6_scope_id;	/* Applicable only for AF_INET6 */
1608 } ibt_ip_addr_t;
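
/*
 * Illustrative sketch (hypothetical names): describing an IPv4 endpoint
 * for the RDMA IP CM definitions above.  The address is assumed to
 * already be in the byte order expected by the consumer.
 *
 *	static void
 *	example_fill_ip4(ibt_ip_addr_t *ip, in_addr_t a)
 *	{
 *		bzero(ip, sizeof (*ip));
 *		ip->family = AF_INET;
 *		ip->un.ip4addr = a;
 *	}
 */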
1609 
1610 #ifdef __cplusplus
1611 }
1612 #endif
1613 
1614 #endif /* _SYS_IB_IBTL_IBTL_TYPES_H */
1615