1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * This file implements the Work Queue Entry (WQE) management in IBMF.
28  */
29 
30 #include <sys/ib/mgt/ibmf/ibmf_impl.h>
31 
32 extern int ibmf_trace_level;
33 extern int ibmf_send_wqes_per_port, ibmf_recv_wqes_per_port;
34 
35 #define	IBMF_INIT_SG_ELEMENT(sg, mem, lkey, size)	{ \
36 	(sg).ds_va = (ib_vaddr_t)(uintptr_t)(mem);	\
37 	(sg).ds_key = (lkey);				\
38 	(sg).ds_len = (size);				\
39 }
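/*
 * Annotation (not in the original source): IBMF_INIT_SG_ELEMENT simply
 * fills in one IBT scatter/gather element.  A hedged usage sketch, with
 * values taken from the send path later in this file:
 *
 *	ibt_wr_ds_t sgl;
 *	IBMF_INIT_SG_ELEMENT(sgl, wqep->send_mem, wqep->send_sg_lkey,
 *	    IBMF_MAD_SIZE);
 *
 * expands to plain assignments of ds_va (buffer address), ds_key (the
 * lkey from memory registration) and ds_len (buffer length in bytes).
 */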
40 
41 #define	IBMF_ADDR_TO_SEND_WR_ID(ptr, id)		\
42 	(id) = (ibt_wrid_t)(uintptr_t)(ptr)
43 
44 #define	IBMF_ADDR_TO_RECV_WR_ID(ptr, id)		 \
45 	(id) = ((ibt_wrid_t)(uintptr_t)(ptr) | IBMF_RCV_CQE)
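/*
 * Annotation: both WR IDs are the kernel virtual address of the owning
 * WQE; receive WR IDs additionally carry the IBMF_RCV_CQE tag bit.
 * Since WQE structures are aligned well past the low-order bits, a
 * completion handler can recover both the WQE pointer and the
 * send/receive distinction from the same 64-bit ID.  A sketch (assuming
 * an ibt_wc_t work completion, wc, as delivered by IBTF):
 *
 *	if (wc.wc_id & IBMF_RCV_CQE)
 *		rwqep = (ibmf_recv_wqe_t *)(uintptr_t)
 *		    (wc.wc_id & ~IBMF_RCV_CQE);
 *	else
 *		swqep = (ibmf_send_wqe_t *)(uintptr_t)wc.wc_id;
 */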
46 
47 #define	IBMF_INIT_RMPP_HDR(hdrp, ver, type, respt, flg, status, seg, lennwl) { \
48 	(hdrp)->rmpp_version = (ver);			\
49 	(hdrp)->rmpp_type = (type);			\
50 	(hdrp)->rmpp_resp_time = (respt);		\
51 	(hdrp)->rmpp_flags = (flg);			\
52 	(hdrp)->rmpp_status = (status);			\
53 	(hdrp)->rmpp_segnum = (h2b32(seg));		\
54 	(hdrp)->rmpp_pyldlen_nwl = (h2b32(lennwl));	\
55 }
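/*
 * Annotation: rmpp_segnum and rmpp_pyldlen_nwl are 32-bit wire fields,
 * so the macro byte-swaps them to big-endian with h2b32(); the other
 * arguments are single bytes or bit-fields and are stored unchanged.
 */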
56 
57 static int ibmf_send_wqe_cache_constructor(void *buf, void *cdrarg,
58     int kmflags);
59 static void ibmf_send_wqe_cache_destructor(void *buf, void *cdrarg);
60 static int ibmf_recv_wqe_cache_constructor(void *buf, void *cdrarg,
61     int kmflags);
62 static void ibmf_recv_wqe_cache_destructor(void *buf, void *cdrarg);
63 static int ibmf_i_extend_wqe_mem(ibmf_ci_t *cip,
64     ibmf_qp_handle_t ibmf_qp_handle, ibmf_wqe_mgt_t *wqe_mgt,
65     boolean_t block);
66 
67 /*
68  * ibmf_send_wqe_cache_constructor():
69  *	Constructor for the kmem cache used for send WQEs for special QPs
70  */
71 /* ARGSUSED */
72 static int
73 ibmf_send_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
74 {
75 	ibmf_send_wqe_t		*send_wqe = (ibmf_send_wqe_t *)buf;
76 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
77 	ibmf_wqe_mgt_t		*wqe_mgt;
78 
79 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
80 	    ibmf_send_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
81 	    "ibmf_send_wqe_cache_constructor() enter, buf = %p, cdrarg = %p\n",
82 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
83 
84 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
85 
86 	/* initialize send WQE context */
87 	send_wqe->send_sg_mem =
88 	    (ib_vaddr_t)(uintptr_t)vmem_alloc(cip->ci_wqe_ib_vmem,
89 	    IBMF_MEM_PER_WQE, kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
90 	if (send_wqe->send_sg_mem == 0) {
91 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
92 		    ibmf_send_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
93 		    "ibmf_send_wqe_cache_constructor(): %s\n", tnf_string, msg,
94 		    "Failed vmem allocation in send WQE cache constructor");
95 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
96 		    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
97 		    "ibmf_send_wqe_cache_constructor() exit\n");
98 		return (-1);
99 	}
100 
101 	mutex_enter(&cip->ci_wqe_mutex);
102 	wqe_mgt = cip->ci_wqe_mgt_list;
103 
104 	/* Look for the WQE management struct that includes this address */
105 	while (wqe_mgt != NULL) {
106 		mutex_enter(&wqe_mgt->wqes_mutex);
107 		if ((send_wqe->send_sg_mem >= wqe_mgt->wqes_ib_mem) &&
108 		    (send_wqe->send_sg_mem < (wqe_mgt->wqes_ib_mem +
109 		    wqe_mgt->wqes_kmem_sz))) {
110 			mutex_exit(&wqe_mgt->wqes_mutex);
111 			break;
112 		}
113 		mutex_exit(&wqe_mgt->wqes_mutex);
114 		wqe_mgt = wqe_mgt->wqe_mgt_next;
115 	}
116 
117 	if (wqe_mgt == NULL) {
118 		mutex_exit(&cip->ci_wqe_mutex);
119 		vmem_free(cip->ci_wqe_ib_vmem,
120 		    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
121 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
122 		    ibmf_send_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
123 		    "ibmf_send_wqe_cache_constructor(): %s\n", tnf_string, msg,
124 		    "Address not found in WQE mgt list");
125 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
126 		    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
127 		    "ibmf_send_wqe_cache_constructor() exit\n");
128 		return (-1);
129 	}
130 
131 	mutex_enter(&wqe_mgt->wqes_mutex);
132 
133 	send_wqe->send_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
134 	    (uintptr_t)(send_wqe->send_sg_mem - wqe_mgt->wqes_ib_mem));
135 	bzero(send_wqe->send_mem, IBMF_MEM_PER_WQE);
136 	send_wqe->send_sg_lkey = wqe_mgt->wqes_ib_lkey;
137 	send_wqe->send_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
138 	send_wqe->send_wqe_flags = 0;
139 	send_wqe->send_wqe_next = NULL;
140 
141 	mutex_exit(&wqe_mgt->wqes_mutex);
142 	mutex_exit(&cip->ci_wqe_mutex);
143 
144 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
145 	    ibmf_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
146 	    "ibmf_send_wqe_cache_constructor() exit\n");
147 
148 	return (0);
149 }
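/*
 * Annotation: because this runs as a kmem_cache constructor, the vmem
 * slice and the pointers derived from it survive ordinary
 * kmem_cache_alloc()/kmem_cache_free() cycles; they are released only
 * when the cache reclaims the buffer and the destructor below runs.
 */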
150 
151 /*
152  * ibmf_send_wqe_cache_destructor():
153  *	Destructor for send WQE kmem cache for special QPs
154  */
155 /* ARGSUSED */
156 static void
157 ibmf_send_wqe_cache_destructor(void *buf, void *cdrarg)
158 {
159 	ibmf_send_wqe_t		*send_wqe = (ibmf_send_wqe_t *)buf;
160 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
161 
162 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
163 	    ibmf_i_send_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
164 	    "ibmf_send_wqe_cache_destructor() enter, buf = %p, cdrarg = %p\n",
165 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
166 
167 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
168 
169 	/* Free the vmem allocated for the WQE */
170 	vmem_free(cip->ci_wqe_ib_vmem,
171 	    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
172 	send_wqe->send_mem = NULL;
173 
174 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
175 	    ibmf_i_send_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
176 	    "ibmf_send_wqe_cache_destructor() exit\n");
177 }
178 
179 /*
180  * ibmf_recv_wqe_cache_constructor():
181  *	Constructor for receive WQE kmem cache for special QPs
182  */
183 /* ARGSUSED */
184 static int
185 ibmf_recv_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
186 {
187 	ibmf_recv_wqe_t		*recv_wqe = (ibmf_recv_wqe_t *)buf;
188 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
189 	ibmf_wqe_mgt_t		*wqe_mgt;
190 
191 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
192 	    ibmf_i_recv_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
193 	    "ibmf_recv_wqe_cache_constructor() enter, buf = %p, cdrarg = %p\n",
194 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
195 
196 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
197 
198 	/* initialize recv WQE context */
199 	recv_wqe->recv_sg_mem =
200 	    (ib_vaddr_t)(uintptr_t)vmem_alloc(cip->ci_wqe_ib_vmem,
201 	    IBMF_MEM_PER_WQE, kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
202 	if (recv_wqe->recv_sg_mem == 0) {
203 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
204 		    ibmf_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
205 		    "ibmf_recv_wqe_cache_constructor(): %s\n", tnf_string, msg,
206 		    "Failed vmem allocation in receive WQE cache constructor");
207 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
208 		    ibmf_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
209 		    "ibmf_recv_wqe_cache_constructor() exit\n");
210 		return (-1);
211 	}
212 
213 	mutex_enter(&cip->ci_wqe_mutex);
214 	wqe_mgt = cip->ci_wqe_mgt_list;
215 
216 	/* Look for the WQE management struct that includes this address */
217 	while (wqe_mgt != NULL) {
218 		mutex_enter(&wqe_mgt->wqes_mutex);
219 		if ((recv_wqe->recv_sg_mem >= wqe_mgt->wqes_ib_mem) &&
220 		    (recv_wqe->recv_sg_mem < (wqe_mgt->wqes_ib_mem +
221 		    wqe_mgt->wqes_kmem_sz))) {
222 			mutex_exit(&wqe_mgt->wqes_mutex);
223 			break;
224 		}
225 		mutex_exit(&wqe_mgt->wqes_mutex);
226 		wqe_mgt = wqe_mgt->wqe_mgt_next;
227 	}
228 
229 	if (wqe_mgt == NULL) {
230 		mutex_exit(&cip->ci_wqe_mutex);
231 		vmem_free(cip->ci_wqe_ib_vmem,
232 		    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
233 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
234 		    ibmf_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR, "",
235 		    "ibmf_recv_wqe_cache_constructor(): %s\n", tnf_string, msg,
236 		    "Address not found in WQE mgt list");
237 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
238 		    ibmf_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
239 		    "ibmf_recv_wqe_cache_constructor() exit\n");
240 		return (-1);
241 	}
242 
243 	mutex_enter(&wqe_mgt->wqes_mutex);
244 
245 	recv_wqe->recv_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
246 	    (uintptr_t)(recv_wqe->recv_sg_mem - wqe_mgt->wqes_ib_mem));
247 	bzero(recv_wqe->recv_mem, IBMF_MEM_PER_WQE);
248 	recv_wqe->recv_sg_lkey = wqe_mgt->wqes_ib_lkey;
249 	recv_wqe->recv_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
250 	recv_wqe->recv_wqe_next = NULL;
251 	recv_wqe->recv_msg = NULL;
252 	recv_wqe->recv_wqe_flags = 0;
253 
254 	mutex_exit(&wqe_mgt->wqes_mutex);
255 	mutex_exit(&cip->ci_wqe_mutex);
256 
257 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
258 	    ibmf_i_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
259 	    "ibmf_recv_wqe_cache_constructor() exit\n");
260 
261 	return (0);
262 }
263 
264 /*
265  * ibmf_recv_wqe_cache_destructor():
266  *	Destructor for receive WQE kmem cache for special QPs
267  */
268 /* ARGSUSED */
269 static void
270 ibmf_recv_wqe_cache_destructor(void *buf, void *cdrarg)
271 {
272 	ibmf_recv_wqe_t		*recv_wqe = (ibmf_recv_wqe_t *)buf;
273 	ibmf_ci_t		*cip = (ibmf_ci_t *)cdrarg;
274 
275 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
276 	    ibmf_i_recv_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
277 	    "ibmf_recv_wqe_cache_destructor() enter, buf = %p, cdrarg = %p\n",
278 	    tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
279 
280 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
281 
282 	/* Free the vmem allocated for the WQE */
283 	vmem_free(cip->ci_wqe_ib_vmem,
284 	    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
285 	recv_wqe->recv_mem = NULL;
286 
287 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
288 	    ibmf_i_recv_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
289 	    "ibmf_recv_wqe_cache_destructor() exit\n");
290 }
291 
292 /*
293  * ibmf_altqp_send_wqe_cache_constructor():
294  *	Constructor for the kmem cache used for send WQEs for alternate QPs
295  */
296 /* ARGSUSED */
297 int
298 ibmf_altqp_send_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
299 {
300 	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
301 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
302 	ibmf_wqe_mgt_t	*wqe_mgt;
303 
304 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
305 	    ibmf_altqp_send_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
306 	    "ibmf_altqp_send_wqe_cache_constructor() enter, buf = %p, "
307 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
308 
309 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
310 
311 	/* initialize send WQE context */
312 	send_wqe->send_sg_mem = (ib_vaddr_t)(uintptr_t)vmem_alloc(
313 	    qp_ctx->isq_wqe_ib_vmem, IBMF_MEM_PER_WQE,
314 	    kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
315 	if (send_wqe->send_sg_mem == 0) {
316 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
317 		    ibmf_altqp_send_wqe_cache_constructor_err, IBMF_TNF_ERROR,
318 		    "", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
319 		    tnf_string, msg, "Failed vmem allocation in "
320 		    "alternate QP send WQE cache constructor");
321 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
322 		    ibmf_altqp_send_wqe_cache_constructor_end, IBMF_TNF_TRACE,
323 		    "", "ibmf_altqp_send_wqe_cache_constructor() exit\n");
324 		return (-1);
325 	}
326 
327 	mutex_enter(&qp_ctx->isq_wqe_mutex);
328 	wqe_mgt = qp_ctx->isq_wqe_mgt_list;
329 
330 	/* Look for the WQE management struct that includes this address */
331 	while (wqe_mgt != NULL) {
332 		mutex_enter(&wqe_mgt->wqes_mutex);
333 		if ((send_wqe->send_sg_mem >= wqe_mgt->wqes_ib_mem) &&
334 		    (send_wqe->send_sg_mem < (wqe_mgt->wqes_ib_mem +
335 		    wqe_mgt->wqes_kmem_sz))) {
336 			mutex_exit(&wqe_mgt->wqes_mutex);
337 			break;
338 		}
339 		mutex_exit(&wqe_mgt->wqes_mutex);
340 		wqe_mgt = wqe_mgt->wqe_mgt_next;
341 	}
342 
343 	if (wqe_mgt == NULL) {
344 		mutex_exit(&qp_ctx->isq_wqe_mutex);
345 		vmem_free(qp_ctx->isq_wqe_ib_vmem,
346 		    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
347 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
348 		    ibmf_altqp_send_wqe_cache_constructor_err, IBMF_TNF_ERROR,
349 		    "", "ibmf_altqp_send_wqe_cache_constructor(): %s\n",
350 		    tnf_string, msg, "Address not found in WQE mgt list");
351 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
352 		    ibmf_altqp_send_wqe_cache_constructor_end,
353 		    IBMF_TNF_TRACE, "",
354 		    "ibmf_altqp_send_wqe_cache_constructor() exit\n");
355 		return (-1);
356 	}
357 
358 	mutex_enter(&wqe_mgt->wqes_mutex);
359 
360 	send_wqe->send_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
361 	    (uintptr_t)(send_wqe->send_sg_mem - wqe_mgt->wqes_ib_mem));
362 	bzero(send_wqe->send_mem, IBMF_MEM_PER_WQE);
363 	send_wqe->send_sg_lkey = wqe_mgt->wqes_ib_lkey;
364 	send_wqe->send_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
365 	send_wqe->send_wqe_flags = 0;
366 
367 	mutex_exit(&wqe_mgt->wqes_mutex);
368 	mutex_exit(&qp_ctx->isq_wqe_mutex);
369 
370 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
371 	    ibmf_i_altqp_send_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
372 	    "ibmf_altqp_send_wqe_cache_constructor() exit\n");
373 
374 	return (0);
375 }
376 
377 /*
378  * ibmf_altqp_send_wqe_cache_destructor():
379  *	Destructor for send WQE kmem cache for alternate QPs
380  */
381 /* ARGSUSED */
382 void
383 ibmf_altqp_send_wqe_cache_destructor(void *buf, void *cdrarg)
384 {
385 	ibmf_send_wqe_t	*send_wqe = (ibmf_send_wqe_t *)buf;
386 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
387 
388 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
389 	    ibmf_i_altqp_send_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
390 	    "ibmf_altqp_send_wqe_cache_destructor() enter, buf = %p, "
391 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
392 
393 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*send_wqe))
394 
395 	/* Free the vmem allocated for the WQE */
396 	vmem_free(qp_ctx->isq_wqe_ib_vmem,
397 	    (void *)(uintptr_t)send_wqe->send_sg_mem, IBMF_MEM_PER_WQE);
398 	send_wqe->send_mem = NULL;
399 
400 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
401 	    ibmf_i_altqp_send_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
402 	    "ibmf_altqp_send_wqe_cache_destructor() exit\n");
403 }
404 
405 /*
406  * ibmf_altqp_recv_wqe_cache_constructor():
407  *	Constructor for receive WQE kmem cache for alternate QPs
408  */
409 /* ARGSUSED */
410 int
411 ibmf_altqp_recv_wqe_cache_constructor(void *buf, void *cdrarg, int kmflags)
412 {
413 	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
414 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
415 	ibmf_wqe_mgt_t	*wqe_mgt;
416 
417 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
418 	    ibmf_i_altqp_recv_wqe_cache_constructor_start, IBMF_TNF_TRACE, "",
419 	    "ibmf_altqp_recv_wqe_cache_constructor() enter, buf = %p, "
420 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
421 
422 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
423 
424 	/* initialize recv WQE context */
425 	recv_wqe->recv_sg_mem = (ib_vaddr_t)(uintptr_t)vmem_alloc(
426 	    qp_ctx->isq_wqe_ib_vmem, IBMF_MEM_PER_WQE,
427 	    kmflags == KM_SLEEP ? VM_SLEEP : VM_NOSLEEP);
428 	if (recv_wqe->recv_sg_mem == 0) {
429 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
430 		    ibmf_altqp_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR,
431 		    "", "ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
432 		    tnf_string, msg,
433 		    "Failed vmem allocation in recv WQE cache constructor");
434 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
435 		    ibmf_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE,
436 		    "", "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
437 		return (-1);
438 	}
439 
440 	mutex_enter(&qp_ctx->isq_wqe_mutex);
441 	wqe_mgt = qp_ctx->isq_wqe_mgt_list;
442 
443 	/* Look for the WQE management struct that includes this address */
444 	while (wqe_mgt != NULL) {
445 		mutex_enter(&wqe_mgt->wqes_mutex);
446 		if ((recv_wqe->recv_sg_mem >= wqe_mgt->wqes_ib_mem) &&
447 		    (recv_wqe->recv_sg_mem < (wqe_mgt->wqes_ib_mem +
448 		    wqe_mgt->wqes_kmem_sz))) {
449 			mutex_exit(&wqe_mgt->wqes_mutex);
450 			break;
451 		}
452 		mutex_exit(&wqe_mgt->wqes_mutex);
453 		wqe_mgt = wqe_mgt->wqe_mgt_next;
454 	}
455 
456 	if (wqe_mgt == NULL) {
457 		mutex_exit(&qp_ctx->isq_wqe_mutex);
458 		vmem_free(qp_ctx->isq_wqe_ib_vmem,
459 		    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
460 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
461 		    ibmf_altqp_recv_wqe_cache_constructor_err, IBMF_TNF_ERROR,
462 		    "", "ibmf_altqp_recv_wqe_cache_constructor(): %s\n",
463 		    tnf_string, msg, "Address not found in WQE mgt list");
464 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
465 		    ibmf_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE,
466 		    "", "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
467 		return (-1);
468 	}
469 
470 	mutex_enter(&wqe_mgt->wqes_mutex);
471 
472 	recv_wqe->recv_mem = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem +
473 	    (uintptr_t)(recv_wqe->recv_sg_mem - wqe_mgt->wqes_ib_mem));
474 	bzero(recv_wqe->recv_mem, IBMF_MEM_PER_WQE);
475 	recv_wqe->recv_sg_lkey = wqe_mgt->wqes_ib_lkey;
476 	recv_wqe->recv_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
477 	recv_wqe->recv_wqe_flags = 0;
478 
479 	mutex_exit(&wqe_mgt->wqes_mutex);
480 	mutex_exit(&qp_ctx->isq_wqe_mutex);
481 
482 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
483 	    ibmf_i_altqp_recv_wqe_cache_constructor_end, IBMF_TNF_TRACE, "",
484 	    "ibmf_altqp_recv_wqe_cache_constructor() exit\n");
485 
486 	return (0);
487 }
488 
489 /*
490  * ibmf_altqp_recv_wqe_cache_destructor():
491  *	Destructor for receive WQE kmem cache for alternate QPs
492  */
493 /* ARGSUSED */
494 void
495 ibmf_altqp_recv_wqe_cache_destructor(void *buf, void *cdrarg)
496 {
497 	ibmf_recv_wqe_t	*recv_wqe = (ibmf_recv_wqe_t *)buf;
498 	ibmf_alt_qp_t	*qp_ctx = (ibmf_alt_qp_t *)cdrarg;
499 
500 	IBMF_TRACE_2(IBMF_TNF_DEBUG, DPRINT_L4,
501 	    ibmf_i_altqp_recv_wqe_cache_destructor_start, IBMF_TNF_TRACE, "",
502 	    "ibmf_altqp_recv_wqe_cache_destructor() enter, buf = %p, "
503 	    "cdrarg = %p\n", tnf_opaque, buf, buf, tnf_opaque, cdrarg, cdrarg);
504 
505 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*recv_wqe))
506 
507 	/* Free the vmem allocated for the WQE */
508 	vmem_free(qp_ctx->isq_wqe_ib_vmem,
509 	    (void *)(uintptr_t)recv_wqe->recv_sg_mem, IBMF_MEM_PER_WQE);
510 	recv_wqe->recv_mem = NULL;
511 
512 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
513 	    ibmf_i_altqp_recv_wqe_cache_destructor_end, IBMF_TNF_TRACE, "",
514 	    "ibmf_altqp_recv_wqe_cache_destructor() exit\n");
515 }
516 
517 /*
518  * ibmf_i_init_wqes():
519  *	Create the kmem cache for send and receive WQEs
520  */
521 int
522 ibmf_i_init_wqes(ibmf_ci_t *cip)
523 {
524 	ibt_status_t		status;
525 	ibt_mr_hdl_t		mem_hdl;
526 	ibt_mr_desc_t		mem_desc;
527 	ibt_mr_attr_t		mem_attr;
528 	ibmf_wqe_mgt_t		*wqe_mgtp;
529 	char			string[128];
530 
531 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_wqes_start,
532 	    IBMF_TNF_TRACE, "", "ibmf_i_init_wqes() enter, cip = %p\n",
533 	    tnf_opaque, cip, cip);
534 
535 	/*
536 	 * Allocate memory for the WQE management structure
537 	 */
538 	wqe_mgtp = kmem_zalloc(sizeof (ibmf_wqe_mgt_t), KM_SLEEP);
539 	mutex_init(&wqe_mgtp->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
540 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgtp))
541 
542 	/*
543 	 * Allocate memory for the WQEs to be used by the special QPs on this CI
544 	 * There are two special QPs per CI port
545 	 */
546 	wqe_mgtp->wqes_kmem_sz = cip->ci_nports * 2 *
547 	    ((IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
548 	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port));
549 	wqe_mgtp->wqes_kmem =
550 	    kmem_zalloc(wqe_mgtp->wqes_kmem_sz, KM_SLEEP);
551 
552 	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgtp->wqes_kmem;
553 	mem_attr.mr_len = wqe_mgtp->wqes_kmem_sz;
554 	mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
555 	mem_attr.mr_as = NULL;
556 
557 	/* Register the allocated memory */
558 	status = ibt_register_mr(cip->ci_ci_handle, cip->ci_pd, &mem_attr,
559 	    &mem_hdl, &mem_desc);
560 	if (status != IBT_SUCCESS) {
561 		kmem_free(wqe_mgtp->wqes_kmem,
562 		    wqe_mgtp->wqes_kmem_sz);
563 		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
564 		    ibmf_i_init_wqes_err, IBMF_TNF_ERROR, "",
565 		    "ibmf_i_init_wqes(): %s, status = %d\n", tnf_string, msg,
566 		    "register of WQE mem failed", tnf_uint, status, status);
567 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
568 		    ibmf_i_init_wqes_end, IBMF_TNF_TRACE, "",
569 		    "ibmf_i_init_wqes() exit\n");
570 		return (IBMF_NO_RESOURCES);
571 	}
572 
573 	/* Store the memory registration information */
574 	wqe_mgtp->wqes_ib_mem = mem_desc.md_vaddr;
575 	wqe_mgtp->wqes_ib_lkey = mem_desc.md_lkey;
576 	wqe_mgtp->wqes_ib_mem_hdl = mem_hdl;
577 
578 	/* Create a vmem arena for the IB virtual address space */
579 	bzero(string, 128);
580 	(void) sprintf(string, "ibmf_%016" PRIx64 "_wqes", cip->ci_node_guid);
581 	cip->ci_wqe_ib_vmem = vmem_create(string,
582 	    (void *)(uintptr_t)wqe_mgtp->wqes_ib_mem, wqe_mgtp->wqes_kmem_sz,
583 	    sizeof (uint64_t), NULL, NULL, NULL, 0, VM_SLEEP);
584 
585 	mutex_enter(&cip->ci_wqe_mutex);
586 	cip->ci_wqe_mgt_list = wqe_mgtp;
587 	mutex_exit(&cip->ci_wqe_mutex);
588 
589 	bzero(string, 128);
590 	(void) sprintf(string, "ibmf_%016" PRIx64 "_swqe", cip->ci_node_guid);
591 	/* create a kmem cache for the send WQEs */
592 	cip->ci_send_wqes_cache = kmem_cache_create(string,
593 	    sizeof (ibmf_send_wqe_t), 0, ibmf_send_wqe_cache_constructor,
594 	    ibmf_send_wqe_cache_destructor, NULL, (void *)cip, NULL, 0);
595 
596 	bzero(string, 128);
597 	(void) sprintf(string, "ibmf_%016" PRIx64 "_rwqe", cip->ci_node_guid);
598 	/* create a kmem cache for the receive WQEs */
599 	cip->ci_recv_wqes_cache = kmem_cache_create(string,
600 	    sizeof (ibmf_recv_wqe_t), 0, ibmf_recv_wqe_cache_constructor,
601 	    ibmf_recv_wqe_cache_destructor, NULL, (void *)cip, NULL, 0);
602 
603 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_wqes_end,
604 	    IBMF_TNF_TRACE, "", "ibmf_i_init_wqes() exit\n");
605 
606 	return (IBMF_SUCCESS);
607 }
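/*
 * Annotation: the setup above pairs one registered kmem chunk with a
 * vmem arena spanning the *registered IB virtual address* range, carved
 * into IBMF_MEM_PER_WQE quanta.  The cache constructors translate an
 * arena address back to its kmem backing; a sketch of that translation:
 *
 *	offset  = sg_mem - wqe_mgt->wqes_ib_mem;
 *	kmem_va = (caddr_t)((uintptr_t)wqe_mgt->wqes_kmem + offset);
 *
 * so every arena allocation implicitly selects registered, zeroed
 * backing memory for the WQE's data buffer.
 */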
608 
609 /*
610  * ibmf_i_fini_wqes():
611  *	Destroy the kmem cache for send and receive WQEs
612  */
613 void
614 ibmf_i_fini_wqes(ibmf_ci_t *cip)
615 {
616 	ibmf_wqe_mgt_t	*wqe_mgt;
617 	ibt_mr_hdl_t	wqe_ib_mem_hdl;
618 	void		*wqe_kmem;
619 	uint64_t	wqe_kmem_sz;
620 
621 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_start,
622 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_wqes() enter, cip = %p\n",
623 	    tnf_opaque, cip, cip);
624 
625 	mutex_enter(&cip->ci_wqe_mutex);
626 
627 	wqe_mgt = cip->ci_wqe_mgt_list;
628 	while (wqe_mgt != NULL) {
629 		/* Remove the WQE mgt struct from the list */
630 		cip->ci_wqe_mgt_list = wqe_mgt->wqe_mgt_next;
631 		mutex_exit(&cip->ci_wqe_mutex);
632 
633 		mutex_enter(&wqe_mgt->wqes_mutex);
634 		wqe_ib_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
635 		wqe_kmem = wqe_mgt->wqes_kmem;
636 		wqe_kmem_sz = wqe_mgt->wqes_kmem_sz;
637 		mutex_exit(&wqe_mgt->wqes_mutex);
638 
639 		/* Deregister the memory allocated for the WQEs */
640 		(void) ibt_deregister_mr(cip->ci_ci_handle, wqe_ib_mem_hdl);
641 
642 		/* Free the kmem allocated for the WQEs */
643 		kmem_free(wqe_kmem, wqe_kmem_sz);
644 
645 		/* Destroy the mutex */
646 		mutex_destroy(&wqe_mgt->wqes_mutex);
647 
648 		/* Free the WQE management structure */
649 		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));
650 
651 		mutex_enter(&cip->ci_wqe_mutex);
652 		wqe_mgt = cip->ci_wqe_mgt_list;
653 	}
654 
655 	mutex_exit(&cip->ci_wqe_mutex);
656 
657 	/* Destroy the kmem_cache for the send WQE */
658 	kmem_cache_destroy(cip->ci_send_wqes_cache);
659 	/* Destroy the kmem_cache for the receive WQE */
660 	kmem_cache_destroy(cip->ci_recv_wqes_cache);
661 
662 	/*
663 	 * Destroy the vmem arena for the WQEs
664 	 * This must be done after the kmem_cache_destroy() calls since
665 	 * the cache destructors call vmem_free()
666 	 */
667 	vmem_destroy((void *)cip->ci_wqe_ib_vmem);
668 
669 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_wqes_end,
670 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_wqes() exit\n");
671 }
672 
673 /*
674  * ibmf_i_init_altqp_wqes():
675  *	Create the kmem cache for send and receive WQEs used by alternate QPs
676  */
677 int
678 ibmf_i_init_altqp_wqes(ibmf_alt_qp_t *qp_ctx)
679 {
680 	ibt_status_t		status;
681 	ibt_mr_hdl_t		mem_hdl;
682 	ibt_mr_desc_t		mem_desc;
683 	ibt_mr_attr_t		mem_attr;
684 	ibmf_wqe_mgt_t		*wqe_mgtp;
685 	char			string[128];
686 
687 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_altqp_wqes_start,
688 	    IBMF_TNF_TRACE, "", "ibmf_i_init_altqp_wqes() enter, qp_ctx = %p\n",
689 	    tnf_opaque, qp, qp_ctx);
690 
691 	/*
692 	 * Allocate memory for the WQE management structure
693 	 */
694 	wqe_mgtp = kmem_zalloc(sizeof (ibmf_wqe_mgt_t), KM_SLEEP);
695 	mutex_init(&wqe_mgtp->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
696 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgtp))
697 
698 	/*
699 	 * Allocate memory for all the WQEs to be used by this alternate QP
700 	 */
701 	wqe_mgtp->wqes_kmem_sz = (IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
702 	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port);
703 	wqe_mgtp->wqes_kmem = kmem_zalloc(wqe_mgtp->wqes_kmem_sz, KM_SLEEP);
704 
705 	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgtp->wqes_kmem;
706 	mem_attr.mr_len = wqe_mgtp->wqes_kmem_sz;
707 	mem_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
708 	mem_attr.mr_as = NULL;
709 
710 	/* Register the allocated memory */
711 	status = ibt_register_mr(qp_ctx->isq_client_hdl->ic_myci->ci_ci_handle,
712 	    qp_ctx->isq_client_hdl->ic_myci->ci_pd, &mem_attr, &mem_hdl,
713 	    &mem_desc);
714 	if (status != IBT_SUCCESS) {
715 		kmem_free(wqe_mgtp->wqes_kmem, wqe_mgtp->wqes_kmem_sz);
716 		IBMF_TRACE_2(IBMF_TNF_NODEBUG, DPRINT_L1,
717 		    ibmf_i_init_altqp_wqes_err, IBMF_TNF_ERROR, "",
718 		    "ibmf_i_init_altqp_wqes(): %s, status = %d\n",
719 		    tnf_string, msg,
720 		    "register of WQE mem failed", tnf_uint, status, status);
721 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
722 		    ibmf_i_init_altqp_wqes_end, IBMF_TNF_TRACE, "",
723 		    "ibmf_i_init_altqp_wqes() exit\n");
724 		return (IBMF_NO_RESOURCES);
725 	}
726 
727 	/* Store the memory registration information */
728 	wqe_mgtp->wqes_ib_mem = mem_desc.md_vaddr;
729 	wqe_mgtp->wqes_ib_lkey = mem_desc.md_lkey;
730 	wqe_mgtp->wqes_ib_mem_hdl = mem_hdl;
731 
732 	/* Create a vmem arena for the IB virtual address space */
733 	bzero(string, 128);
734 	(void) sprintf(string, "ibmf_%016" PRIx64 "_%x_wqes",
735 	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
736 	qp_ctx->isq_wqe_ib_vmem = vmem_create(string,
737 	    (void *)(uintptr_t)wqe_mgtp->wqes_ib_mem, wqe_mgtp->wqes_kmem_sz,
738 	    sizeof (uint64_t), NULL, NULL, NULL, 0, VM_SLEEP);
739 
740 	bzero(string, 128);
741 	/*
742 	 * CAUTION: Do not exceed 32 characters for the kmem cache name, else,
743 	 * mdb does not exit (bug 4878751). There is some connection between
744 	 * mdb walkers and kmem_caches with the limitation likely to be in the
745 	 * mdb code.
746 	 */
747 	(void) sprintf(string, "ibmf%016" PRIx64 "_%xs",
748 	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
749 	/* create a kmem cache for the send WQEs */
750 	qp_ctx->isq_send_wqes_cache = kmem_cache_create(string,
751 	    sizeof (ibmf_send_wqe_t), 0, ibmf_altqp_send_wqe_cache_constructor,
752 	    ibmf_altqp_send_wqe_cache_destructor, NULL, (void *)qp_ctx,
753 	    NULL, 0);
754 
755 	bzero(string, 128);
756 	(void) sprintf(string, "ibmf%016" PRIx64 "_%xr",
757 	    qp_ctx->isq_client_hdl->ic_client_info.ci_guid, qp_ctx->isq_qpn);
758 	/* create a kmem cache for the receive WQEs */
759 	qp_ctx->isq_recv_wqes_cache = kmem_cache_create(string,
760 	    sizeof (ibmf_recv_wqe_t), 0, ibmf_altqp_recv_wqe_cache_constructor,
761 	    ibmf_altqp_recv_wqe_cache_destructor, NULL, (void *)qp_ctx,
762 	    NULL, 0);
763 
764 	mutex_enter(&qp_ctx->isq_wqe_mutex);
765 	qp_ctx->isq_wqe_mgt_list = wqe_mgtp;
766 	mutex_exit(&qp_ctx->isq_wqe_mutex);
767 
768 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_altqp_wqes_end,
769 	    IBMF_TNF_TRACE, "", "ibmf_i_init_altqp_wqes() exit\n");
770 
771 	return (IBMF_SUCCESS);
772 }
773 
774 /*
775  * ibmf_i_fini_altqp_wqes():
776  *	Destroy the kmem cache for send and receive WQEs for alternate QPs
777  */
778 void
779 ibmf_i_fini_altqp_wqes(ibmf_alt_qp_t *qp_ctx)
780 {
781 	ibmf_wqe_mgt_t	*wqe_mgt;
782 	ibt_mr_hdl_t	wqe_ib_mem_hdl;
783 	void		*wqe_kmem;
784 	uint64_t	wqe_kmem_sz;
785 
786 	IBMF_TRACE_1(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_altqp_wqes_start,
787 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_altqp_wqes() enter, qp_ctx = %p\n",
788 	    tnf_opaque, qp, qp_ctx);
789 
790 	mutex_enter(&qp_ctx->isq_wqe_mutex);
791 	wqe_mgt = qp_ctx->isq_wqe_mgt_list;
792 	while (wqe_mgt != NULL) {
793 		/* Remove the WQE mgt struct from the list */
794 		qp_ctx->isq_wqe_mgt_list = wqe_mgt->wqe_mgt_next;
795 		mutex_exit(&qp_ctx->isq_wqe_mutex);
796 
797 		mutex_enter(&wqe_mgt->wqes_mutex);
798 		wqe_ib_mem_hdl = wqe_mgt->wqes_ib_mem_hdl;
799 		wqe_kmem = wqe_mgt->wqes_kmem;
800 		wqe_kmem_sz = wqe_mgt->wqes_kmem_sz;
801 		mutex_exit(&wqe_mgt->wqes_mutex);
802 
803 		/* Deregister the memory allocated for the WQEs */
804 		(void) ibt_deregister_mr(
805 		    qp_ctx->isq_client_hdl->ic_myci->ci_ci_handle,
806 		    wqe_ib_mem_hdl);
807 
808 		/* Free the kmem allocated for the WQEs */
809 		kmem_free(wqe_kmem, wqe_kmem_sz);
810 
811 		/* Destroy the WQE mgt struct mutex */
812 		mutex_destroy(&wqe_mgt->wqes_mutex);
813 
814 		/* Free the WQE management structure */
815 		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));
816 
817 		mutex_enter(&qp_ctx->isq_wqe_mutex);
818 		wqe_mgt = qp_ctx->isq_wqe_mgt_list;
819 	}
820 
821 	mutex_exit(&qp_ctx->isq_wqe_mutex);
822 
823 	/* Destroy the kmem_cache for the send WQE */
824 	kmem_cache_destroy(qp_ctx->isq_send_wqes_cache);
825 	/* Destroy the kmem_cache for the receive WQE */
826 	kmem_cache_destroy(qp_ctx->isq_recv_wqes_cache);
827 
828 	/*
829 	 * Destroy the vmem arena for the WQEs
830 	 * This must be done after the kmem_cache_destroy() calls since
831 	 * the cache destructors call vmem_free()
832 	 */
833 	vmem_destroy((void *)qp_ctx->isq_wqe_ib_vmem);
834 
835 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_fini_altqp_wqes_end,
836 	    IBMF_TNF_TRACE, "", "ibmf_i_fini_altqp_wqes() exit\n");
837 }
838 
839 /*
840  * ibmf_i_init_send_wqe():
841  *	Initialize a send WQE
842  */
843 /* ARGSUSED */
844 void
845 ibmf_i_init_send_wqe(ibmf_client_t *clientp, ibmf_msg_impl_t *msgimplp,
846     ibt_wr_ds_t *sglp, ibmf_send_wqe_t *wqep, ibt_ud_dest_hdl_t ud_dest,
847     ibt_qp_hdl_t ibt_qp_handle, ibmf_qp_handle_t ibmf_qp_handle)
848 {
849 	ibmf_msg_bufs_t	*ipbufs = &msgimplp->im_msgbufs_send;
850 	ibmf_msg_bufs_t	*hdr_ipbufs;
851 	ib_mad_hdr_t	*ibmadhdrp;
852 	ibmf_rmpp_ctx_t	*rmpp_ctx = &msgimplp->im_rmpp_ctx;
853 	ibmf_rmpp_hdr_t	*rmpp_hdr;
854 	ibt_send_wr_t	*swrp;
855 	uchar_t		*buf;
856 	size_t		data_sz, offset;
857 	uint32_t	cl_hdr_sz, cl_hdr_off;
858 
859 	IBMF_TRACE_5(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_send_wqe_start,
860 	    IBMF_TNF_TRACE, "", "ibmf_i_init_send_wqe() enter, "
861 	    "clientp = %p, msg = %p, sglp = %p , wqep = %p, qp_hdl = %p\n",
862 	    tnf_opaque, clientp, clientp, tnf_opaque, msg, msgimplp,
863 	    tnf_opaque, sglp, sglp, tnf_opaque, wqep, wqep,
864 	    tnf_opaque, qp_hdl, ibmf_qp_handle);
865 
866 	_NOTE(ASSUMING_PROTECTED(*wqep))
867 	_NOTE(ASSUMING_PROTECTED(*sglp))
868 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*swrp))
869 
870 	swrp = &wqep->send_wr;
871 	/* use send wqe pointer as the WR ID */
872 	IBMF_ADDR_TO_SEND_WR_ID(wqep, swrp->wr_id);
873 	ASSERT(swrp->wr_id != 0);
874 	swrp->wr_flags = IBT_WR_NO_FLAGS;
875 	swrp->wr_opcode = IBT_WRC_SEND;
876 	swrp->wr_trans = IBT_UD_SRV;
877 	wqep->send_client = clientp;
878 	wqep->send_msg = msgimplp;
879 
880 	IBMF_INIT_SG_ELEMENT(sglp[0], wqep->send_mem, wqep->send_sg_lkey,
881 	    IBMF_MAD_SIZE);
882 
883 	bzero(wqep->send_mem, IBMF_MAD_SIZE);
884 	if (msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) {
885 		buf = (uchar_t *)ipbufs->im_bufs_cl_data +
886 		    (rmpp_ctx->rmpp_ns - 1) * rmpp_ctx->rmpp_pkt_data_sz;
887 		data_sz = (rmpp_ctx->rmpp_ns == rmpp_ctx->rmpp_num_pkts) ?
888 		    rmpp_ctx->rmpp_last_pkt_sz : rmpp_ctx->rmpp_pkt_data_sz;
889 	} else {
890 		buf = ipbufs->im_bufs_cl_data;
891 		data_sz = ipbufs->im_bufs_cl_data_len;
892 	}
893 
894 	/*
895 	 * We pick the correct msgbuf based on the nature of the transaction.
896 	 * Where the send msgbuf is available, we pick it to provide the
897 	 * context of the outgoing MAD. Note that if this is a termination
898  * context, then the send buffer is invalid even if the sequenced
899  * flag is set because the termination message only has a receive
900 	 * buffer set up.
901 	 */
902 	if ((msgimplp->im_flags & IBMF_MSG_FLAGS_SEQUENCED) &&
903 	    ((msgimplp->im_flags & IBMF_MSG_FLAGS_TERMINATION) == 0)) {
904 		hdr_ipbufs = &msgimplp->im_msgbufs_send;
905 	} else if (msgimplp->im_flags & IBMF_MSG_FLAGS_RECV_RMPP) {
906 		hdr_ipbufs = &msgimplp->im_msgbufs_recv;
907 	} else if (msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) {
908 		hdr_ipbufs = &msgimplp->im_msgbufs_send;
909 	} else {
910 		if (msgimplp->im_unsolicited == B_TRUE) {
911 			hdr_ipbufs = &msgimplp->im_msgbufs_recv;
912 		} else {
913 			hdr_ipbufs = &msgimplp->im_msgbufs_send;
914 		}
915 	}
916 
917 	bcopy((void *)hdr_ipbufs->im_bufs_mad_hdr,
918 	    (void *)wqep->send_mem, sizeof (ib_mad_hdr_t));
919 
920 	/*
921 	 * For unsolicited messages, we only have the sender's MAD at hand.
922 	 * So, we must flip the response bit in the method for the outgoing MAD.
923 	 */
924 	ibmadhdrp = (ib_mad_hdr_t *)wqep->send_mem;
925 	if (msgimplp->im_unsolicited == B_TRUE) {
926 		ibmadhdrp->R_Method = IBMF_FLIP_RESP_BIT(ibmadhdrp->R_Method);
927 	}
928 
929 	offset = sizeof (ib_mad_hdr_t);
930 
931 	if ((msgimplp->im_flags & IBMF_MSG_FLAGS_SEND_RMPP) ||
932 	    (msgimplp->im_flags & IBMF_MSG_FLAGS_RECV_RMPP)) {
933 
934 		rmpp_hdr = (ibmf_rmpp_hdr_t *)
935 		    ((uintptr_t)wqep->send_mem + offset);
936 
937 		_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*rmpp_hdr));
938 
939 		IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L3, ibmf_i_init_send_wqe,
940 		    IBMF_TNF_TRACE, "",
941 	    "ibmf_i_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
942 		    " next_seg = %d, num_pkts = %d\n",
943 		    tnf_opaque, msgimplp, msgimplp,
944 		    tnf_opaque, rmpp_type, rmpp_ctx->rmpp_type,
945 		    tnf_opaque, next_seg, rmpp_ctx->rmpp_ns,
946 		    tnf_opaque, num_pkts, rmpp_ctx->rmpp_num_pkts);
947 
948 		/*
949 		 * Initialize the RMPP header
950 		 */
951 		rmpp_ctx->rmpp_flags = IBMF_RMPP_FLAGS_ACTIVE;
952 
953 		/* first, last packet flags set only for type DATA */
954 		if (rmpp_ctx->rmpp_type == IBMF_RMPP_TYPE_DATA) {
955 
956 			if (rmpp_ctx->rmpp_ns == 1)
957 				rmpp_ctx->rmpp_flags |=
958 				    IBMF_RMPP_FLAGS_FIRST_PKT;
959 			else
960 				rmpp_ctx->rmpp_respt = IBMF_RMPP_DEFAULT_RRESPT;
961 
962 			if (rmpp_ctx->rmpp_ns == rmpp_ctx->rmpp_num_pkts)
963 				rmpp_ctx->rmpp_flags |=
964 				    IBMF_RMPP_FLAGS_LAST_PKT;
965 		} else {
966 			data_sz = 0;
967 			rmpp_ctx->rmpp_respt = IBMF_RMPP_TERM_RRESPT;
968 		}
969 
970 		IBMF_INIT_RMPP_HDR(rmpp_hdr,
971 		    IBMF_RMPP_VERSION, rmpp_ctx->rmpp_type,
972 		    rmpp_ctx->rmpp_respt, rmpp_ctx->rmpp_flags,
973 		    rmpp_ctx->rmpp_status, rmpp_ctx->rmpp_word3,
974 		    rmpp_ctx->rmpp_word4)
975 
976 		IBMF_TRACE_5(IBMF_TNF_DEBUG, DPRINT_L3, ibmf_i_init_send_wqe,
977 		    IBMF_TNF_TRACE, "",
978 	    "ibmf_i_init_send_wqe: msgimplp = %p, rmpp_type = %d,"
979 		    " rmpp_flags = 0x%x, rmpp_segnum = %d, pyld_nwl = %d\n",
980 		    tnf_opaque, msgimplp, msgimplp,
981 		    tnf_opaque, rmpp_type, rmpp_hdr->rmpp_type,
982 		    tnf_opaque, rmpp_flags, rmpp_hdr->rmpp_flags,
983 		    tnf_opaque, rmpp_segnum, b2h32(rmpp_hdr->rmpp_segnum),
984 		    tnf_opaque, pyld_nwl, b2h32(rmpp_hdr->rmpp_pyldlen_nwl));
985 
986 		_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(rmpp_hdr));
987 	}
988 
989 	/* determine offset to start class header */
990 	ibmf_i_mgt_class_to_hdr_sz_off(
991 	    hdr_ipbufs->im_bufs_mad_hdr->MgmtClass,
992 	    &cl_hdr_sz, &cl_hdr_off);
993 	offset += cl_hdr_off;
994 	if (hdr_ipbufs->im_bufs_cl_hdr != NULL) {
995 		bcopy((void *)hdr_ipbufs->im_bufs_cl_hdr,
996 		    (void *)((uintptr_t)wqep->send_mem + offset),
997 		    hdr_ipbufs->im_bufs_cl_hdr_len);
998 		offset += hdr_ipbufs->im_bufs_cl_hdr_len;
999 	}
1000 	bcopy((void *)buf, (void *)((uintptr_t)wqep->send_mem + offset),
1001 	    data_sz);
1002 	swrp->wr_sgl = sglp;
1003 	swrp->wr_nds = 1;
1004 	swrp->wr.ud.udwr_dest = ud_dest;
1005 	wqep->send_port_num = clientp->ic_client_info.port_num;
1006 	wqep->send_qp_handle = ibt_qp_handle;
1007 	wqep->send_ibmf_qp_handle = ibmf_qp_handle;
1008 
1009 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*swrp))
1010 
1011 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_send_wqe_end,
1012 	    IBMF_TNF_TRACE, "", "ibmf_i_init_send_wqe() exit\n");
1013 }
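/*
 * Annotation: the send buffer assembled above is laid out roughly as
 *
 *	| ib_mad_hdr_t | RMPP header (RMPP only) | class header | data |
 *
 * with `offset' tracking the running position.  The cl_hdr_off value
 * returned by ibmf_i_mgt_class_to_hdr_sz_off() appears to account for
 * the RMPP header space in classes that use RMPP, which is why the RMPP
 * header is written at `offset' without advancing it.
 */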
1014 
1015 /*
1016  * ibmf_i_init_recv_wqe():
1017  *	Initialize a receive WQE
1018  */
1019 void
1020 ibmf_i_init_recv_wqe(ibmf_qp_t *qpp, ibt_wr_ds_t *sglp,
1021     ibmf_recv_wqe_t *wqep, ibt_qp_hdl_t ibt_qp_handle,
1022     ibmf_qp_handle_t ibmf_qp_handle)
1023 {
1024 	ibt_recv_wr_t		*rwrp;
1025 
1026 	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_recv_wqe_start,
1027 	    IBMF_TNF_TRACE, "", "ibmf_i_init_recv_wqe() enter, "
1028 	    "qpp = %p, sglp = %p , wqep = %p, ud_dest = %p, qp_hdl = %p\n",
1029 	    tnf_opaque, qpp, qpp, tnf_opaque, sglp, sglp, tnf_opaque,
1030 	    wqep, wqep, tnf_opaque, qp_hdl, ibmf_qp_handle);
1031 
1032 	_NOTE(ASSUMING_PROTECTED(*wqep))
1033 	_NOTE(ASSUMING_PROTECTED(*sglp))
1034 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*rwrp))
1035 
1036 	rwrp = &wqep->recv_wr;
1037 
1038 	/*
1039 	 * we set a bit in the WR ID to be able to easily distinguish
1040 	 * between send completions and recv completions
1041 	 */
1042 	IBMF_ADDR_TO_RECV_WR_ID(wqep, rwrp->wr_id);
1043 
1044 	IBMF_INIT_SG_ELEMENT(sglp[0], wqep->recv_mem, wqep->recv_sg_lkey,
1045 	    sizeof (ib_grh_t) + IBMF_MAD_SIZE);
1046 
1047 	rwrp->wr_sgl = sglp;
1048 	rwrp->wr_nds = IBMF_MAX_RQ_WR_SGL_ELEMENTS;
1049 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
1050 		wqep->recv_port_num = qpp->iq_port_num;
1051 	} else {
1052 		ibmf_alt_qp_t	*altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
1053 		wqep->recv_port_num = altqp->isq_port_num;
1054 	}
1055 	wqep->recv_qpp = qpp;
1056 	wqep->recv_qp_handle = ibt_qp_handle;
1057 	wqep->recv_ibmf_qp_handle = ibmf_qp_handle;
1058 
1059 	_NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*rwrp))
1060 
1061 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_init_recv_wqe_end,
1062 	    IBMF_TNF_TRACE, "", "ibmf_i_init_recv_wqe() exit\n");
1063 }
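/*
 * Annotation: the receive SGL is sized sizeof (ib_grh_t) + IBMF_MAD_SIZE
 * because, on UD QPs, the channel interface deposits a Global Route
 * Header (or equivalent padding when no GRH arrived) in front of every
 * inbound MAD; consumers must skip that prefix to reach the MAD itself.
 */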
1064 
1065 /*
1066  * ibmf_i_extend_wqe_cache():
1067  *	Extend the kmem WQE cache
1068  */
1069 int
1070 ibmf_i_extend_wqe_cache(ibmf_ci_t *cip, ibmf_qp_handle_t ibmf_qp_handle,
1071     boolean_t block)
1072 {
1073 	ibmf_wqe_mgt_t		*wqe_mgt;
1074 
1075 	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
1076 	    ibmf_i_extend_wqe_cache_start, IBMF_TNF_TRACE, "",
1077 	    "ibmf_i_extend_wqe_cache() enter, cip = %p, qp_hdl = %p, "
1078 	    " block = %d\n", tnf_opaque, cip, cip, tnf_opaque, qp_hdl,
1079 	    ibmf_qp_handle, tnf_uint, block, block);
1080 
1081 	/*
1082 	 * Allocate memory for the WQE management structure
1083 	 */
1084 	wqe_mgt = kmem_zalloc(sizeof (ibmf_wqe_mgt_t),
1085 	    (block == B_TRUE ? KM_SLEEP : KM_NOSLEEP));
1086 	if (wqe_mgt == NULL) {
1087 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1088 		    ibmf_i_extend_wqe_cache_err, IBMF_TNF_ERROR, "",
1089 		    "ibmf_i_extend_wqe_cache(): %s\n",
1090 		    tnf_string, msg, "wqe mgt alloc failed");
1091 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1092 		    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
1093 		    "ibmf_i_extend_wqe_cache() exit\n");
1094 		return (IBMF_NO_RESOURCES);
1095 	}
1096 	mutex_init(&wqe_mgt->wqes_mutex, NULL, MUTEX_DRIVER, NULL);
1097 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgt))
1098 
1099 	/* Allocate and register more WQE memory */
1100 	if (ibmf_i_extend_wqe_mem(cip, ibmf_qp_handle, wqe_mgt,
1101 	    block) != IBMF_SUCCESS) {
1102 		mutex_destroy(&wqe_mgt->wqes_mutex);
1103 		kmem_free(wqe_mgt, sizeof (ibmf_wqe_mgt_t));
1104 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1105 		    ibmf_i_extend_wqe_cache_err, IBMF_TNF_ERROR, "",
1106 		    "ibmf_i_extend_wqe_cache(): %s\n",
1107 		    tnf_string, msg, "extension of WQE pool failed");
1108 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1109 		    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
1110 		    "ibmf_i_extend_wqe_cache() exit\n");
1111 		return (IBMF_NO_RESOURCES);
1112 	}
1113 
1114 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1115 	    ibmf_i_extend_wqe_cache_end, IBMF_TNF_TRACE, "",
1116 	    "ibmf_i_extend_wqe_cache() exit\n");
1117 
1118 	return (IBMF_SUCCESS);
1119 }
1120 
1121 /*
1122  * ibmf_i_extend_wqe_mem():
1123  *	Allocate and register more WQE memory, and expand the VMEM arena
1124  */
1125 static int
1126 ibmf_i_extend_wqe_mem(ibmf_ci_t *cip, ibmf_qp_handle_t ibmf_qp_handle,
1127     ibmf_wqe_mgt_t *wqe_mgt, boolean_t block)
1128 {
1129 	ibt_status_t		status;
1130 	ibt_mr_hdl_t		mem_hdl;
1131 	ibt_mr_desc_t		mem_desc;
1132 	ibt_mr_attr_t		mem_attr;
1133 	ibmf_alt_qp_t		*qp_ctx;
1134 	ibmf_wqe_mgt_t		*pwqe_mgt;
1135 	vmem_t			*wqe_vmem_arena;
1136 
1137 	_NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*wqe_mgt))
1138 
1139 	IBMF_TRACE_4(IBMF_TNF_DEBUG, DPRINT_L4,
1140 	    ibmf_i_extend_wqe_mem_start, IBMF_TNF_TRACE, "",
1141 	    "ibmf_i_extend_wqe_mem() enter, cip = %p, qp_hdl = %p, "
1142 	    "wqe_mgt = %p, block = %d\n",
1143 	    tnf_opaque, cip, cip, tnf_opaque, qp_hdl, ibmf_qp_handle,
1144 	    tnf_opaque, wqe_mgt, wqe_mgt, tnf_uint, block, block);
1145 
1146 	/*
1147 	 * Allocate more memory for the WQEs to be used by the
1148 	 * specified QP
1149 	 */
1150 	wqe_mgt->wqes_kmem_sz = cip->ci_nports * 2 *
1151 	    ((IBMF_MEM_PER_WQE * ibmf_send_wqes_per_port) +
1152 	    (IBMF_MEM_PER_WQE * ibmf_recv_wqes_per_port));
1153 	wqe_mgt->wqes_kmem = kmem_zalloc(wqe_mgt->wqes_kmem_sz,
1154 	    (block == B_TRUE ? KM_SLEEP : KM_NOSLEEP));
1155 	if (wqe_mgt->wqes_kmem == NULL) {
1156 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1157 		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
1158 		    "ibmf_i_extend_wqe_mem(): %s\n",
1159 		    tnf_string, msg, "extension of WQE pool failed");
1160 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1161 		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
1162 		    "ibmf_i_extend_wqe_mem() exit\n");
1163 		return (IBMF_NO_RESOURCES);
1164 	}
1165 
1166 	mem_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)wqe_mgt->wqes_kmem;
1167 	mem_attr.mr_len = wqe_mgt->wqes_kmem_sz;
1168 	mem_attr.mr_flags = (block == B_TRUE ? IBT_MR_SLEEP : IBT_MR_NOSLEEP)
1169 	    | IBT_MR_ENABLE_LOCAL_WRITE;
1170 	mem_attr.mr_as = NULL;
1171 
1172 	/* Register the allocated memory */
1173 	status = ibt_register_mr(cip->ci_ci_handle, cip->ci_pd,
1174 	    &mem_attr, &mem_hdl, &mem_desc);
1175 	if (status != IBT_SUCCESS) {
1176 		kmem_free(wqe_mgt->wqes_kmem, wqe_mgt->wqes_kmem_sz);
1177 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1178 		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
1179 		    "ibmf_i_extend_wqe_mem(): %s\n",
1180 		    tnf_string, msg, "wqe extension MR failed");
1181 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1182 		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
1183 		    "ibmf_i_extend_wqe_mem() exit\n");
1184 		return (IBMF_NO_RESOURCES);
1185 	}
1186 
1187 	/* Store the memory registration information */
1188 	wqe_mgt->wqes_ib_mem = mem_desc.md_vaddr;
1189 	wqe_mgt->wqes_ib_lkey = mem_desc.md_lkey;
1190 	wqe_mgt->wqes_ib_mem_hdl = mem_hdl;
1191 
1192 	/* Get the VMEM arena based on the QP type */
1193 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
1194 		wqe_vmem_arena = cip->ci_wqe_ib_vmem;
1195 	} else {
1196 		qp_ctx = (ibmf_alt_qp_t *)ibmf_qp_handle;
1197 		wqe_vmem_arena = qp_ctx->isq_wqe_ib_vmem;
1198 	}
1199 
1200 	/* Add these addresses to the vmem arena */
1201 	if (vmem_add(wqe_vmem_arena, (void *)(uintptr_t)wqe_mgt->wqes_ib_mem,
1202 	    wqe_mgt->wqes_kmem_sz,
1203 	    (block == B_TRUE ? VM_SLEEP : VM_NOSLEEP)) == NULL) {
1204 		(void) ibt_deregister_mr(cip->ci_ci_handle,
1205 		    wqe_mgt->wqes_ib_mem_hdl);
1206 		kmem_free(wqe_mgt->wqes_kmem, wqe_mgt->wqes_kmem_sz);
1207 		IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1208 		    ibmf_i_extend_wqe_mem_err, IBMF_TNF_ERROR, "",
1209 		    "ibmf_i_extend_wqe_mem(): %s\n",
1210 		    tnf_string, msg, "wqe extension vmem_add failed");
1211 		IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1212 		    ibmf_i_extend_wqe_mem_end, IBMF_TNF_TRACE, "",
1213 		    "ibmf_i_extend_wqe_mem() exit\n");
1214 		return (IBMF_NO_RESOURCES);
1215 	}
1216 
1217 	/* Get the WQE management pointers based on the QP type */
1218 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
1219 		mutex_enter(&cip->ci_wqe_mutex);
1220 		pwqe_mgt = cip->ci_wqe_mgt_list;
1221 
1222 		/* Add the new wqe management struct to the end of the list */
1223 		while (pwqe_mgt->wqe_mgt_next != NULL)
1224 			pwqe_mgt = pwqe_mgt->wqe_mgt_next;
1225 		pwqe_mgt->wqe_mgt_next = wqe_mgt;
1226 
1227 		mutex_exit(&cip->ci_wqe_mutex);
1228 	} else {
1229 		mutex_enter(&qp_ctx->isq_wqe_mutex);
1230 		pwqe_mgt = qp_ctx->isq_wqe_mgt_list;
1231 
1232 		/* Add the new wqe management struct to the end of the list */
1233 		while (pwqe_mgt->wqe_mgt_next != NULL)
1234 			pwqe_mgt = pwqe_mgt->wqe_mgt_next;
1235 		pwqe_mgt->wqe_mgt_next = wqe_mgt;
1236 
1237 		mutex_exit(&qp_ctx->isq_wqe_mutex);
1238 	}
1239 
1240 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4, ibmf_i_extend_wqe_mem_end,
1241 	    IBMF_TNF_TRACE, "", "ibmf_i_extend_wqe_mem() exit\n");
1242 
1243 	return (IBMF_SUCCESS);
1244 }
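/*
 * Annotation: extension leaves existing registrations untouched; each
 * ibmf_wqe_mgt_t owns one independently registered chunk, and
 * vmem_add() grafts the new IB address span into the existing arena.
 * Subsequent kmem_cache_alloc() calls can therefore be satisfied from
 * the original span or any extension transparently, which is also why
 * the cache constructors must search the wqe_mgt list for the chunk
 * that contains a given arena address.
 */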
1245 
1246 /*
1247  * ibmf_i_alloc_send_resources():
1248  *	Allocate send resources (the send WQE)
1249  */
1250 int
1251 ibmf_i_alloc_send_resources(ibmf_ci_t *cip, ibmf_msg_impl_t *msgimplp,
1252     boolean_t block, ibmf_send_wqe_t **swqepp)
1253 {
1254 	ibmf_send_wqe_t		*send_wqep;
1255 	struct kmem_cache	*kmem_cachep;
1256 	ibmf_qp_handle_t	ibmf_qp_handle = msgimplp->im_qp_hdl;
1257 	ibmf_alt_qp_t		*altqp;
1258 
1259 	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
1260 	    ibmf_i_alloc_send_resources_start, IBMF_TNF_TRACE, "",
1261 	    "ibmf_i_alloc_send_resources() enter, cip = %p, msg = %p, "
1262 	    " block = %d\n", tnf_opaque, cip, cip, tnf_opaque, msg,
1263 	    msgimplp, tnf_uint, block, block);
1264 
1265 	/* Get the WQE kmem cache pointer based on the QP type */
1266 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
1267 		kmem_cachep = cip->ci_send_wqes_cache;
1268 	else {
1269 		altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
1270 		kmem_cachep = altqp->isq_send_wqes_cache;
1271 	}
1272 
1273 	/*
1274 	 * Allocate a send WQE from the send WQE kmem cache
1275 	 * Do not block here as we are holding the msgimpl mutex.
1276 	 */
1277 	send_wqep = kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
1278 	if (send_wqep == NULL) {
1279 		/*
1280 		 * Attempt to extend the cache and then retry the
1281 		 * kmem_cache_alloc()
1282 		 * The block argument (third) is set to B_FALSE.
1283 		 */
1284 		if (ibmf_i_extend_wqe_cache(cip, ibmf_qp_handle, B_FALSE) ==
1285 		    IBMF_NO_RESOURCES) {
1286 			mutex_enter(&cip->ci_mutex);
1287 			IBMF_ADD32_PORT_KSTATS(cip, swqe_allocs_failed, 1);
1288 			mutex_exit(&cip->ci_mutex);
1289 			IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1290 			    ibmf_i_alloc_send_resources_err, IBMF_TNF_ERROR, "",
1291 			    "ibmf_i_alloc_send_resources(): %s\n",
1292 			    tnf_string, msg, "alloc send_wqe failed");
1293 			IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1294 			    ibmf_i_alloc_send_resources_end, IBMF_TNF_TRACE, "",
1295 			    "ibmf_i_alloc_send_resources() exit\n");
1296 			return (IBMF_NO_RESOURCES);
1297 		} else {
1298 			send_wqep = kmem_cache_alloc(kmem_cachep, KM_NOSLEEP);
1299 			if (send_wqep == NULL) {
1300 				/* Allocation failed again. Give up here. */
1301 				mutex_enter(&cip->ci_mutex);
1302 				IBMF_ADD32_PORT_KSTATS(cip, swqe_allocs_failed,
1303 				    1);
1304 				mutex_exit(&cip->ci_mutex);
1305 				IBMF_TRACE_1(IBMF_TNF_NODEBUG, DPRINT_L1,
1306 				    ibmf_i_alloc_send_resources_err,
1307 				    IBMF_TNF_ERROR, "",
1308 				    "ibmf_i_alloc_send_resources(): %s\n",
1309 				    tnf_string, msg, "alloc send_wqe failed");
1310 				IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1311 				    ibmf_i_alloc_send_resources_end,
1312 				    IBMF_TNF_TRACE, "",
1313 				    "ibmf_i_alloc_send_resources() exit\n");
1314 				return (IBMF_NO_RESOURCES);
1315 			}
1316 		}
1317 	}
1318 
1319 	mutex_enter(&cip->ci_mutex);
1320 	IBMF_ADD32_PORT_KSTATS(cip, send_wqes_alloced, 1);
1321 	mutex_exit(&cip->ci_mutex);
1322 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
1323 		mutex_enter(&cip->ci_mutex);
1324 		cip->ci_wqes_alloced++;
1325 		mutex_exit(&cip->ci_mutex);
1326 	} else {
1327 		mutex_enter(&altqp->isq_mutex);
1328 		altqp->isq_wqes_alloced++;
1329 		mutex_exit(&altqp->isq_mutex);
1330 	}
1331 
1332 	send_wqep->send_msg = msgimplp;
1333 	*swqepp = send_wqep;
1334 
1335 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1336 	    ibmf_i_alloc_send_resources_end, IBMF_TNF_TRACE, "",
1337 	    "ibmf_i_alloc_send_resources() exit\n");
1338 
1339 	return (IBMF_SUCCESS);
1340 }
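/*
 * Annotation: this is a try/extend/retry allocation.  Both
 * kmem_cache_alloc() calls use KM_NOSLEEP, and the pool extension is
 * attempted with block == B_FALSE, because the caller holds the
 * message mutex.  A hedged caller sketch using names from this file:
 *
 *	ibmf_send_wqe_t *swqep;
 *
 *	if (ibmf_i_alloc_send_resources(cip, msgimplp, B_FALSE,
 *	    &swqep) != IBMF_SUCCESS)
 *		return (IBMF_NO_RESOURCES);
 */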
1341 
1342 /*
1343  * ibmf_i_free_send_resources():
1344  *	Free send resources (just the send WQE)
1345  */
1346 /* ARGSUSED */
1347 void
1348 ibmf_i_free_send_resources(ibmf_ci_t *cip, ibmf_msg_impl_t *msgimplp,
1349     ibmf_send_wqe_t *swqep)
1350 {
1351 	struct kmem_cache	*kmem_cachep;
1352 	ibmf_qp_handle_t	ibmf_qp_handle = msgimplp->im_qp_hdl;
1353 	ibmf_alt_qp_t		*altqp;
1354 
1355 	IBMF_TRACE_3(IBMF_TNF_DEBUG, DPRINT_L4,
1356 	    ibmf_i_free_send_resources_start, IBMF_TNF_TRACE, "",
1357 	    "ibmf_i_free_send_resources() enter, cip = %p, msg = %p, "
1358 	    " swqep = %p\n", tnf_opaque, cip, cip, tnf_opaque, msg,
1359 	    msgimplp, tnf_opaque, swqep, swqep);
1360 
1361 	/* Get the WQE kmem cache pointer based on the QP type */
1362 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT)
1363 		kmem_cachep = cip->ci_send_wqes_cache;
1364 	else {
1365 		altqp = (ibmf_alt_qp_t *)ibmf_qp_handle;
1366 		kmem_cachep = altqp->isq_send_wqes_cache;
1367 	}
1368 
1369 	/* return the send WQE to the kmem cache */
1370 	kmem_cache_free(kmem_cachep, swqep);
1371 
1372 	mutex_enter(&cip->ci_mutex);
1373 	IBMF_SUB32_PORT_KSTATS(cip, send_wqes_alloced, 1);
1374 	mutex_exit(&cip->ci_mutex);
1375 	if (ibmf_qp_handle == IBMF_QP_HANDLE_DEFAULT) {
1376 		mutex_enter(&cip->ci_mutex);
1377 		cip->ci_wqes_alloced--;
1378 		if (cip->ci_wqes_alloced == 0)
1379 			cv_signal(&cip->ci_wqes_cv);
1380 		mutex_exit(&cip->ci_mutex);
1381 	} else {
1382 		mutex_enter(&altqp->isq_mutex);
1383 		altqp->isq_wqes_alloced--;
1384 		if (altqp->isq_wqes_alloced == 0)
1385 			cv_signal(&altqp->isq_wqes_cv);
1386 		mutex_exit(&altqp->isq_mutex);
1387 	}
1388 
1389 	IBMF_TRACE_0(IBMF_TNF_DEBUG, DPRINT_L4,
1390 	    ibmf_i_free_send_resources_end, IBMF_TNF_TRACE, "",
1391 	    "ibmf_i_free_send_resources() exit\n");
1392 }
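/*
 * Annotation: the per-CI and per-QP counters decremented above exist so
 * that teardown code can cv_wait() until all outstanding WQEs have been
 * returned; cv_signal() fires only on the transition to zero, and the
 * decrement and test are performed under one mutex to keep that
 * transition race-free.
 */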
1393