/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd;
	struct qib_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct qib_sge_state ssge;
	struct qib_sge *sge;
	struct ib_wc wc;
	u32 length;

	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
	if (!qp) {
		ibp->n_pkt_drops++;
		return;
	}
	if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto drop;
	}

	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
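	/*
	 * For example, a WR qkey of 0x80000000 has the high order bit set,
	 * so the cast to int below makes it negative and the qkey from the
	 * QP context is used in its place.
	 */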
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
			sqp->qkey : swqe->wr.wr.ud.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof wc);
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
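	/*
	 * QIB_R_REUSE_SGE is set when a previous packet was dropped for
	 * lack of buffer space, so the SGE state fetched then is reused
	 * instead of consuming a fresh RWQE.
	 */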
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		ibp->n_pkt_drops++;
		goto bail_unlock;
	}

	if (ah_attr->ah_flags & IB_AH_GRH) {
		qib_copy_sge(&qp->r_sge, &ah_attr->grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
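	/*
	 * Copy the payload one segment at a time: sge walks the sender's
	 * SG list while qib_copy_sge() advances the receiver's r_sge.
	 */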
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
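	/* Drop the MR references still held by the receive SG list. */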
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->wr.wr.ud.pkey_index : 0;
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	if (qp->s_cur == qp->s_head)
		goto bail;

	wqe = get_swqe_ptr(qp, qp->s_cur);
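	/* The send queue is circular; wrap next_cur back to 0 at s_size. */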
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != QIB_PERMISSIVE_LID)
			ibp->n_multicast_xmit++;
		else
			ibp->n_unicast_xmit++;
	} else {
		ibp->n_unicast_xmit++;
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&qp->s_dma_busy)) {
				qp->s_flags |= QIB_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
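	/*
	 * Pad the payload to a multiple of 4 bytes: a 5 byte payload,
	 * for example, needs 3 pad bytes and occupies 2 32-bit words.
	 */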
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &ah_attr->grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs; the spec does not define what happens in that case.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL to 15 for SMPs (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
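	/*
	 * bth0 now carries the opcode (bits 31:24), the solicited event
	 * bit (23), the pad count (21:20) and the P_Key (15:0).
	 */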
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
		ah_attr->dlid != QIB_PERMISSIVE_LID ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here; reaching this point means the hardware
	 * failed to validate the pkeys.  Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	tlen -= hdrsize + pad + 4;
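	/* tlen is now the payload length; the 4 above is the trailing ICRC. */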

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

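			/*
			 * The P_Key is the low 16 bits of bth[0]; assigning
			 * to a u16 below keeps exactly those bits.
			 */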
			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					      pkey1,
					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					      src_qp, qp->ibqp.qp_num,
					      hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey)) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      src_qp, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			return;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
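		/*
		 * The 4 byte immediate datum follows the DETH and was not
		 * counted in hdrsize, so remove it from the payload length.
		 */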
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->n_pkt_drops++;
}