1 /*
2  * Copyright(c) 2015-2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 
48 #include <linux/net.h>
49 #include <rdma/opa_addr.h>
50 #define OPA_NUM_PKEY_BLOCKS_PER_SMP (OPA_SMP_DR_DATA_SIZE \
51 			/ (OPA_PARTITION_TABLE_BLK_SIZE * sizeof(u16)))
52 
53 #include "hfi.h"
54 #include "mad.h"
55 #include "trace.h"
56 #include "qp.h"
57 #include "vnic.h"
58 
59 /* the reset value from the FM is supposed to be 0xffff, handle both */
60 #define OPA_LINK_WIDTH_RESET_OLD 0x0fff
61 #define OPA_LINK_WIDTH_RESET 0xffff
62 
63 struct trap_node {
64 	struct list_head list;
65 	struct opa_mad_notice_attr data;
66 	__be64 tid;
67 	int len;
68 	u32 retry;
69 	u8 in_use;
70 	u8 repress;
71 };
72 
73 static int smp_length_check(u32 data_size, u32 request_len)
74 {
75 	if (unlikely(request_len < data_size))
76 		return -EINVAL;
77 
78 	return 0;
79 }
80 
81 static int reply(struct ib_mad_hdr *smp)
82 {
83 	/*
84 	 * The verbs framework will handle the directed/LID route
85 	 * packet changes.
86 	 */
87 	smp->method = IB_MGMT_METHOD_GET_RESP;
88 	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
89 		smp->status |= IB_SMP_DIRECTION;
90 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
91 }
92 
93 static inline void clear_opa_smp_data(struct opa_smp *smp)
94 {
95 	void *data = opa_get_smp_data(smp);
96 	size_t size = opa_get_smp_data_size(smp);
97 
98 	memset(data, 0, size);
99 }
100 
101 static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
102 {
103 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
104 
105 	if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
106 		return ppd->pkeys[pkey_idx];
107 
108 	return 0;
109 }
110 
111 void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
112 {
113 	struct ib_event event;
114 
115 	event.event = IB_EVENT_PKEY_CHANGE;
116 	event.device = &dd->verbs_dev.rdi.ibdev;
117 	event.element.port_num = port;
118 	ib_dispatch_event(&event);
119 }
120 
121 /*
122  * If the port is down, clean up all pending traps.  We need to be careful
123  * with the given trap, because it may be queued.
124  */
125 static void cleanup_traps(struct hfi1_ibport *ibp, struct trap_node *trap)
126 {
127 	struct trap_node *node, *q;
128 	unsigned long flags;
129 	struct list_head trap_list;
130 	int i;
131 
132 	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
133 		spin_lock_irqsave(&ibp->rvp.lock, flags);
134 		list_replace_init(&ibp->rvp.trap_lists[i].list, &trap_list);
135 		ibp->rvp.trap_lists[i].list_len = 0;
136 		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
137 
138 		/*
139 		 * Remove all items from the list, freeing all the non-given
140 		 * traps.
141 		 */
142 		list_for_each_entry_safe(node, q, &trap_list, list) {
143 			list_del(&node->list);
144 			if (node != trap)
145 				kfree(node);
146 		}
147 	}
148 
149 	/*
150 	 * If the given trap was never queued, it was not freed above.  If
151 	 * it was queued, it has now been unlinked and is safe to free.
152 	 */
153 	kfree(trap);
154 }
155 
156 static struct trap_node *check_and_add_trap(struct hfi1_ibport *ibp,
157 					    struct trap_node *trap)
158 {
159 	struct trap_node *node;
160 	struct trap_list *trap_list;
161 	unsigned long flags;
162 	unsigned long timeout;
163 	int found = 0;
164 	unsigned int queue_id;
165 	static int trap_count;
166 
167 	queue_id = trap->data.generic_type & 0x0F;
168 	if (queue_id >= RVT_MAX_TRAP_LISTS) {
169 		trap_count++;
170 		pr_err_ratelimited("hfi1: Invalid trap 0x%0x dropped. Total dropped: %d\n",
171 				   trap->data.generic_type, trap_count);
172 		kfree(trap);
173 		return NULL;
174 	}
175 
176 	/*
177 	 * Since a retry (from the timeout handler) does not remove a trap
178 	 * request from the list, a pointer comparison is enough to find it.
179 	 */
180 	spin_lock_irqsave(&ibp->rvp.lock, flags);
181 	trap_list = &ibp->rvp.trap_lists[queue_id];
182 
183 	list_for_each_entry(node, &trap_list->list, list) {
184 		if (node == trap) {
185 			node->retry++;
186 			found = 1;
187 			break;
188 		}
189 	}
190 
191 	/* If it is not on the list, add it, limited to RVT_MAX_TRAP_LEN. */
192 	if (!found) {
193 		if (trap_list->list_len < RVT_MAX_TRAP_LEN) {
194 			trap_list->list_len++;
195 			list_add_tail(&trap->list, &trap_list->list);
196 		} else {
197 			pr_warn_ratelimited("hfi1: Maximum trap limit reached for 0x%0x traps\n",
198 					    trap->data.generic_type);
199 			kfree(trap);
200 		}
201 	}
202 
203 	/*
204 	 * Next check to see if there is a timer pending.  If not, set it up
205 	 * and get the first trap from the list.
206 	 */
207 	node = NULL;
208 	if (!timer_pending(&ibp->rvp.trap_timer)) {
209 		/*
210 		 * o14-2
211 		 * If the timeout is set we have to wait until it expires
212 		 * before the trap can be sent.
213 		 * This should be > RVT_TRAP_TIMEOUT
214 		 */
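		/*
		 * Worked example (editorial; the base value is an
		 * assumption): if RVT_TRAP_TIMEOUT is the IB-spec base
		 * interval of 4096 (4.096 usec expressed in ns), a
		 * subnet_timeout of 18 gives
		 *   (4096 * (1UL << 18)) / 1000 ~= 1073742 usec,
		 * i.e. roughly a 1.07 s wait before the trap may be sent.
		 */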
215 		timeout = (RVT_TRAP_TIMEOUT *
216 			   (1UL << ibp->rvp.subnet_timeout)) / 1000;
217 		mod_timer(&ibp->rvp.trap_timer,
218 			  jiffies + usecs_to_jiffies(timeout));
219 		node = list_first_entry(&trap_list->list, struct trap_node,
220 					list);
221 		node->in_use = 1;
222 	}
223 	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
224 
225 	return node;
226 }
227 
228 static void subn_handle_opa_trap_repress(struct hfi1_ibport *ibp,
229 					 struct opa_smp *smp)
230 {
231 	struct trap_list *trap_list;
232 	struct trap_node *trap;
233 	unsigned long flags;
234 	int i;
235 
236 	if (smp->attr_id != IB_SMP_ATTR_NOTICE)
237 		return;
238 
239 	spin_lock_irqsave(&ibp->rvp.lock, flags);
240 	for (i = 0; i < RVT_MAX_TRAP_LISTS; i++) {
241 		trap_list = &ibp->rvp.trap_lists[i];
242 		trap = list_first_entry_or_null(&trap_list->list,
243 						struct trap_node, list);
244 		if (trap && trap->tid == smp->tid) {
245 			if (trap->in_use) {
246 				trap->repress = 1;
247 			} else {
248 				trap_list->list_len--;
249 				list_del(&trap->list);
250 				kfree(trap);
251 			}
252 			break;
253 		}
254 	}
255 	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
256 }
257 
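/*
 * Editorial note on the helper below: a DLID below the multicast base
 * (0xC000) fits the 16-bit 9B LID space and is carried as-is; a DLID at
 * or above it is treated as an extended LID, so a GRH is attached and
 * the full 32-bit LID is encoded into the DGID interface_id via
 * OPA_MAKE_ID().  For example (hypothetical value), dlid = 0x10000
 * takes the GRH path.
 */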
258 static void hfi1_update_sm_ah_attr(struct hfi1_ibport *ibp,
259 				   struct rdma_ah_attr *attr, u32 dlid)
260 {
261 	rdma_ah_set_dlid(attr, dlid);
262 	rdma_ah_set_port_num(attr, ppd_from_ibp(ibp)->port);
263 	if (dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
264 		struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
265 
266 		rdma_ah_set_ah_flags(attr, IB_AH_GRH);
267 		grh->sgid_index = 0;
268 		grh->hop_limit = 1;
269 		grh->dgid.global.subnet_prefix =
270 			ibp->rvp.gid_prefix;
271 		grh->dgid.global.interface_id = OPA_MAKE_ID(dlid);
272 	}
273 }
274 
275 static int hfi1_modify_qp0_ah(struct hfi1_ibport *ibp,
276 			      struct rvt_ah *ah, u32 dlid)
277 {
278 	struct rdma_ah_attr attr;
279 	struct rvt_qp *qp0;
280 	int ret = -EINVAL;
281 
282 	memset(&attr, 0, sizeof(attr));
283 	attr.type = ah->ibah.type;
284 	hfi1_update_sm_ah_attr(ibp, &attr, dlid);
285 	rcu_read_lock();
286 	qp0 = rcu_dereference(ibp->rvp.qp[0]);
287 	if (qp0)
288 		ret = rdma_modify_ah(&ah->ibah, &attr);
289 	rcu_read_unlock();
290 	return ret;
291 }
292 
293 static struct ib_ah *hfi1_create_qp0_ah(struct hfi1_ibport *ibp, u32 dlid)
294 {
295 	struct rdma_ah_attr attr;
296 	struct ib_ah *ah = ERR_PTR(-EINVAL);
297 	struct rvt_qp *qp0;
298 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
299 	struct hfi1_devdata *dd = dd_from_ppd(ppd);
300 	u8 port_num = ppd->port;
301 
302 	memset(&attr, 0, sizeof(attr));
303 	attr.type = rdma_ah_find_type(&dd->verbs_dev.rdi.ibdev, port_num);
304 	hfi1_update_sm_ah_attr(ibp, &attr, dlid);
305 	rcu_read_lock();
306 	qp0 = rcu_dereference(ibp->rvp.qp[0]);
307 	if (qp0)
308 		ah = rdma_create_ah(qp0->ibqp.pd, &attr);
309 	rcu_read_unlock();
310 	return ah;
311 }
312 
313 static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
314 {
315 	struct ib_mad_send_buf *send_buf;
316 	struct ib_mad_agent *agent;
317 	struct opa_smp *smp;
318 	unsigned long flags;
319 	int pkey_idx;
320 	u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp;
321 
322 	agent = ibp->rvp.send_agent;
323 	if (!agent) {
324 		cleanup_traps(ibp, trap);
325 		return;
326 	}
327 
328 	/* o14-3.2.1 */
329 	if (driver_lstate(ppd_from_ibp(ibp)) != IB_PORT_ACTIVE) {
330 		cleanup_traps(ibp, trap);
331 		return;
332 	}
333 
334 	/* Add the trap to the list if necessary and see if we can send it */
335 	trap = check_and_add_trap(ibp, trap);
336 	if (!trap)
337 		return;
338 
339 	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
340 	if (pkey_idx < 0) {
341 		pr_warn("%s: failed to find limited mgmt pkey, defaulting to 0x%x\n",
342 			__func__, hfi1_get_pkey(ibp, 1));
343 		pkey_idx = 1;
344 	}
345 
346 	send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0,
347 				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
348 				      GFP_ATOMIC, IB_MGMT_BASE_VERSION);
349 	if (IS_ERR(send_buf))
350 		return;
351 
352 	smp = send_buf->mad;
353 	smp->base_version = OPA_MGMT_BASE_VERSION;
354 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
355 	smp->class_version = OPA_SM_CLASS_VERSION;
356 	smp->method = IB_MGMT_METHOD_TRAP;
357 
358 	/* Only update the transaction ID for new traps (o13-5). */
359 	if (trap->tid == 0) {
360 		ibp->rvp.tid++;
361 		/* make sure that tid != 0 */
362 		if (ibp->rvp.tid == 0)
363 			ibp->rvp.tid++;
364 		trap->tid = cpu_to_be64(ibp->rvp.tid);
365 	}
366 	smp->tid = trap->tid;
367 
368 	smp->attr_id = IB_SMP_ATTR_NOTICE;
369 	/* o14-1: smp->mkey = 0; */
370 
371 	memcpy(smp->route.lid.data, &trap->data, trap->len);
372 
373 	spin_lock_irqsave(&ibp->rvp.lock, flags);
374 	if (!ibp->rvp.sm_ah) {
375 		if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
376 			struct ib_ah *ah;
377 
378 			ah = hfi1_create_qp0_ah(ibp, ibp->rvp.sm_lid);
379 			if (IS_ERR(ah)) {
380 				spin_unlock_irqrestore(&ibp->rvp.lock, flags);
381 				return;
382 			}
383 			send_buf->ah = ah;
384 			ibp->rvp.sm_ah = ibah_to_rvtah(ah);
385 		} else {
386 			spin_unlock_irqrestore(&ibp->rvp.lock, flags);
387 			return;
388 		}
389 	} else {
390 		send_buf->ah = &ibp->rvp.sm_ah->ibah;
391 	}
392 
393 	/*
394 	 * If the trap was repressed while things were getting set up, don't
395 	 * bother sending it. This could happen for a retry.
396 	 */
397 	if (trap->repress) {
398 		list_del(&trap->list);
399 		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
400 		kfree(trap);
401 		ib_free_send_mad(send_buf);
402 		return;
403 	}
404 
405 	trap->in_use = 0;
406 	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
407 
408 	if (ib_post_send_mad(send_buf, NULL))
409 		ib_free_send_mad(send_buf);
410 }
411 
412 void hfi1_handle_trap_timer(struct timer_list *t)
413 {
414 	struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
415 	struct trap_node *trap = NULL;
416 	unsigned long flags;
417 	int i;
418 
419 	/* Find the trap with the highest priority */
420 	spin_lock_irqsave(&ibp->rvp.lock, flags);
421 	for (i = 0; !trap && i < RVT_MAX_TRAP_LISTS; i++) {
422 		trap = list_first_entry_or_null(&ibp->rvp.trap_lists[i].list,
423 						struct trap_node, list);
424 	}
425 	spin_unlock_irqrestore(&ibp->rvp.lock, flags);
426 
427 	if (trap)
428 		send_trap(ibp, trap);
429 }
430 
431 static struct trap_node *create_trap_node(u8 type, __be16 trap_num, u32 lid)
432 {
433 	struct trap_node *trap;
434 
435 	trap = kzalloc(sizeof(*trap), GFP_ATOMIC);
436 	if (!trap)
437 		return NULL;
438 
439 	INIT_LIST_HEAD(&trap->list);
440 	trap->data.generic_type = type;
441 	trap->data.prod_type_lsb = IB_NOTICE_PROD_CA;
442 	trap->data.trap_num = trap_num;
443 	trap->data.issuer_lid = cpu_to_be32(lid);
444 
445 	return trap;
446 }
447 
448 /*
449  * Send a bad P_Key trap (ch. 14.3.8).
450  */
451 void hfi1_bad_pkey(struct hfi1_ibport *ibp, u32 key, u32 sl,
452 		   u32 qp1, u32 qp2, u32 lid1, u32 lid2)
453 {
454 	struct trap_node *trap;
455 	u32 lid = ppd_from_ibp(ibp)->lid;
456 
457 	ibp->rvp.n_pkt_drops++;
458 	ibp->rvp.pkey_violations++;
459 
460 	trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_P_KEY,
461 				lid);
462 	if (!trap)
463 		return;
464 
465 	/* Send violation trap */
466 	trap->data.ntc_257_258.lid1 = cpu_to_be32(lid1);
467 	trap->data.ntc_257_258.lid2 = cpu_to_be32(lid2);
468 	trap->data.ntc_257_258.key = cpu_to_be32(key);
469 	trap->data.ntc_257_258.sl = sl << 3;
470 	trap->data.ntc_257_258.qp1 = cpu_to_be32(qp1);
471 	trap->data.ntc_257_258.qp2 = cpu_to_be32(qp2);
472 
473 	trap->len = sizeof(trap->data);
474 	send_trap(ibp, trap);
475 }
476 
477 /*
478  * Send a bad M_Key trap (ch. 14.3.9).
479  */
480 static void bad_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
481 		     __be64 mkey, __be32 dr_slid, u8 return_path[], u8 hop_cnt)
482 {
483 	struct trap_node *trap;
484 	u32 lid = ppd_from_ibp(ibp)->lid;
485 
486 	trap = create_trap_node(IB_NOTICE_TYPE_SECURITY, OPA_TRAP_BAD_M_KEY,
487 				lid);
488 	if (!trap)
489 		return;
490 
491 	/* Send violation trap */
492 	trap->data.ntc_256.lid = trap->data.issuer_lid;
493 	trap->data.ntc_256.method = mad->method;
494 	trap->data.ntc_256.attr_id = mad->attr_id;
495 	trap->data.ntc_256.attr_mod = mad->attr_mod;
496 	trap->data.ntc_256.mkey = mkey;
497 	if (mad->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
498 		trap->data.ntc_256.dr_slid = dr_slid;
499 		trap->data.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE;
500 		if (hop_cnt > ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path)) {
501 			trap->data.ntc_256.dr_trunc_hop |=
502 				IB_NOTICE_TRAP_DR_TRUNC;
503 			hop_cnt = ARRAY_SIZE(trap->data.ntc_256.dr_rtn_path);
504 		}
505 		trap->data.ntc_256.dr_trunc_hop |= hop_cnt;
506 		memcpy(trap->data.ntc_256.dr_rtn_path, return_path,
507 		       hop_cnt);
508 	}
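	/*
	 * Editorial example: dr_trunc_hop packs IB_NOTICE_TRAP_DR_NOTICE,
	 * optionally IB_NOTICE_TRAP_DR_TRUNC when the return path was
	 * clipped to the dr_rtn_path capacity, and the (possibly clamped)
	 * hop count, e.g. an untruncated 3-hop DR path ends up as
	 * IB_NOTICE_TRAP_DR_NOTICE | 3.
	 */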
509 
510 	trap->len = sizeof(trap->data);
511 
512 	send_trap(ibp, trap);
513 }
514 
515 /*
516  * Send a Port Capability Mask Changed trap (ch. 14.3.11).
517  */
518 void hfi1_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num)
519 {
520 	struct trap_node *trap;
521 	struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
522 	struct hfi1_devdata *dd = dd_from_dev(verbs_dev);
523 	struct hfi1_ibport *ibp = &dd->pport[port_num - 1].ibport_data;
524 	u32 lid = ppd_from_ibp(ibp)->lid;
525 
526 	trap = create_trap_node(IB_NOTICE_TYPE_INFO,
527 				OPA_TRAP_CHANGE_CAPABILITY,
528 				lid);
529 	if (!trap)
530 		return;
531 
532 	trap->data.ntc_144.lid = trap->data.issuer_lid;
533 	trap->data.ntc_144.new_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
534 	trap->data.ntc_144.cap_mask3 = cpu_to_be16(ibp->rvp.port_cap3_flags);
535 
536 	trap->len = sizeof(trap->data);
537 	send_trap(ibp, trap);
538 }
539 
540 /*
541  * Send a System Image GUID Changed trap (ch. 14.3.12).
542  */
543 void hfi1_sys_guid_chg(struct hfi1_ibport *ibp)
544 {
545 	struct trap_node *trap;
546 	u32 lid = ppd_from_ibp(ibp)->lid;
547 
548 	trap = create_trap_node(IB_NOTICE_TYPE_INFO, OPA_TRAP_CHANGE_SYSGUID,
549 				lid);
550 	if (!trap)
551 		return;
552 
553 	trap->data.ntc_145.new_sys_guid = ib_hfi1_sys_image_guid;
554 	trap->data.ntc_145.lid = trap->data.issuer_lid;
555 
556 	trap->len = sizeof(trap->data);
557 	send_trap(ibp, trap);
558 }
559 
560 /*
561  * Send a Node Description Changed trap (ch. 14.3.13).
562  */
563 void hfi1_node_desc_chg(struct hfi1_ibport *ibp)
564 {
565 	struct trap_node *trap;
566 	u32 lid = ppd_from_ibp(ibp)->lid;
567 
568 	trap = create_trap_node(IB_NOTICE_TYPE_INFO,
569 				OPA_TRAP_CHANGE_CAPABILITY,
570 				lid);
571 	if (!trap)
572 		return;
573 
574 	trap->data.ntc_144.lid = trap->data.issuer_lid;
575 	trap->data.ntc_144.change_flags =
576 		cpu_to_be16(OPA_NOTICE_TRAP_NODE_DESC_CHG);
577 
578 	trap->len = sizeof(trap->data);
579 	send_trap(ibp, trap);
580 }
581 
582 static int __subn_get_opa_nodedesc(struct opa_smp *smp, u32 am,
583 				   u8 *data, struct ib_device *ibdev,
584 				   u8 port, u32 *resp_len, u32 max_len)
585 {
586 	struct opa_node_description *nd;
587 
588 	if (am || smp_length_check(sizeof(*nd), max_len)) {
589 		smp->status |= IB_SMP_INVALID_FIELD;
590 		return reply((struct ib_mad_hdr *)smp);
591 	}
592 
593 	nd = (struct opa_node_description *)data;
594 
595 	memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));
596 
597 	if (resp_len)
598 		*resp_len += sizeof(*nd);
599 
600 	return reply((struct ib_mad_hdr *)smp);
601 }
602 
603 static int __subn_get_opa_nodeinfo(struct opa_smp *smp, u32 am, u8 *data,
604 				   struct ib_device *ibdev, u8 port,
605 				   u32 *resp_len, u32 max_len)
606 {
607 	struct opa_node_info *ni;
608 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
609 	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
610 
611 	ni = (struct opa_node_info *)data;
612 
613 	/* GUID 0 is illegal */
614 	if (am || pidx >= dd->num_pports || ibdev->node_guid == 0 ||
615 	    smp_length_check(sizeof(*ni), max_len) ||
616 	    get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
617 		smp->status |= IB_SMP_INVALID_FIELD;
618 		return reply((struct ib_mad_hdr *)smp);
619 	}
620 
621 	ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
622 	ni->base_version = OPA_MGMT_BASE_VERSION;
623 	ni->class_version = OPA_SM_CLASS_VERSION;
624 	ni->node_type = 1;     /* channel adapter */
625 	ni->num_ports = ibdev->phys_port_cnt;
626 	/* This is already in network order */
627 	ni->system_image_guid = ib_hfi1_sys_image_guid;
628 	ni->node_guid = ibdev->node_guid;
629 	ni->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
630 	ni->device_id = cpu_to_be16(dd->pcidev->device);
631 	ni->revision = cpu_to_be32(dd->minrev);
632 	ni->local_port_num = port;
633 	ni->vendor_id[0] = dd->oui1;
634 	ni->vendor_id[1] = dd->oui2;
635 	ni->vendor_id[2] = dd->oui3;
636 
637 	if (resp_len)
638 		*resp_len += sizeof(*ni);
639 
640 	return reply((struct ib_mad_hdr *)smp);
641 }
642 
643 static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev,
644 			     u8 port)
645 {
646 	struct ib_node_info *nip = (struct ib_node_info *)&smp->data;
647 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
648 	unsigned pidx = port - 1; /* IB number port from 1, hw from 0 */
649 
650 	/* GUID 0 is illegal */
651 	if (smp->attr_mod || pidx >= dd->num_pports ||
652 	    ibdev->node_guid == 0 ||
653 	    get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX) == 0) {
654 		smp->status |= IB_SMP_INVALID_FIELD;
655 		return reply((struct ib_mad_hdr *)smp);
656 	}
657 
658 	nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX);
659 	nip->base_version = OPA_MGMT_BASE_VERSION;
660 	nip->class_version = OPA_SM_CLASS_VERSION;
661 	nip->node_type = 1;     /* channel adapter */
662 	nip->num_ports = ibdev->phys_port_cnt;
663 	/* This is already in network order */
664 	nip->sys_guid = ib_hfi1_sys_image_guid;
665 	nip->node_guid = ibdev->node_guid;
666 	nip->partition_cap = cpu_to_be16(hfi1_get_npkeys(dd));
667 	nip->device_id = cpu_to_be16(dd->pcidev->device);
668 	nip->revision = cpu_to_be32(dd->minrev);
669 	nip->local_port_num = port;
670 	nip->vendor_id[0] = dd->oui1;
671 	nip->vendor_id[1] = dd->oui2;
672 	nip->vendor_id[2] = dd->oui3;
673 
674 	return reply((struct ib_mad_hdr *)smp);
675 }
676 
677 static void set_link_width_enabled(struct hfi1_pportdata *ppd, u32 w)
678 {
679 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_ENB, w);
680 }
681 
682 static void set_link_width_downgrade_enabled(struct hfi1_pportdata *ppd, u32 w)
683 {
684 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LWID_DG_ENB, w);
685 }
686 
687 static void set_link_speed_enabled(struct hfi1_pportdata *ppd, u32 s)
688 {
689 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_SPD_ENB, s);
690 }
691 
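/*
 * Editorial summary of the M_Key lease logic below: a failed check on a
 * Set or TrapRepress (or on a Get when mkeyprot >= 2) counts as a
 * violation and, if mkey_lease_period is nonzero (say 30), starts a
 * 30 * HZ jiffies lease; a valid Get/Set/TrapRepress before expiry
 * cancels the lease, while expiry clears mkeyprot so the SM can regain
 * control of the port.
 */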
692 static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
693 		      int mad_flags, __be64 mkey, __be32 dr_slid,
694 		      u8 return_path[], u8 hop_cnt)
695 {
696 	int valid_mkey = 0;
697 	int ret = 0;
698 
699 	/* Is the mkey in the process of expiring? */
700 	if (ibp->rvp.mkey_lease_timeout &&
701 	    time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) {
702 		/* Clear timeout and mkey protection field. */
703 		ibp->rvp.mkey_lease_timeout = 0;
704 		ibp->rvp.mkeyprot = 0;
705 	}
706 
707 	if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 ||
708 	    ibp->rvp.mkey == mkey)
709 		valid_mkey = 1;
710 
711 	/* Unset lease timeout on any valid Get/Set/TrapRepress */
712 	if (valid_mkey && ibp->rvp.mkey_lease_timeout &&
713 	    (mad->method == IB_MGMT_METHOD_GET ||
714 	     mad->method == IB_MGMT_METHOD_SET ||
715 	     mad->method == IB_MGMT_METHOD_TRAP_REPRESS))
716 		ibp->rvp.mkey_lease_timeout = 0;
717 
718 	if (!valid_mkey) {
719 		switch (mad->method) {
720 		case IB_MGMT_METHOD_GET:
721 			/* Bad mkey not a violation below level 2 */
722 			if (ibp->rvp.mkeyprot < 2)
723 				break;
724 			/* fall through */
725 		case IB_MGMT_METHOD_SET:
726 		case IB_MGMT_METHOD_TRAP_REPRESS:
727 			if (ibp->rvp.mkey_violations != 0xFFFF)
728 				++ibp->rvp.mkey_violations;
729 			if (!ibp->rvp.mkey_lease_timeout &&
730 			    ibp->rvp.mkey_lease_period)
731 				ibp->rvp.mkey_lease_timeout = jiffies +
732 					ibp->rvp.mkey_lease_period * HZ;
733 			/* Generate a trap notice. */
734 			bad_mkey(ibp, mad, mkey, dr_slid, return_path,
735 				 hop_cnt);
736 			ret = 1;
737 		}
738 	}
739 
740 	return ret;
741 }
742 
743 /*
744  * The SMA caches reads from LCB registers in case the LCB is unavailable.
745  * (The LCB is unavailable in certain link states, for example.)
746  */
747 struct lcb_datum {
748 	u32 off;
749 	u64 val;
750 };
751 
752 static struct lcb_datum lcb_cache[] = {
753 	{ DC_LCB_STS_ROUND_TRIP_LTP_CNT, 0 },
754 };
755 
756 static int write_lcb_cache(u32 off, u64 val)
757 {
758 	int i;
759 
760 	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
761 		if (lcb_cache[i].off == off) {
762 			lcb_cache[i].val = val;
763 			return 0;
764 		}
765 	}
766 
767 	pr_warn("%s bad offset 0x%x\n", __func__, off);
768 	return -1;
769 }
770 
771 static int read_lcb_cache(u32 off, u64 *val)
772 {
773 	int i;
774 
775 	for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
776 		if (lcb_cache[i].off == off) {
777 			*val = lcb_cache[i].val;
778 			return 0;
779 		}
780 	}
781 
782 	pr_warn("%s bad offset 0x%x\n", __func__, off);
783 	return -1;
784 }
785 
786 void read_ltp_rtt(struct hfi1_devdata *dd)
787 {
788 	u64 reg;
789 
790 	if (read_lcb_csr(dd, DC_LCB_STS_ROUND_TRIP_LTP_CNT, &reg))
791 		dd_dev_err(dd, "%s: unable to read LTP RTT\n", __func__);
792 	else
793 		write_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, reg);
794 }
795 
796 static int __subn_get_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
797 				   struct ib_device *ibdev, u8 port,
798 				   u32 *resp_len, u32 max_len)
799 {
800 	int i;
801 	struct hfi1_devdata *dd;
802 	struct hfi1_pportdata *ppd;
803 	struct hfi1_ibport *ibp;
804 	struct opa_port_info *pi = (struct opa_port_info *)data;
805 	u8 mtu;
806 	u8 credit_rate;
807 	u8 is_beaconing_active;
808 	u32 state;
809 	u32 num_ports = OPA_AM_NPORT(am);
810 	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
811 	u32 buffer_units;
812 	u64 tmp = 0;
813 
814 	if (num_ports != 1 || smp_length_check(sizeof(*pi), max_len)) {
815 		smp->status |= IB_SMP_INVALID_FIELD;
816 		return reply((struct ib_mad_hdr *)smp);
817 	}
818 
819 	dd = dd_from_ibdev(ibdev);
820 	/* IB numbers ports from 1, hw from 0 */
821 	ppd = dd->pport + (port - 1);
822 	ibp = &ppd->ibport_data;
823 
824 	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
825 	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
826 		smp->status |= IB_SMP_INVALID_FIELD;
827 		return reply((struct ib_mad_hdr *)smp);
828 	}
829 
830 	pi->lid = cpu_to_be32(ppd->lid);
831 
832 	/* Only return the mkey if the protection field allows it. */
833 	if (!(smp->method == IB_MGMT_METHOD_GET &&
834 	      ibp->rvp.mkey != smp->mkey &&
835 	      ibp->rvp.mkeyprot == 1))
836 		pi->mkey = ibp->rvp.mkey;
837 
838 	pi->subnet_prefix = ibp->rvp.gid_prefix;
839 	pi->sm_lid = cpu_to_be32(ibp->rvp.sm_lid);
840 	pi->ib_cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags);
841 	pi->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period);
842 	pi->sm_trap_qp = cpu_to_be32(ppd->sm_trap_qp);
843 	pi->sa_qp = cpu_to_be32(ppd->sa_qp);
844 
845 	pi->link_width.enabled = cpu_to_be16(ppd->link_width_enabled);
846 	pi->link_width.supported = cpu_to_be16(ppd->link_width_supported);
847 	pi->link_width.active = cpu_to_be16(ppd->link_width_active);
848 
849 	pi->link_width_downgrade.supported =
850 			cpu_to_be16(ppd->link_width_downgrade_supported);
851 	pi->link_width_downgrade.enabled =
852 			cpu_to_be16(ppd->link_width_downgrade_enabled);
853 	pi->link_width_downgrade.tx_active =
854 			cpu_to_be16(ppd->link_width_downgrade_tx_active);
855 	pi->link_width_downgrade.rx_active =
856 			cpu_to_be16(ppd->link_width_downgrade_rx_active);
857 
858 	pi->link_speed.supported = cpu_to_be16(ppd->link_speed_supported);
859 	pi->link_speed.active = cpu_to_be16(ppd->link_speed_active);
860 	pi->link_speed.enabled = cpu_to_be16(ppd->link_speed_enabled);
861 
862 	state = driver_lstate(ppd);
863 
864 	if (start_of_sm_config && (state == IB_PORT_INIT))
865 		ppd->is_sm_config_started = 1;
866 
867 	pi->port_phys_conf = (ppd->port_type & 0xf);
868 
869 	pi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
870 	pi->port_states.ledenable_offlinereason |=
871 		ppd->is_sm_config_started << 5;
872 	/*
873 	 * This pairs with the memory barrier in hfi1_start_led_override to
874 	 * ensure that we read the correct state of LED beaconing represented
875 	 * by led_override_timer_active
876 	 */
877 	smp_rmb();
878 	is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
879 	pi->port_states.ledenable_offlinereason |= is_beaconing_active << 6;
880 	pi->port_states.ledenable_offlinereason |=
881 		ppd->offline_disabled_reason;
882 
883 	pi->port_states.portphysstate_portstate =
884 		(driver_pstate(ppd) << 4) | state;
885 
886 	pi->mkeyprotect_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc;
887 
888 	memset(pi->neigh_mtu.pvlx_to_mtu, 0, sizeof(pi->neigh_mtu.pvlx_to_mtu));
889 	for (i = 0; i < ppd->vls_supported; i++) {
890 		mtu = mtu_to_enum(dd->vld[i].mtu, HFI1_DEFAULT_ACTIVE_MTU);
891 		if ((i % 2) == 0)
892 			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= (mtu << 4);
893 		else
894 			pi->neigh_mtu.pvlx_to_mtu[i / 2] |= mtu;
895 	}
896 	/* don't forget VL 15 */
897 	mtu = mtu_to_enum(dd->vld[15].mtu, 2048);
898 	pi->neigh_mtu.pvlx_to_mtu[15 / 2] |= mtu;
899 	pi->smsl = ibp->rvp.sm_sl & OPA_PI_MASK_SMSL;
900 	pi->operational_vls = hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS);
901 	pi->partenforce_filterraw |=
902 		(ppd->linkinit_reason & OPA_PI_MASK_LINKINIT_REASON);
903 	if (ppd->part_enforce & HFI1_PART_ENFORCE_IN)
904 		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_IN;
905 	if (ppd->part_enforce & HFI1_PART_ENFORCE_OUT)
906 		pi->partenforce_filterraw |= OPA_PI_MASK_PARTITION_ENFORCE_OUT;
907 	pi->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations);
908 	/* P_KeyViolations are counted by hardware. */
909 	pi->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations);
910 	pi->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations);
911 
912 	pi->vl.cap = ppd->vls_supported;
913 	pi->vl.high_limit = cpu_to_be16(ibp->rvp.vl_high_limit);
914 	pi->vl.arb_high_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_CAP);
915 	pi->vl.arb_low_cap = (u8)hfi1_get_ib_cfg(ppd, HFI1_IB_CFG_VL_LOW_CAP);
916 
917 	pi->clientrereg_subnettimeout = ibp->rvp.subnet_timeout;
918 
919 	pi->port_link_mode  = cpu_to_be16(OPA_PORT_LINK_MODE_OPA << 10 |
920 					  OPA_PORT_LINK_MODE_OPA << 5 |
921 					  OPA_PORT_LINK_MODE_OPA);
922 
923 	pi->port_ltp_crc_mode = cpu_to_be16(ppd->port_ltp_crc_mode);
924 
925 	pi->port_mode = cpu_to_be16(
926 				ppd->is_active_optimize_enabled ?
927 					OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE : 0);
928 
929 	pi->port_packet_format.supported =
930 		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
931 			    OPA_PORT_PACKET_FORMAT_16B);
932 	pi->port_packet_format.enabled =
933 		cpu_to_be16(OPA_PORT_PACKET_FORMAT_9B |
934 			    OPA_PORT_PACKET_FORMAT_16B);
935 
936 	/* flit_control.interleave is laid out as follows (OPA V1, version .76):
937 	 * bits		use
938 	 * ----		---
939 	 * 2		res
940 	 * 2		DistanceSupported
941 	 * 2		DistanceEnabled
942 	 * 5		MaxNestLevelTxEnabled
943 	 * 5		MaxNestLevelRxSupported
944 	 *
945 	 * HFI supports only "distance mode 1" (see OPA V1, version .76,
946 	 * section 9.6.2), so set DistanceSupported, DistanceEnabled
947 	 * to 0x1.
948 	 */
949 	pi->flit_control.interleave = cpu_to_be16(0x1400);
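	/*
	 * Decoding the constant (editorial note): 0x1400 is
	 * 0b0001_0100_0000_0000; with two reserved bits at the top,
	 * bits 13:12 = 01 (DistanceSupported) and bits 11:10 = 01
	 * (DistanceEnabled), and all remaining fields are 0.
	 */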
950 
951 	pi->link_down_reason = ppd->local_link_down_reason.sma;
952 	pi->neigh_link_down_reason = ppd->neigh_link_down_reason.sma;
953 	pi->port_error_action = cpu_to_be32(ppd->port_error_action);
954 	pi->mtucap = mtu_to_enum(hfi1_max_mtu, IB_MTU_4096);
955 
956 	/* 32.768 usec. response time (guessing) */
957 	pi->resptimevalue = 3;
958 
959 	pi->local_port_num = port;
960 
961 	/* buffer info for FM */
962 	pi->overall_buffer_space = cpu_to_be16(dd->link_credits);
963 
964 	pi->neigh_node_guid = cpu_to_be64(ppd->neighbor_guid);
965 	pi->neigh_port_num = ppd->neighbor_port_number;
966 	pi->port_neigh_mode =
967 		(ppd->neighbor_type & OPA_PI_MASK_NEIGH_NODE_TYPE) |
968 		(ppd->mgmt_allowed ? OPA_PI_MASK_NEIGH_MGMT_ALLOWED : 0) |
969 		(ppd->neighbor_fm_security ?
970 			OPA_PI_MASK_NEIGH_FW_AUTH_BYPASS : 0);
971 
972 	/* HFIs shall always return VL15 credits to their
973 	 * neighbor in a timely manner, without any credit return pacing.
974 	 */
975 	credit_rate = 0;
976 	buffer_units  = (dd->vau) & OPA_PI_MASK_BUF_UNIT_BUF_ALLOC;
977 	buffer_units |= (dd->vcu << 3) & OPA_PI_MASK_BUF_UNIT_CREDIT_ACK;
978 	buffer_units |= (credit_rate << 6) &
979 				OPA_PI_MASK_BUF_UNIT_VL15_CREDIT_RATE;
980 	buffer_units |= (dd->vl15_init << 11) & OPA_PI_MASK_BUF_UNIT_VL15_INIT;
981 	pi->buffer_units = cpu_to_be32(buffer_units);
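	/*
	 * Resulting layout, per the shifts above (editorial summary):
	 * vau occupies bits 2:0, vcu bits 5:3, the (always zero) VL15
	 * credit rate starts at bit 6, and vl15_init at bit 11, each
	 * masked by its OPA_PI_MASK_BUF_UNIT_* constant.
	 */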
982 
983 	pi->opa_cap_mask = cpu_to_be16(ibp->rvp.port_cap3_flags);
984 	pi->collectivemask_multicastmask = ((OPA_COLLECTIVE_NR & 0x7)
985 					    << 3 | (OPA_MCAST_NR & 0x7));
986 
987 	/* HFI supports a replay buffer 128 LTPs in size */
988 	pi->replay_depth.buffer = 0x80;
989 	/* read the cached value of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
990 	read_lcb_cache(DC_LCB_STS_ROUND_TRIP_LTP_CNT, &tmp);
991 
992 	/*
993 	 * this counter is 16 bits wide, but the replay_depth.wire
994 	 * variable is only 8 bits
995 	 */
996 	if (tmp > 0xff)
997 		tmp = 0xff;
998 	pi->replay_depth.wire = tmp;
999 
1000 	if (resp_len)
1001 		*resp_len += sizeof(struct opa_port_info);
1002 
1003 	return reply((struct ib_mad_hdr *)smp);
1004 }
1005 
1006 /**
1007  * get_pkeys - return the PKEY table
1008  * @dd: the hfi1_ib device
1009  * @port: the IB port number
1010  * @pkeys: the pkey table is placed here
1011  */
1012 static int get_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
1013 {
1014 	struct hfi1_pportdata *ppd = dd->pport + port - 1;
1015 
1016 	memcpy(pkeys, ppd->pkeys, sizeof(ppd->pkeys));
1017 
1018 	return 0;
1019 }
1020 
1021 static int __subn_get_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
1022 				    struct ib_device *ibdev, u8 port,
1023 				    u32 *resp_len, u32 max_len)
1024 {
1025 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1026 	u32 n_blocks_req = OPA_AM_NBLK(am);
1027 	u32 start_block = am & 0x7ff;
1028 	__be16 *p;
1029 	u16 *q;
1030 	int i;
1031 	u16 n_blocks_avail;
1032 	unsigned npkeys = hfi1_get_npkeys(dd);
1033 	size_t size;
1034 
1035 	if (n_blocks_req == 0) {
1036 		pr_warn("OPA Get PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
1037 			port, start_block, n_blocks_req);
1038 		smp->status |= IB_SMP_INVALID_FIELD;
1039 		return reply((struct ib_mad_hdr *)smp);
1040 	}
1041 
1042 	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
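	/*
	 * Worked example (editorial; sizes are assumptions): with 16
	 * pkeys and 32-entry partition-table blocks, 16 / 32 + 1 = 1
	 * block is available -- the +1 accounts for the final, partially
	 * filled block.
	 */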
1043 
1044 	size = (n_blocks_req * OPA_PARTITION_TABLE_BLK_SIZE) * sizeof(u16);
1045 
1046 	if (smp_length_check(size, max_len)) {
1047 		smp->status |= IB_SMP_INVALID_FIELD;
1048 		return reply((struct ib_mad_hdr *)smp);
1049 	}
1050 
1051 	if (start_block + n_blocks_req > n_blocks_avail ||
1052 	    n_blocks_req > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
1053 		pr_warn("OPA Get PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
1055 			start_block, n_blocks_req, n_blocks_avail,
1056 			OPA_NUM_PKEY_BLOCKS_PER_SMP);
1057 		smp->status |= IB_SMP_INVALID_FIELD;
1058 		return reply((struct ib_mad_hdr *)smp);
1059 	}
1060 
1061 	p = (__be16 *)data;
1062 	q = (u16 *)data;
1063 	/* get the real pkeys if we are requesting the first block */
1064 	if (start_block == 0) {
1065 		get_pkeys(dd, port, q);
1066 		for (i = 0; i < npkeys; i++)
1067 			p[i] = cpu_to_be16(q[i]);
1068 		if (resp_len)
1069 			*resp_len += size;
1070 	} else {
1071 		smp->status |= IB_SMP_INVALID_FIELD;
1072 	}
1073 	return reply((struct ib_mad_hdr *)smp);
1074 }
1075 
1076 enum {
1077 	HFI_TRANSITION_DISALLOWED,
1078 	HFI_TRANSITION_IGNORED,
1079 	HFI_TRANSITION_ALLOWED,
1080 	HFI_TRANSITION_UNDEFINED,
1081 };
1082 
1083 /*
1084  * Use shortened names to improve readability of
1085  * {logical,physical}_state_transitions
1086  */
1087 enum {
1088 	__D = HFI_TRANSITION_DISALLOWED,
1089 	__I = HFI_TRANSITION_IGNORED,
1090 	__A = HFI_TRANSITION_ALLOWED,
1091 	__U = HFI_TRANSITION_UNDEFINED,
1092 };
1093 
1094 /*
1095  * IB_PORTPHYSSTATE_POLLING (2) through OPA_PORTPHYSSTATE_MAX (11) are
1096  * represented in physical_state_transitions.
1097  */
1098 #define __N_PHYSTATES (OPA_PORTPHYSSTATE_MAX - IB_PORTPHYSSTATE_POLLING + 1)
1099 
1100 /*
1101  * Within physical_state_transitions, rows represent "old" states,
1102  * columns "new" states, and physical_state_transitions.allowed[old][new]
1103  * indicates if the transition from old state to new state is legal (see
1104  * OPAg1v1, Table 6-4).
1105  */
1106 static const struct {
1107 	u8 allowed[__N_PHYSTATES][__N_PHYSTATES];
1108 } physical_state_transitions = {
1109 	{
1110 		/* 2    3    4    5    6    7    8    9   10   11 */
1111 	/* 2 */	{ __A, __A, __D, __D, __D, __D, __D, __D, __D, __D },
1112 	/* 3 */	{ __A, __I, __D, __D, __D, __D, __D, __D, __D, __A },
1113 	/* 4 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1114 	/* 5 */	{ __A, __A, __D, __I, __D, __D, __D, __D, __D, __D },
1115 	/* 6 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1116 	/* 7 */	{ __D, __A, __D, __D, __D, __I, __D, __D, __D, __D },
1117 	/* 8 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1118 	/* 9 */	{ __I, __A, __D, __D, __D, __D, __D, __I, __D, __D },
1119 	/*10 */	{ __U, __U, __U, __U, __U, __U, __U, __U, __U, __U },
1120 	/*11 */	{ __D, __A, __D, __D, __D, __D, __D, __D, __D, __I },
1121 	}
1122 };
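/*
 * Reading the table (editorial example): a request to move from
 * Polling (2) to Disabled (3) indexes allowed[2 - 2][3 - 2], i.e. row
 * 2, column 3 above, which is __A -- an allowed transition.
 */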
1123 
1124 /*
1125  * IB_PORT_DOWN (1) through IB_PORT_ACTIVE_DEFER (5) are represented
1126  * in logical_state_transitions.
1127  */
1128 
1129 #define __N_LOGICAL_STATES (IB_PORT_ACTIVE_DEFER - IB_PORT_DOWN + 1)
1130 
1131 /*
1132  * Within logical_state_transitions rows represent "old" states,
1133  * columns "new" states, and logical_state_transitions.allowed[old][new]
1134  * indicates if the transition from old state to new state is legal (see
1135  * OPAg1v1, Table 9-12).
1136  */
1137 static const struct {
1138 	u8 allowed[__N_LOGICAL_STATES][__N_LOGICAL_STATES];
1139 } logical_state_transitions = {
1140 	{
1141 		/* 1    2    3    4    5 */
1142 	/* 1 */	{ __I, __D, __D, __D, __U},
1143 	/* 2 */	{ __D, __I, __A, __D, __U},
1144 	/* 3 */	{ __D, __D, __I, __A, __U},
1145 	/* 4 */	{ __D, __D, __I, __I, __U},
1146 	/* 5 */	{ __U, __U, __U, __U, __U},
1147 	}
1148 };
1149 
1150 static int logical_transition_allowed(int old, int new)
1151 {
1152 	if (old < IB_PORT_NOP || old > IB_PORT_ACTIVE_DEFER ||
1153 	    new < IB_PORT_NOP || new > IB_PORT_ACTIVE_DEFER) {
1154 		pr_warn("invalid logical state(s) (old %d new %d)\n",
1155 			old, new);
1156 		return HFI_TRANSITION_UNDEFINED;
1157 	}
1158 
1159 	if (new == IB_PORT_NOP)
1160 		return HFI_TRANSITION_ALLOWED; /* always allowed */
1161 
1162 	/* adjust states for indexing into logical_state_transitions */
1163 	old -= IB_PORT_DOWN;
1164 	new -= IB_PORT_DOWN;
1165 
1166 	if (old < 0 || new < 0)
1167 		return HFI_TRANSITION_UNDEFINED;
1168 	return logical_state_transitions.allowed[old][new];
1169 }
1170 
1171 static int physical_transition_allowed(int old, int new)
1172 {
1173 	if (old < IB_PORTPHYSSTATE_NOP || old > OPA_PORTPHYSSTATE_MAX ||
1174 	    new < IB_PORTPHYSSTATE_NOP || new > OPA_PORTPHYSSTATE_MAX) {
1175 		pr_warn("invalid physical state(s) (old %d new %d)\n",
1176 			old, new);
1177 		return HFI_TRANSITION_UNDEFINED;
1178 	}
1179 
1180 	if (new == IB_PORTPHYSSTATE_NOP)
1181 		return HFI_TRANSITION_ALLOWED; /* always allowed */
1182 
1183 	/* adjust states for indexing into physical_state_transitions */
1184 	old -= IB_PORTPHYSSTATE_POLLING;
1185 	new -= IB_PORTPHYSSTATE_POLLING;
1186 
1187 	if (old < 0 || new < 0)
1188 		return HFI_TRANSITION_UNDEFINED;
1189 	return physical_state_transitions.allowed[old][new];
1190 }
1191 
1192 static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
1193 					  u32 logical_new, u32 physical_new)
1194 {
1195 	u32 physical_old = driver_pstate(ppd);
1196 	u32 logical_old = driver_lstate(ppd);
1197 	int ret, logical_allowed, physical_allowed;
1198 
1199 	ret = logical_transition_allowed(logical_old, logical_new);
1200 	logical_allowed = ret;
1201 
1202 	if (ret == HFI_TRANSITION_DISALLOWED ||
1203 	    ret == HFI_TRANSITION_UNDEFINED) {
1204 		pr_warn("invalid logical state transition %s -> %s\n",
1205 			opa_lstate_name(logical_old),
1206 			opa_lstate_name(logical_new));
1207 		return ret;
1208 	}
1209 
1210 	ret = physical_transition_allowed(physical_old, physical_new);
1211 	physical_allowed = ret;
1212 
1213 	if (ret == HFI_TRANSITION_DISALLOWED ||
1214 	    ret == HFI_TRANSITION_UNDEFINED) {
1215 		pr_warn("invalid physical state transition %s -> %s\n",
1216 			opa_pstate_name(physical_old),
1217 			opa_pstate_name(physical_new));
1218 		return ret;
1219 	}
1220 
1221 	if (logical_allowed == HFI_TRANSITION_IGNORED &&
1222 	    physical_allowed == HFI_TRANSITION_IGNORED)
1223 		return HFI_TRANSITION_IGNORED;
1224 
1225 	/*
1226 	 * A change request of Physical Port State from
1227 	 * 'Offline' to 'Polling' should be ignored.
1228 	 */
1229 	if ((physical_old == OPA_PORTPHYSSTATE_OFFLINE) &&
1230 	    (physical_new == IB_PORTPHYSSTATE_POLLING))
1231 		return HFI_TRANSITION_IGNORED;
1232 
1233 	/*
1234 	 * Either physical_allowed or logical_allowed is
1235 	 * HFI_TRANSITION_ALLOWED.
1236 	 */
1237 	return HFI_TRANSITION_ALLOWED;
1238 }
1239 
1240 static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
1241 			   u32 logical_state, u32 phys_state, int local_mad)
1242 {
1243 	struct hfi1_devdata *dd = ppd->dd;
1244 	u32 link_state;
1245 	int ret;
1246 
1247 	ret = port_states_transition_allowed(ppd, logical_state, phys_state);
1248 	if (ret == HFI_TRANSITION_DISALLOWED ||
1249 	    ret == HFI_TRANSITION_UNDEFINED) {
1250 		/* error message emitted above */
1251 		smp->status |= IB_SMP_INVALID_FIELD;
1252 		return 0;
1253 	}
1254 
1255 	if (ret == HFI_TRANSITION_IGNORED)
1256 		return 0;
1257 
1258 	if ((phys_state != IB_PORTPHYSSTATE_NOP) &&
1259 	    !(logical_state == IB_PORT_DOWN ||
1260 	      logical_state == IB_PORT_NOP)) {
1261 		pr_warn("SubnSet(OPA_PortInfo) port state invalid: logical_state 0x%x physical_state 0x%x\n",
1262 			logical_state, phys_state);
1263 		smp->status |= IB_SMP_INVALID_FIELD;
1264 	}
1265 
1266 	/*
1267 	 * Logical state changes are summarized in OPAv1g1 spec.,
1268 	 * Table 9-12; physical state changes are summarized in
1269 	 * OPAv1g1 spec., Table 6-4.
1270 	 */
1271 	switch (logical_state) {
1272 	case IB_PORT_NOP:
1273 		if (phys_state == IB_PORTPHYSSTATE_NOP)
1274 			break;
1275 		/* FALLTHROUGH */
1276 	case IB_PORT_DOWN:
1277 		if (phys_state == IB_PORTPHYSSTATE_NOP) {
1278 			link_state = HLS_DN_DOWNDEF;
1279 		} else if (phys_state == IB_PORTPHYSSTATE_POLLING) {
1280 			link_state = HLS_DN_POLL;
1281 			set_link_down_reason(ppd, OPA_LINKDOWN_REASON_FM_BOUNCE,
1282 					     0, OPA_LINKDOWN_REASON_FM_BOUNCE);
1283 		} else if (phys_state == IB_PORTPHYSSTATE_DISABLED) {
1284 			link_state = HLS_DN_DISABLE;
1285 		} else {
1286 			pr_warn("SubnSet(OPA_PortInfo) invalid physical state 0x%x\n",
1287 				phys_state);
1288 			smp->status |= IB_SMP_INVALID_FIELD;
1289 			break;
1290 		}
1291 
1292 		if ((link_state == HLS_DN_POLL ||
1293 		     link_state == HLS_DN_DOWNDEF)) {
1294 			/*
1295 			 * Going to poll.  No matter what the current state,
1296 			 * always move offline first, then tune and start the
1297 			 * link.  This correctly handles a FM link bounce and
1298 			 * a link enable.  Going offline is a no-op if already
1299 			 * offline.
1300 			 */
1301 			set_link_state(ppd, HLS_DN_OFFLINE);
1302 			start_link(ppd);
1303 		} else {
1304 			set_link_state(ppd, link_state);
1305 		}
1306 		if (link_state == HLS_DN_DISABLE &&
1307 		    (ppd->offline_disabled_reason >
1308 		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED) ||
1309 		     ppd->offline_disabled_reason ==
1310 		     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
1311 			ppd->offline_disabled_reason =
1312 			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
1313 		/*
1314 		 * Don't send a reply if the response would be sent
1315 		 * through the disabled port.
1316 		 */
1317 		if (link_state == HLS_DN_DISABLE && !local_mad)
1318 			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
1319 		break;
1320 	case IB_PORT_ARMED:
1321 		ret = set_link_state(ppd, HLS_UP_ARMED);
1322 		if (!ret)
1323 			send_idle_sma(dd, SMA_IDLE_ARM);
1324 		break;
1325 	case IB_PORT_ACTIVE:
1326 		if (ppd->neighbor_normal) {
1327 			ret = set_link_state(ppd, HLS_UP_ACTIVE);
1328 			if (ret == 0)
1329 				send_idle_sma(dd, SMA_IDLE_ACTIVE);
1330 		} else {
1331 			pr_warn("SubnSet(OPA_PortInfo) Cannot move to Active with NeighborNormal 0\n");
1332 			smp->status |= IB_SMP_INVALID_FIELD;
1333 		}
1334 		break;
1335 	default:
1336 		pr_warn("SubnSet(OPA_PortInfo) invalid logical state 0x%x\n",
1337 			logical_state);
1338 		smp->status |= IB_SMP_INVALID_FIELD;
1339 	}
1340 
1341 	return 0;
1342 }
1343 
1344 /**
1345  * __subn_set_opa_portinfo - set port information
1346  * @smp: the incoming SM packet
1347  * @ibdev: the infiniband device
1348  * @port: the port on the device
1349  *
1350  */
1351 static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
1352 				   struct ib_device *ibdev, u8 port,
1353 				   u32 *resp_len, u32 max_len, int local_mad)
1354 {
1355 	struct opa_port_info *pi = (struct opa_port_info *)data;
1356 	struct ib_event event;
1357 	struct hfi1_devdata *dd;
1358 	struct hfi1_pportdata *ppd;
1359 	struct hfi1_ibport *ibp;
1360 	u8 clientrereg;
1361 	unsigned long flags;
1362 	u32 smlid;
1363 	u32 lid;
1364 	u8 ls_old, ls_new, ps_new;
1365 	u8 vls;
1366 	u8 msl;
1367 	u8 crc_enabled;
1368 	u16 lse, lwe, mtu;
1369 	u32 num_ports = OPA_AM_NPORT(am);
1370 	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
1371 	int ret, i, invalid = 0, call_set_mtu = 0;
1372 	int call_link_downgrade_policy = 0;
1373 
1374 	if (num_ports != 1 ||
1375 	    smp_length_check(sizeof(*pi), max_len)) {
1376 		smp->status |= IB_SMP_INVALID_FIELD;
1377 		return reply((struct ib_mad_hdr *)smp);
1378 	}
1379 
1380 	lid = be32_to_cpu(pi->lid);
1381 	if (lid & 0xFF000000) {
1382 		pr_warn("OPA_PortInfo lid out of range: %X\n", lid);
1383 		smp->status |= IB_SMP_INVALID_FIELD;
1384 		goto get_only;
1385 	}
1386 
1388 	smlid = be32_to_cpu(pi->sm_lid);
1389 	if (smlid & 0xFF000000) {
1390 		pr_warn("OPA_PortInfo SM lid out of range: %X\n", smlid);
1391 		smp->status |= IB_SMP_INVALID_FIELD;
1392 		goto get_only;
1393 	}
1394 
1395 	clientrereg = (pi->clientrereg_subnettimeout &
1396 			OPA_PI_MASK_CLIENT_REREGISTER);
1397 
1398 	dd = dd_from_ibdev(ibdev);
1399 	/* IB numbers ports from 1, hw from 0 */
1400 	ppd = dd->pport + (port - 1);
1401 	ibp = &ppd->ibport_data;
1402 	event.device = ibdev;
1403 	event.element.port_num = port;
1404 
1405 	ls_old = driver_lstate(ppd);
1406 
1407 	ibp->rvp.mkey = pi->mkey;
1408 	if (ibp->rvp.gid_prefix != pi->subnet_prefix) {
1409 		ibp->rvp.gid_prefix = pi->subnet_prefix;
1410 		event.event = IB_EVENT_GID_CHANGE;
1411 		ib_dispatch_event(&event);
1412 	}
1413 	ibp->rvp.mkey_lease_period = be16_to_cpu(pi->mkey_lease_period);
1414 
1415 	/* Must be a valid unicast LID address. */
1416 	if ((lid == 0 && ls_old > IB_PORT_INIT) ||
1417 	     (hfi1_is_16B_mcast(lid))) {
1418 		smp->status |= IB_SMP_INVALID_FIELD;
1419 		pr_warn("SubnSet(OPA_PortInfo) lid invalid 0x%x\n",
1420 			lid);
1421 	} else if (ppd->lid != lid ||
1422 		 ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC)) {
1423 		if (ppd->lid != lid)
1424 			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LID_CHANGE_BIT);
1425 		if (ppd->lmc != (pi->mkeyprotect_lmc & OPA_PI_MASK_LMC))
1426 			hfi1_set_uevent_bits(ppd, _HFI1_EVENT_LMC_CHANGE_BIT);
1427 		hfi1_set_lid(ppd, lid, pi->mkeyprotect_lmc & OPA_PI_MASK_LMC);
1428 		event.event = IB_EVENT_LID_CHANGE;
1429 		ib_dispatch_event(&event);
1430 
1431 		if (HFI1_PORT_GUID_INDEX + 1 < HFI1_GUIDS_PER_PORT) {
1432 			/* Manufacture GID from LID to support extended
1433 			 * addresses
1434 			 */
1435 			ppd->guids[HFI1_PORT_GUID_INDEX + 1] =
1436 				be64_to_cpu(OPA_MAKE_ID(lid));
1437 			event.event = IB_EVENT_GID_CHANGE;
1438 			ib_dispatch_event(&event);
1439 		}
1440 	}
1441 
1442 	msl = pi->smsl & OPA_PI_MASK_SMSL;
1443 	if (pi->partenforce_filterraw & OPA_PI_MASK_LINKINIT_REASON)
1444 		ppd->linkinit_reason =
1445 			(pi->partenforce_filterraw &
1446 			 OPA_PI_MASK_LINKINIT_REASON);
1447 
1448 	/* Must be a valid unicast LID address. */
1449 	if ((smlid == 0 && ls_old > IB_PORT_INIT) ||
1450 	     (hfi1_is_16B_mcast(smlid))) {
1451 		smp->status |= IB_SMP_INVALID_FIELD;
1452 		pr_warn("SubnSet(OPA_PortInfo) smlid invalid 0x%x\n", smlid);
1453 	} else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) {
1454 		pr_warn("SubnSet(OPA_PortInfo) smlid 0x%x\n", smlid);
1455 		spin_lock_irqsave(&ibp->rvp.lock, flags);
1456 		if (ibp->rvp.sm_ah) {
1457 			if (smlid != ibp->rvp.sm_lid)
1458 				hfi1_modify_qp0_ah(ibp, ibp->rvp.sm_ah, smlid);
1459 			if (msl != ibp->rvp.sm_sl)
1460 				rdma_ah_set_sl(&ibp->rvp.sm_ah->attr, msl);
1461 		}
1462 		spin_unlock_irqrestore(&ibp->rvp.lock, flags);
1463 		if (smlid != ibp->rvp.sm_lid)
1464 			ibp->rvp.sm_lid = smlid;
1465 		if (msl != ibp->rvp.sm_sl)
1466 			ibp->rvp.sm_sl = msl;
1467 		event.event = IB_EVENT_SM_CHANGE;
1468 		ib_dispatch_event(&event);
1469 	}
1470 
1471 	if (pi->link_down_reason == 0) {
1472 		ppd->local_link_down_reason.sma = 0;
1473 		ppd->local_link_down_reason.latest = 0;
1474 	}
1475 
1476 	if (pi->neigh_link_down_reason == 0) {
1477 		ppd->neigh_link_down_reason.sma = 0;
1478 		ppd->neigh_link_down_reason.latest = 0;
1479 	}
1480 
1481 	ppd->sm_trap_qp = be32_to_cpu(pi->sm_trap_qp);
1482 	ppd->sa_qp = be32_to_cpu(pi->sa_qp);
1483 
1484 	ppd->port_error_action = be32_to_cpu(pi->port_error_action);
1485 	lwe = be16_to_cpu(pi->link_width.enabled);
1486 	if (lwe) {
1487 		if (lwe == OPA_LINK_WIDTH_RESET ||
1488 		    lwe == OPA_LINK_WIDTH_RESET_OLD)
1489 			set_link_width_enabled(ppd, ppd->link_width_supported);
1490 		else if ((lwe & ~ppd->link_width_supported) == 0)
1491 			set_link_width_enabled(ppd, lwe);
1492 		else
1493 			smp->status |= IB_SMP_INVALID_FIELD;
1494 	}
1495 	lwe = be16_to_cpu(pi->link_width_downgrade.enabled);
1496 	/* LWD.E is always applied - 0 means "disabled" */
1497 	if (lwe == OPA_LINK_WIDTH_RESET ||
1498 	    lwe == OPA_LINK_WIDTH_RESET_OLD) {
1499 		set_link_width_downgrade_enabled(ppd,
1500 						 ppd->link_width_downgrade_supported);
1503 	} else if ((lwe & ~ppd->link_width_downgrade_supported) == 0) {
1504 		/* only set and apply if something changed */
1505 		if (lwe != ppd->link_width_downgrade_enabled) {
1506 			set_link_width_downgrade_enabled(ppd, lwe);
1507 			call_link_downgrade_policy = 1;
1508 		}
1509 	} else {
1510 		smp->status |= IB_SMP_INVALID_FIELD;
1511 	}
1512 	lse = be16_to_cpu(pi->link_speed.enabled);
1513 	if (lse) {
1514 		if (lse & be16_to_cpu(pi->link_speed.supported))
1515 			set_link_speed_enabled(ppd, lse);
1516 		else
1517 			smp->status |= IB_SMP_INVALID_FIELD;
1518 	}
1519 
1520 	ibp->rvp.mkeyprot =
1521 		(pi->mkeyprotect_lmc & OPA_PI_MASK_MKEY_PROT_BIT) >> 6;
1522 	ibp->rvp.vl_high_limit = be16_to_cpu(pi->vl.high_limit) & 0xFF;
1523 	(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_VL_HIGH_LIMIT,
1524 				    ibp->rvp.vl_high_limit);
1525 
1526 	if (ppd->vls_supported / 2 > ARRAY_SIZE(pi->neigh_mtu.pvlx_to_mtu) ||
1527 	    ppd->vls_supported > ARRAY_SIZE(dd->vld)) {
1528 		smp->status |= IB_SMP_INVALID_FIELD;
1529 		return reply((struct ib_mad_hdr *)smp);
1530 	}
1531 	for (i = 0; i < ppd->vls_supported; i++) {
1532 		if ((i % 2) == 0)
1533 			mtu = enum_to_mtu((pi->neigh_mtu.pvlx_to_mtu[i / 2] >>
1534 					   4) & 0xF);
1535 		else
1536 			mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[i / 2] &
1537 					  0xF);
1538 		if (mtu == 0xffff) {
1539 			pr_warn("SubnSet(OPA_PortInfo) mtu invalid %d (0x%x)\n",
1540 				mtu,
1541 				(pi->neigh_mtu.pvlx_to_mtu[0] >> 4) & 0xF);
1542 			smp->status |= IB_SMP_INVALID_FIELD;
1543 			mtu = hfi1_max_mtu; /* use a valid MTU */
1544 		}
1545 		if (dd->vld[i].mtu != mtu) {
1546 			dd_dev_info(dd,
1547 				    "MTU change on vl %d from %d to %d\n",
1548 				    i, dd->vld[i].mtu, mtu);
1549 			dd->vld[i].mtu = mtu;
1550 			call_set_mtu++;
1551 		}
1552 	}
1553 	/* As per the OPAv1 spec, VL15 must be supported and configured
1554 	 * to operate with an MTU of 2048 or larger.
1555 	 */
1556 	mtu = enum_to_mtu(pi->neigh_mtu.pvlx_to_mtu[15 / 2] & 0xF);
1557 	if (mtu < 2048 || mtu == 0xffff)
1558 		mtu = 2048;
1559 	if (dd->vld[15].mtu != mtu) {
1560 		dd_dev_info(dd,
1561 			    "MTU change on vl 15 from %d to %d\n",
1562 			    dd->vld[15].mtu, mtu);
1563 		dd->vld[15].mtu = mtu;
1564 		call_set_mtu++;
1565 	}
1566 	if (call_set_mtu)
1567 		set_mtu(ppd);
1568 
1569 	/* Set operational VLs */
1570 	vls = pi->operational_vls & OPA_PI_MASK_OPERATIONAL_VL;
1571 	if (vls) {
1572 		if (vls > ppd->vls_supported) {
1573 			pr_warn("SubnSet(OPA_PortInfo) operational VLs invalid %d\n",
1574 				pi->operational_vls);
1575 			smp->status |= IB_SMP_INVALID_FIELD;
1576 		} else {
1577 			if (hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_OP_VLS,
1578 					    vls) == -EINVAL)
1579 				smp->status |= IB_SMP_INVALID_FIELD;
1580 		}
1581 	}
1582 
1583 	if (pi->mkey_violations == 0)
1584 		ibp->rvp.mkey_violations = 0;
1585 
1586 	if (pi->pkey_violations == 0)
1587 		ibp->rvp.pkey_violations = 0;
1588 
1589 	if (pi->qkey_violations == 0)
1590 		ibp->rvp.qkey_violations = 0;
1591 
1592 	ibp->rvp.subnet_timeout =
1593 		pi->clientrereg_subnettimeout & OPA_PI_MASK_SUBNET_TIMEOUT;
1594 
1595 	crc_enabled = be16_to_cpu(pi->port_ltp_crc_mode);
1596 	crc_enabled >>= 4;
1597 	crc_enabled &= 0xf;
1598 
1599 	if (crc_enabled != 0)
1600 		ppd->port_crc_mode_enabled = port_ltp_to_cap(crc_enabled);
1601 
1602 	ppd->is_active_optimize_enabled =
1603 			!!(be16_to_cpu(pi->port_mode)
1604 					& OPA_PI_MASK_PORT_ACTIVE_OPTOMIZE);
1605 
1606 	ls_new = pi->port_states.portphysstate_portstate &
1607 			OPA_PI_MASK_PORT_STATE;
1608 	ps_new = (pi->port_states.portphysstate_portstate &
1609 			OPA_PI_MASK_PORT_PHYSICAL_STATE) >> 4;
1610 
1611 	if (ls_old == IB_PORT_INIT) {
1612 		if (start_of_sm_config) {
1613 			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
1614 				ppd->is_sm_config_started = 1;
1615 		} else if (ls_new == IB_PORT_ARMED) {
1616 			if (ppd->is_sm_config_started == 0) {
1617 				invalid = 1;
1618 				smp->status |= IB_SMP_INVALID_FIELD;
1619 			}
1620 		}
1621 	}
1622 
1623 	/* Handle CLIENT_REREGISTER event because the SM asked us for it */
1624 	if (clientrereg) {
1625 		event.event = IB_EVENT_CLIENT_REREGISTER;
1626 		ib_dispatch_event(&event);
1627 	}
1628 
1629 	/*
1630 	 * Do the port state change now that the other link parameters
1631 	 * have been set.
1632 	 * Changing the port physical state only makes sense if the link
1633 	 * is down or is being set to down.
1634 	 */
1635 
1636 	if (!invalid) {
1637 		ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
1638 		if (ret)
1639 			return ret;
1640 	}
1641 
1642 	ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
1643 				      max_len);
1644 
1645 	/* restore re-reg bit per o14-12.2.1 */
1646 	pi->clientrereg_subnettimeout |= clientrereg;
1647 
1648 	/*
1649 	 * Apply the new link downgrade policy.  This may result in a link
1650 	 * bounce.  Do this after everything else so things are settled.
1651 	 * Possible problem: if setting the port state above fails, then
1652 	 * the policy change is not applied.
1653 	 */
1654 	if (call_link_downgrade_policy)
1655 		apply_link_downgrade_policy(ppd, 0);
1656 
1657 	return ret;
1658 
1659 get_only:
1660 	return __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
1661 				       max_len);
1662 }
1663 
1664 /**
1665  * set_pkeys - set the PKEY table for ctxt 0
1666  * @dd: the hfi1_ib device
1667  * @port: the IB port number
1668  * @pkeys: the PKEY table
1669  */
1670 static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
1671 {
1672 	struct hfi1_pportdata *ppd;
1673 	int i;
1674 	int changed = 0;
1675 	int update_includes_mgmt_partition = 0;
1676 
1677 	/*
1678 	 * IB ports one/two always map to contexts zero/one, which are
1679 	 * always kernel contexts, so no locking is needed.  If we get
1680 	 * here with ppd set up, there is no need to check that rcd is
1681 	 * valid.
1682 	 */
1683 	ppd = dd->pport + (port - 1);
1684 	/*
1685 	 * If the update does not include the management pkey, don't do it.
1686 	 */
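	/*
	 * Editorial note (value assumed): LIM_MGMT_P_KEY is the limited
	 * management pkey, conventionally 0x7FFF; a table update that
	 * omits it returns 1 here, and the caller reports the Set as
	 * IB_SMP_INVALID_FIELD.
	 */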
1687 	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
1688 		if (pkeys[i] == LIM_MGMT_P_KEY) {
1689 			update_includes_mgmt_partition = 1;
1690 			break;
1691 		}
1692 	}
1693 
1694 	if (!update_includes_mgmt_partition)
1695 		return 1;
1696 
1697 	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
1698 		u16 key = pkeys[i];
1699 		u16 okey = ppd->pkeys[i];
1700 
1701 		if (key == okey)
1702 			continue;
1703 		/*
1704 		 * The SM gives us the complete PKey table. We have
1705 		 * to ensure that we put the PKeys in the matching
1706 		 * slots.
1707 		 */
1708 		ppd->pkeys[i] = key;
1709 		changed = 1;
1710 	}
1711 
1712 	if (changed) {
1713 		(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
1714 		hfi1_event_pkey_change(dd, port);
1715 	}
1716 
1717 	return 0;
1718 }
1719 
1720 static int __subn_set_opa_pkeytable(struct opa_smp *smp, u32 am, u8 *data,
1721 				    struct ib_device *ibdev, u8 port,
1722 				    u32 *resp_len, u32 max_len)
1723 {
1724 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1725 	u32 n_blocks_sent = OPA_AM_NBLK(am);
1726 	u32 start_block = am & 0x7ff;
1727 	u16 *p = (u16 *)data;
1728 	__be16 *q = (__be16 *)data;
1729 	int i;
1730 	u16 n_blocks_avail;
1731 	unsigned npkeys = hfi1_get_npkeys(dd);
1732 	u32 size = 0;
1733 
1734 	if (n_blocks_sent == 0) {
1735 		pr_warn("OPA Set PKey AM Invalid : P = %d; B = 0x%x; N = 0x%x\n",
1736 			port, start_block, n_blocks_sent);
1737 		smp->status |= IB_SMP_INVALID_FIELD;
1738 		return reply((struct ib_mad_hdr *)smp);
1739 	}
1740 
1741 	n_blocks_avail = (u16)(npkeys / OPA_PARTITION_TABLE_BLK_SIZE) + 1;
1742 
1743 	size = sizeof(u16) * (n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE);
1744 
1745 	if (smp_length_check(size, max_len)) {
1746 		smp->status |= IB_SMP_INVALID_FIELD;
1747 		return reply((struct ib_mad_hdr *)smp);
1748 	}
1749 
1750 	if (start_block + n_blocks_sent > n_blocks_avail ||
1751 	    n_blocks_sent > OPA_NUM_PKEY_BLOCKS_PER_SMP) {
1752 		pr_warn("OPA Set PKey AM Invalid : s 0x%x; req 0x%x; avail 0x%x; blk/smp 0x%lx\n",
1753 			start_block, n_blocks_sent, n_blocks_avail,
1754 			OPA_NUM_PKEY_BLOCKS_PER_SMP);
1755 		smp->status |= IB_SMP_INVALID_FIELD;
1756 		return reply((struct ib_mad_hdr *)smp);
1757 	}
1758 
1759 	for (i = 0; i < n_blocks_sent * OPA_PARTITION_TABLE_BLK_SIZE; i++)
1760 		p[i] = be16_to_cpu(q[i]);
1761 
1762 	if (start_block == 0 && set_pkeys(dd, port, p) != 0) {
1763 		smp->status |= IB_SMP_INVALID_FIELD;
1764 		return reply((struct ib_mad_hdr *)smp);
1765 	}
1766 
1767 	return __subn_get_opa_pkeytable(smp, am, data, ibdev, port, resp_len,
1768 					max_len);
1769 }
1770 
1771 #define ILLEGAL_VL 12
1772 /*
1773  * filter_sc2vlt rewrites any mapping to VL15 as ILLEGAL_VL (except
1774  * for SC15, which must map to VL15). If we don't remap things this
1775  * way, it is possible for VL15 counters to increment when we try to
1776  * send on an SC which is mapped to an invalid VL.
1777  * When getting the table, convert ILLEGAL_VL back to VL15.
1778  */
1779 static void filter_sc2vlt(void *data, bool set)
1780 {
1781 	int i;
1782 	u8 *pd = data;
1783 
1784 	for (i = 0; i < OPA_MAX_SCS; i++) {
1785 		if (i == 15)
1786 			continue;
1787 
1788 		if (set) {
1789 			if ((pd[i] & 0x1f) == 0xf)
1790 				pd[i] = ILLEGAL_VL;
1791 		} else {
1792 			if ((pd[i] & 0x1f) == ILLEGAL_VL)
1793 				pd[i] = 0xf;
1794 		}
1795 	}
1796 }
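
/*
 * Worked example: on Set, an entry of 0x0f (VL15) becomes ILLEGAL_VL
 * (12) for every SC except SC15; on Get, ILLEGAL_VL is converted back
 * to 0x0f, so the FM never observes the internal remapping.
 */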
1797 
1798 static int set_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1799 {
1800 	u64 *val = data;
1801 
1802 	filter_sc2vlt(data, true);
1803 
1804 	write_csr(dd, SEND_SC2VLT0, *val++);
1805 	write_csr(dd, SEND_SC2VLT1, *val++);
1806 	write_csr(dd, SEND_SC2VLT2, *val++);
1807 	write_csr(dd, SEND_SC2VLT3, *val++);
1808 	write_seqlock_irq(&dd->sc2vl_lock);
1809 	memcpy(dd->sc2vl, data, sizeof(dd->sc2vl));
1810 	write_sequnlock_irq(&dd->sc2vl_lock);
1811 	return 0;
1812 }
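
/*
 * Note on set_sc2vlt_tables(): the CSR writes take effect
 * immediately, while the dd->sc2vl shadow copy is updated under the
 * sc2vl_lock seqlock so that seqlock readers of the table elsewhere
 * in the driver see a consistent snapshot.
 */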
1813 
1814 static int get_sc2vlt_tables(struct hfi1_devdata *dd, void *data)
1815 {
1816 	u64 *val = (u64 *)data;
1817 
1818 	*val++ = read_csr(dd, SEND_SC2VLT0);
1819 	*val++ = read_csr(dd, SEND_SC2VLT1);
1820 	*val++ = read_csr(dd, SEND_SC2VLT2);
1821 	*val++ = read_csr(dd, SEND_SC2VLT3);
1822 
1823 	filter_sc2vlt((u64 *)data, false);
1824 	return 0;
1825 }
1826 
1827 static int __subn_get_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1828 				   struct ib_device *ibdev, u8 port,
1829 				   u32 *resp_len, u32 max_len)
1830 {
1831 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
1832 	u8 *p = data;
1833 	size_t size = ARRAY_SIZE(ibp->sl_to_sc); /* == 32 */
1834 	unsigned i;
1835 
1836 	if (am || smp_length_check(size, max_len)) {
1837 		smp->status |= IB_SMP_INVALID_FIELD;
1838 		return reply((struct ib_mad_hdr *)smp);
1839 	}
1840 
1841 	for (i = 0; i < ARRAY_SIZE(ibp->sl_to_sc); i++)
1842 		*p++ = ibp->sl_to_sc[i];
1843 
1844 	if (resp_len)
1845 		*resp_len += size;
1846 
1847 	return reply((struct ib_mad_hdr *)smp);
1848 }
1849 
1850 static int __subn_set_opa_sl_to_sc(struct opa_smp *smp, u32 am, u8 *data,
1851 				   struct ib_device *ibdev, u8 port,
1852 				   u32 *resp_len, u32 max_len)
1853 {
1854 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
1855 	u8 *p = data;
1856 	size_t size = ARRAY_SIZE(ibp->sl_to_sc);
1857 	int i;
1858 	u8 sc;
1859 
1860 	if (am || smp_length_check(size, max_len)) {
1861 		smp->status |= IB_SMP_INVALID_FIELD;
1862 		return reply((struct ib_mad_hdr *)smp);
1863 	}
1864 
1865 	for (i = 0; i <  ARRAY_SIZE(ibp->sl_to_sc); i++) {
1866 		sc = *p++;
1867 		if (ibp->sl_to_sc[i] != sc) {
1868 			ibp->sl_to_sc[i] = sc;
1869 
1870 			/* Put all stale qps into error state */
1871 			hfi1_error_port_qps(ibp, i);
1872 		}
1873 	}
1874 
1875 	return __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port, resp_len,
1876 				       max_len);
1877 }
1878 
1879 static int __subn_get_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1880 				   struct ib_device *ibdev, u8 port,
1881 				   u32 *resp_len, u32 max_len)
1882 {
1883 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
1884 	u8 *p = data;
1885 	size_t size = ARRAY_SIZE(ibp->sc_to_sl); /* == 32 */
1886 	unsigned i;
1887 
1888 	if (am || smp_length_check(size, max_len)) {
1889 		smp->status |= IB_SMP_INVALID_FIELD;
1890 		return reply((struct ib_mad_hdr *)smp);
1891 	}
1892 
1893 	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1894 		*p++ = ibp->sc_to_sl[i];
1895 
1896 	if (resp_len)
1897 		*resp_len += size;
1898 
1899 	return reply((struct ib_mad_hdr *)smp);
1900 }
1901 
1902 static int __subn_set_opa_sc_to_sl(struct opa_smp *smp, u32 am, u8 *data,
1903 				   struct ib_device *ibdev, u8 port,
1904 				   u32 *resp_len, u32 max_len)
1905 {
1906 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
1907 	size_t size = ARRAY_SIZE(ibp->sc_to_sl);
1908 	u8 *p = data;
1909 	int i;
1910 
1911 	if (am || smp_length_check(size, max_len)) {
1912 		smp->status |= IB_SMP_INVALID_FIELD;
1913 		return reply((struct ib_mad_hdr *)smp);
1914 	}
1915 
1916 	for (i = 0; i < ARRAY_SIZE(ibp->sc_to_sl); i++)
1917 		ibp->sc_to_sl[i] = *p++;
1918 
1919 	return __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port, resp_len,
1920 				       max_len);
1921 }
1922 
1923 static int __subn_get_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1924 				    struct ib_device *ibdev, u8 port,
1925 				    u32 *resp_len, u32 max_len)
1926 {
1927 	u32 n_blocks = OPA_AM_NBLK(am);
1928 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1929 	void *vp = (void *)data;
1930 	size_t size = 4 * sizeof(u64);
1931 
1932 	if (n_blocks != 1 || smp_length_check(size, max_len)) {
1933 		smp->status |= IB_SMP_INVALID_FIELD;
1934 		return reply((struct ib_mad_hdr *)smp);
1935 	}
1936 
1937 	get_sc2vlt_tables(dd, vp);
1938 
1939 	if (resp_len)
1940 		*resp_len += size;
1941 
1942 	return reply((struct ib_mad_hdr *)smp);
1943 }
1944 
1945 static int __subn_set_opa_sc_to_vlt(struct opa_smp *smp, u32 am, u8 *data,
1946 				    struct ib_device *ibdev, u8 port,
1947 				    u32 *resp_len, u32 max_len)
1948 {
1949 	u32 n_blocks = OPA_AM_NBLK(am);
1950 	int async_update = OPA_AM_ASYNC(am);
1951 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1952 	void *vp = (void *)data;
1953 	struct hfi1_pportdata *ppd;
1954 	int lstate;
1955 	/*
1956 	 * set_sc2vlt_tables writes the information contained in *data
1957 	 * to four 64-bit registers SendSC2VLt[0-3]. We need to make
1958 	 * sure the request supplies at least the total size of the four
1959 	 * SendSC2VLt[0-3] registers' worth of data.
1960 	 */
1961 	size_t size = 4 * sizeof(u64);
1962 
1963 	if (n_blocks != 1 || async_update || smp_length_check(size, max_len)) {
1964 		smp->status |= IB_SMP_INVALID_FIELD;
1965 		return reply((struct ib_mad_hdr *)smp);
1966 	}
1967 
1968 	/* IB numbers ports from 1, hw from 0 */
1969 	ppd = dd->pport + (port - 1);
1970 	lstate = driver_lstate(ppd);
1971 	/*
1972 	 * it's known that async_update is 0 by this point, but include
1973 	 * the explicit check for clarity
1974 	 */
1975 	if (!async_update &&
1976 	    (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE)) {
1977 		smp->status |= IB_SMP_INVALID_FIELD;
1978 		return reply((struct ib_mad_hdr *)smp);
1979 	}
1980 
1981 	set_sc2vlt_tables(dd, vp);
1982 
1983 	return __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port, resp_len,
1984 					max_len);
1985 }
1986 
1987 static int __subn_get_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
1988 				     struct ib_device *ibdev, u8 port,
1989 				     u32 *resp_len, u32 max_len)
1990 {
1991 	u32 n_blocks = OPA_AM_NPORT(am);
1992 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1993 	struct hfi1_pportdata *ppd;
1994 	void *vp = (void *)data;
1995 	int size = sizeof(struct sc2vlnt);
1996 
1997 	if (n_blocks != 1 || smp_length_check(size, max_len)) {
1998 		smp->status |= IB_SMP_INVALID_FIELD;
1999 		return reply((struct ib_mad_hdr *)smp);
2000 	}
2001 
2002 	ppd = dd->pport + (port - 1);
2003 
2004 	fm_get_table(ppd, FM_TBL_SC2VLNT, vp);
2005 
2006 	if (resp_len)
2007 		*resp_len += size;
2008 
2009 	return reply((struct ib_mad_hdr *)smp);
2010 }
2011 
2012 static int __subn_set_opa_sc_to_vlnt(struct opa_smp *smp, u32 am, u8 *data,
2013 				     struct ib_device *ibdev, u8 port,
2014 				     u32 *resp_len, u32 max_len)
2015 {
2016 	u32 n_blocks = OPA_AM_NPORT(am);
2017 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2018 	struct hfi1_pportdata *ppd;
2019 	void *vp = (void *)data;
2020 	int lstate;
2021 	int size = sizeof(struct sc2vlnt);
2022 
2023 	if (n_blocks != 1 || smp_length_check(size, max_len)) {
2024 		smp->status |= IB_SMP_INVALID_FIELD;
2025 		return reply((struct ib_mad_hdr *)smp);
2026 	}
2027 
2028 	/* IB numbers ports from 1, hw from 0 */
2029 	ppd = dd->pport + (port - 1);
2030 	lstate = driver_lstate(ppd);
2031 	if (lstate == IB_PORT_ARMED || lstate == IB_PORT_ACTIVE) {
2032 		smp->status |= IB_SMP_INVALID_FIELD;
2033 		return reply((struct ib_mad_hdr *)smp);
2034 	}
2035 
2038 	fm_set_table(ppd, FM_TBL_SC2VLNT, vp);
2039 
2040 	return __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
2041 					 resp_len, max_len);
2042 }
2043 
2044 static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
2045 			      struct ib_device *ibdev, u8 port,
2046 			      u32 *resp_len, u32 max_len)
2047 {
2048 	u32 nports = OPA_AM_NPORT(am);
2049 	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
2050 	u32 lstate;
2051 	struct hfi1_ibport *ibp;
2052 	struct hfi1_pportdata *ppd;
2053 	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
2054 
2055 	if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
2056 		smp->status |= IB_SMP_INVALID_FIELD;
2057 		return reply((struct ib_mad_hdr *)smp);
2058 	}
2059 
2060 	ibp = to_iport(ibdev, port);
2061 	ppd = ppd_from_ibp(ibp);
2062 
2063 	lstate = driver_lstate(ppd);
2064 
2065 	if (start_of_sm_config && (lstate == IB_PORT_INIT))
2066 		ppd->is_sm_config_started = 1;
2067 
2068 	psi->port_states.ledenable_offlinereason = ppd->neighbor_normal << 4;
2069 	psi->port_states.ledenable_offlinereason |=
2070 		ppd->is_sm_config_started << 5;
2071 	psi->port_states.ledenable_offlinereason |=
2072 		ppd->offline_disabled_reason;
2073 
2074 	psi->port_states.portphysstate_portstate =
2075 		(driver_pstate(ppd) << 4) | (lstate & 0xf);
2076 	psi->link_width_downgrade_tx_active =
2077 		cpu_to_be16(ppd->link_width_downgrade_tx_active);
2078 	psi->link_width_downgrade_rx_active =
2079 		cpu_to_be16(ppd->link_width_downgrade_rx_active);
2080 	if (resp_len)
2081 		*resp_len += sizeof(struct opa_port_state_info);
2082 
2083 	return reply((struct ib_mad_hdr *)smp);
2084 }
2085 
2086 static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
2087 			      struct ib_device *ibdev, u8 port,
2088 			      u32 *resp_len, u32 max_len, int local_mad)
2089 {
2090 	u32 nports = OPA_AM_NPORT(am);
2091 	u32 start_of_sm_config = OPA_AM_START_SM_CFG(am);
2092 	u32 ls_old;
2093 	u8 ls_new, ps_new;
2094 	struct hfi1_ibport *ibp;
2095 	struct hfi1_pportdata *ppd;
2096 	struct opa_port_state_info *psi = (struct opa_port_state_info *)data;
2097 	int ret, invalid = 0;
2098 
2099 	if (nports != 1 || smp_length_check(sizeof(*psi), max_len)) {
2100 		smp->status |= IB_SMP_INVALID_FIELD;
2101 		return reply((struct ib_mad_hdr *)smp);
2102 	}
2103 
2104 	ibp = to_iport(ibdev, port);
2105 	ppd = ppd_from_ibp(ibp);
2106 
2107 	ls_old = driver_lstate(ppd);
2108 
2109 	ls_new = port_states_to_logical_state(&psi->port_states);
2110 	ps_new = port_states_to_phys_state(&psi->port_states);
2111 
2112 	if (ls_old == IB_PORT_INIT) {
2113 		if (start_of_sm_config) {
2114 			if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
2115 				ppd->is_sm_config_started = 1;
2116 		} else if (ls_new == IB_PORT_ARMED) {
2117 			if (ppd->is_sm_config_started == 0) {
2118 				invalid = 1;
2119 				smp->status |= IB_SMP_INVALID_FIELD;
2120 			}
2121 		}
2122 	}
2123 
2124 	if (!invalid) {
2125 		ret = set_port_states(ppd, smp, ls_new, ps_new, local_mad);
2126 		if (ret)
2127 			return ret;
2128 	}
2129 
2130 	return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
2131 				  max_len);
2132 }
2133 
2134 static int __subn_get_opa_cable_info(struct opa_smp *smp, u32 am, u8 *data,
2135 				     struct ib_device *ibdev, u8 port,
2136 				     u32 *resp_len, u32 max_len)
2137 {
2138 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2139 	u32 addr = OPA_AM_CI_ADDR(am);
2140 	u32 len = OPA_AM_CI_LEN(am) + 1;
2141 	int ret;
2142 
2143 	if (dd->pport->port_type != PORT_TYPE_QSFP ||
2144 	    smp_length_check(len, max_len)) {
2145 		smp->status |= IB_SMP_INVALID_FIELD;
2146 		return reply((struct ib_mad_hdr *)smp);
2147 	}
2148 
2149 #define __CI_PAGE_SIZE BIT(7) /* 128 bytes */
2150 #define __CI_PAGE_MASK ~(__CI_PAGE_SIZE - 1)
2151 #define __CI_PAGE_NUM(a) ((a) & __CI_PAGE_MASK)
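
	/*
	 * Worked example: with addr = 130 and len = 10, __CI_PAGE_NUM(130)
	 * and __CI_PAGE_NUM(139) are both 128, so the request stays within
	 * a single 128-byte page and is allowed; addr = 120 with len = 10
	 * would straddle two pages and be rejected by the check below.
	 */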
2152 
2153 	/*
2154 	 * check that addr is within spec, and
2155 	 * addr and (addr + len - 1) are on the same "page"
2156 	 */
2157 	if (addr >= 4096 ||
2158 	    (__CI_PAGE_NUM(addr) != __CI_PAGE_NUM(addr + len - 1))) {
2159 		smp->status |= IB_SMP_INVALID_FIELD;
2160 		return reply((struct ib_mad_hdr *)smp);
2161 	}
2162 
2163 	ret = get_cable_info(dd, port, addr, len, data);
2164 
2165 	if (ret == -ENODEV) {
2166 		smp->status |= IB_SMP_UNSUP_METH_ATTR;
2167 		return reply((struct ib_mad_hdr *)smp);
2168 	}
2169 
2170 	/* The address range for the CableInfo SMA query is wider than the
2171 	 * memory available on the QSFP cable. We want to return a valid
2172 	 * response, albeit zeroed out, for address ranges beyond available
2173 	 * memory but that are within the CableInfo query spec
2174 	 */
2175 	if (ret < 0 && ret != -ERANGE) {
2176 		smp->status |= IB_SMP_INVALID_FIELD;
2177 		return reply((struct ib_mad_hdr *)smp);
2178 	}
2179 
2180 	if (resp_len)
2181 		*resp_len += len;
2182 
2183 	return reply((struct ib_mad_hdr *)smp);
2184 }
2185 
2186 static int __subn_get_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
2187 			      struct ib_device *ibdev, u8 port, u32 *resp_len,
2188 			      u32 max_len)
2189 {
2190 	u32 num_ports = OPA_AM_NPORT(am);
2191 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2192 	struct hfi1_pportdata *ppd;
2193 	struct buffer_control *p = (struct buffer_control *)data;
2194 	int size = sizeof(struct buffer_control);
2195 
2196 	if (num_ports != 1 || smp_length_check(size, max_len)) {
2197 		smp->status |= IB_SMP_INVALID_FIELD;
2198 		return reply((struct ib_mad_hdr *)smp);
2199 	}
2200 
2201 	ppd = dd->pport + (port - 1);
2202 	fm_get_table(ppd, FM_TBL_BUFFER_CONTROL, p);
2203 	trace_bct_get(dd, p);
2204 	if (resp_len)
2205 		*resp_len += size;
2206 
2207 	return reply((struct ib_mad_hdr *)smp);
2208 }
2209 
2210 static int __subn_set_opa_bct(struct opa_smp *smp, u32 am, u8 *data,
2211 			      struct ib_device *ibdev, u8 port, u32 *resp_len,
2212 			      u32 max_len)
2213 {
2214 	u32 num_ports = OPA_AM_NPORT(am);
2215 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2216 	struct hfi1_pportdata *ppd;
2217 	struct buffer_control *p = (struct buffer_control *)data;
2218 
2219 	if (num_ports != 1 || smp_length_check(sizeof(*p), max_len)) {
2220 		smp->status |= IB_SMP_INVALID_FIELD;
2221 		return reply((struct ib_mad_hdr *)smp);
2222 	}
2223 	ppd = dd->pport + (port - 1);
2224 	trace_bct_set(dd, p);
2225 	if (fm_set_table(ppd, FM_TBL_BUFFER_CONTROL, p) < 0) {
2226 		smp->status |= IB_SMP_INVALID_FIELD;
2227 		return reply((struct ib_mad_hdr *)smp);
2228 	}
2229 
2230 	return __subn_get_opa_bct(smp, am, data, ibdev, port, resp_len,
2231 				  max_len);
2232 }
2233 
2234 static int __subn_get_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
2235 				 struct ib_device *ibdev, u8 port,
2236 				 u32 *resp_len, u32 max_len)
2237 {
2238 	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
2239 	u32 num_ports = OPA_AM_NPORT(am);
2240 	u8 section = (am & 0x00ff0000) >> 16;
2241 	u8 *p = data;
2242 	int size = 256;
2243 
2244 	if (num_ports != 1 || smp_length_check(size, max_len)) {
2245 		smp->status |= IB_SMP_INVALID_FIELD;
2246 		return reply((struct ib_mad_hdr *)smp);
2247 	}
2248 
2249 	switch (section) {
2250 	case OPA_VLARB_LOW_ELEMENTS:
2251 		fm_get_table(ppd, FM_TBL_VL_LOW_ARB, p);
2252 		break;
2253 	case OPA_VLARB_HIGH_ELEMENTS:
2254 		fm_get_table(ppd, FM_TBL_VL_HIGH_ARB, p);
2255 		break;
2256 	case OPA_VLARB_PREEMPT_ELEMENTS:
2257 		fm_get_table(ppd, FM_TBL_VL_PREEMPT_ELEMS, p);
2258 		break;
2259 	case OPA_VLARB_PREEMPT_MATRIX:
2260 		fm_get_table(ppd, FM_TBL_VL_PREEMPT_MATRIX, p);
2261 		break;
2262 	default:
2263 		pr_warn("OPA SubnGet(VL Arb) AM Invalid : 0x%x\n",
2264 			be32_to_cpu(smp->attr_mod));
2265 		smp->status |= IB_SMP_INVALID_FIELD;
2266 		size = 0;
2267 		break;
2268 	}
2269 
2270 	if (size > 0 && resp_len)
2271 		*resp_len += size;
2272 
2273 	return reply((struct ib_mad_hdr *)smp);
2274 }
2275 
2276 static int __subn_set_opa_vl_arb(struct opa_smp *smp, u32 am, u8 *data,
2277 				 struct ib_device *ibdev, u8 port,
2278 				 u32 *resp_len, u32 max_len)
2279 {
2280 	struct hfi1_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port));
2281 	u32 num_ports = OPA_AM_NPORT(am);
2282 	u8 section = (am & 0x00ff0000) >> 16;
2283 	u8 *p = data;
2284 	int size = 256;
2285 
2286 	if (num_ports != 1 || smp_length_check(size, max_len)) {
2287 		smp->status |= IB_SMP_INVALID_FIELD;
2288 		return reply((struct ib_mad_hdr *)smp);
2289 	}
2290 
2291 	switch (section) {
2292 	case OPA_VLARB_LOW_ELEMENTS:
2293 		(void)fm_set_table(ppd, FM_TBL_VL_LOW_ARB, p);
2294 		break;
2295 	case OPA_VLARB_HIGH_ELEMENTS:
2296 		(void)fm_set_table(ppd, FM_TBL_VL_HIGH_ARB, p);
2297 		break;
2298 	/*
2299 	 * Neither OPA_VLARB_PREEMPT_ELEMENTS nor OPA_VLARB_PREEMPT_MATRIX
2300 	 * can be changed from the default values.
2301 	 */
2302 	case OPA_VLARB_PREEMPT_ELEMENTS:
2303 		/* FALLTHROUGH */
2304 	case OPA_VLARB_PREEMPT_MATRIX:
2305 		smp->status |= IB_SMP_UNSUP_METH_ATTR;
2306 		break;
2307 	default:
2308 		pr_warn("OPA SubnSet(VL Arb) AM Invalid : 0x%x\n",
2309 			be32_to_cpu(smp->attr_mod));
2310 		smp->status |= IB_SMP_INVALID_FIELD;
2311 		break;
2312 	}
2313 
2314 	return __subn_get_opa_vl_arb(smp, am, data, ibdev, port, resp_len,
2315 				     max_len);
2316 }
2317 
2318 struct opa_pma_mad {
2319 	struct ib_mad_hdr mad_hdr;
2320 	u8 data[2024];
2321 } __packed;
2322 
2323 struct opa_port_status_req {
2324 	__u8 port_num;
2325 	__u8 reserved[3];
2326 	__be32 vl_select_mask;
2327 };
2328 
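/* bits 0-7 select VL0-VL7; bit 15 selects VL15 */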
2329 #define VL_MASK_ALL		0x000080ff
2330 
2331 struct opa_port_status_rsp {
2332 	__u8 port_num;
2333 	__u8 reserved[3];
2334 	__be32  vl_select_mask;
2335 
2336 	/* Data counters */
2337 	__be64 port_xmit_data;
2338 	__be64 port_rcv_data;
2339 	__be64 port_xmit_pkts;
2340 	__be64 port_rcv_pkts;
2341 	__be64 port_multicast_xmit_pkts;
2342 	__be64 port_multicast_rcv_pkts;
2343 	__be64 port_xmit_wait;
2344 	__be64 sw_port_congestion;
2345 	__be64 port_rcv_fecn;
2346 	__be64 port_rcv_becn;
2347 	__be64 port_xmit_time_cong;
2348 	__be64 port_xmit_wasted_bw;
2349 	__be64 port_xmit_wait_data;
2350 	__be64 port_rcv_bubble;
2351 	__be64 port_mark_fecn;
2352 	/* Error counters */
2353 	__be64 port_rcv_constraint_errors;
2354 	__be64 port_rcv_switch_relay_errors;
2355 	__be64 port_xmit_discards;
2356 	__be64 port_xmit_constraint_errors;
2357 	__be64 port_rcv_remote_physical_errors;
2358 	__be64 local_link_integrity_errors;
2359 	__be64 port_rcv_errors;
2360 	__be64 excessive_buffer_overruns;
2361 	__be64 fm_config_errors;
2362 	__be32 link_error_recovery;
2363 	__be32 link_downed;
2364 	u8 uncorrectable_errors;
2365 
2366 	u8 link_quality_indicator; /* 5res, 3bit */
2367 	u8 res2[6];
2368 	struct _vls_pctrs {
2369 		/* per-VL Data counters */
2370 		__be64 port_vl_xmit_data;
2371 		__be64 port_vl_rcv_data;
2372 		__be64 port_vl_xmit_pkts;
2373 		__be64 port_vl_rcv_pkts;
2374 		__be64 port_vl_xmit_wait;
2375 		__be64 sw_port_vl_congestion;
2376 		__be64 port_vl_rcv_fecn;
2377 		__be64 port_vl_rcv_becn;
2378 		__be64 port_xmit_time_cong;
2379 		__be64 port_vl_xmit_wasted_bw;
2380 		__be64 port_vl_xmit_wait_data;
2381 		__be64 port_vl_rcv_bubble;
2382 		__be64 port_vl_mark_fecn;
2383 		__be64 port_vl_xmit_discards;
2384 	} vls[0]; /* real array size defined by # bits set in vl_select_mask */
2385 };
2386 
2387 enum counter_selects {
2388 	CS_PORT_XMIT_DATA			= (1 << 31),
2389 	CS_PORT_RCV_DATA			= (1 << 30),
2390 	CS_PORT_XMIT_PKTS			= (1 << 29),
2391 	CS_PORT_RCV_PKTS			= (1 << 28),
2392 	CS_PORT_MCAST_XMIT_PKTS			= (1 << 27),
2393 	CS_PORT_MCAST_RCV_PKTS			= (1 << 26),
2394 	CS_PORT_XMIT_WAIT			= (1 << 25),
2395 	CS_SW_PORT_CONGESTION			= (1 << 24),
2396 	CS_PORT_RCV_FECN			= (1 << 23),
2397 	CS_PORT_RCV_BECN			= (1 << 22),
2398 	CS_PORT_XMIT_TIME_CONG			= (1 << 21),
2399 	CS_PORT_XMIT_WASTED_BW			= (1 << 20),
2400 	CS_PORT_XMIT_WAIT_DATA			= (1 << 19),
2401 	CS_PORT_RCV_BUBBLE			= (1 << 18),
2402 	CS_PORT_MARK_FECN			= (1 << 17),
2403 	CS_PORT_RCV_CONSTRAINT_ERRORS		= (1 << 16),
2404 	CS_PORT_RCV_SWITCH_RELAY_ERRORS		= (1 << 15),
2405 	CS_PORT_XMIT_DISCARDS			= (1 << 14),
2406 	CS_PORT_XMIT_CONSTRAINT_ERRORS		= (1 << 13),
2407 	CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS	= (1 << 12),
2408 	CS_LOCAL_LINK_INTEGRITY_ERRORS		= (1 << 11),
2409 	CS_PORT_RCV_ERRORS			= (1 << 10),
2410 	CS_EXCESSIVE_BUFFER_OVERRUNS		= (1 << 9),
2411 	CS_FM_CONFIG_ERRORS			= (1 << 8),
2412 	CS_LINK_ERROR_RECOVERY			= (1 << 7),
2413 	CS_LINK_DOWNED				= (1 << 6),
2414 	CS_UNCORRECTABLE_ERRORS			= (1 << 5),
2415 };
2416 
2417 struct opa_clear_port_status {
2418 	__be64 port_select_mask[4];
2419 	__be32 counter_select_mask;
2420 };
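
/*
 * Example: to clear only the data counters on port 1, the FM sets
 * bit 1 of port_select_mask[3] (i.e. 1 << port) and
 * counter_select_mask = CS_PORT_XMIT_DATA | CS_PORT_RCV_DATA;
 * pma_set_opa_portstatus() below then zeroes just those counters.
 */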
2421 
2422 struct opa_aggregate {
2423 	__be16 attr_id;
2424 	__be16 err_reqlength;	/* 1 bit, 8 res, 7 bit */
2425 	__be32 attr_mod;
2426 	u8 data[0];
2427 };
2428 
2429 #define MSK_LLI 0x000000f0
2430 #define MSK_LLI_SFT 4
2431 #define MSK_LER 0x0000000f
2432 #define MSK_LER_SFT 0
2433 #define ADD_LLI 8
2434 #define ADD_LER 2
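
/*
 * Resolution example: resolution = 0x34 gives res_lli = 3 and
 * res_ler = 4 (see pma_get_opa_datacounters()), so
 * LocalLinkIntegrityErrors is right-shifted by 3 + ADD_LLI = 11 bits
 * and the link-error-recovery sum by 4 + ADD_LER = 6 bits before
 * being added into PortErrorCounterSummary.
 */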
2435 
2436 /* Request contains first three fields, response contains those plus the rest */
2437 struct opa_port_data_counters_msg {
2438 	__be64 port_select_mask[4];
2439 	__be32 vl_select_mask;
2440 	__be32 resolution;
2441 
2442 	/* Response fields follow */
2443 	struct _port_dctrs {
2444 		u8 port_number;
2445 		u8 reserved2[3];
2446 		__be32 link_quality_indicator; /* 29res, 3bit */
2447 
2448 		/* Data counters */
2449 		__be64 port_xmit_data;
2450 		__be64 port_rcv_data;
2451 		__be64 port_xmit_pkts;
2452 		__be64 port_rcv_pkts;
2453 		__be64 port_multicast_xmit_pkts;
2454 		__be64 port_multicast_rcv_pkts;
2455 		__be64 port_xmit_wait;
2456 		__be64 sw_port_congestion;
2457 		__be64 port_rcv_fecn;
2458 		__be64 port_rcv_becn;
2459 		__be64 port_xmit_time_cong;
2460 		__be64 port_xmit_wasted_bw;
2461 		__be64 port_xmit_wait_data;
2462 		__be64 port_rcv_bubble;
2463 		__be64 port_mark_fecn;
2464 
2465 		__be64 port_error_counter_summary;
2466 		/* Sum of error counts/port */
2467 
2468 		struct _vls_dctrs {
2469 			/* per-VL Data counters */
2470 			__be64 port_vl_xmit_data;
2471 			__be64 port_vl_rcv_data;
2472 			__be64 port_vl_xmit_pkts;
2473 			__be64 port_vl_rcv_pkts;
2474 			__be64 port_vl_xmit_wait;
2475 			__be64 sw_port_vl_congestion;
2476 			__be64 port_vl_rcv_fecn;
2477 			__be64 port_vl_rcv_becn;
2478 			__be64 port_xmit_time_cong;
2479 			__be64 port_vl_xmit_wasted_bw;
2480 			__be64 port_vl_xmit_wait_data;
2481 			__be64 port_vl_rcv_bubble;
2482 			__be64 port_vl_mark_fecn;
2483 		} vls[0];
2484 		/* array size defined by #bits set in vl_select_mask */
2485 	} port[1]; /* array size defined by #ports in attribute modifier */
2486 };
2487 
2488 struct opa_port_error_counters64_msg {
2489 	/*
2490 	 * Request contains first two fields, response contains the
2491 	 * whole magilla
2492 	 */
2493 	__be64 port_select_mask[4];
2494 	__be32 vl_select_mask;
2495 
2496 	/* Response-only fields follow */
2497 	__be32 reserved1;
2498 	struct _port_ectrs {
2499 		u8 port_number;
2500 		u8 reserved2[7];
2501 		__be64 port_rcv_constraint_errors;
2502 		__be64 port_rcv_switch_relay_errors;
2503 		__be64 port_xmit_discards;
2504 		__be64 port_xmit_constraint_errors;
2505 		__be64 port_rcv_remote_physical_errors;
2506 		__be64 local_link_integrity_errors;
2507 		__be64 port_rcv_errors;
2508 		__be64 excessive_buffer_overruns;
2509 		__be64 fm_config_errors;
2510 		__be32 link_error_recovery;
2511 		__be32 link_downed;
2512 		u8 uncorrectable_errors;
2513 		u8 reserved3[7];
2514 		struct _vls_ectrs {
2515 			__be64 port_vl_xmit_discards;
2516 		} vls[0];
2517 		/* array size defined by #bits set in vl_select_mask */
2518 	} port[1]; /* array size defined by #ports in attribute modifier */
2519 };
2520 
2521 struct opa_port_error_info_msg {
2522 	__be64 port_select_mask[4];
2523 	__be32 error_info_select_mask;
2524 	__be32 reserved1;
2525 	struct _port_ei {
2526 		u8 port_number;
2527 		u8 reserved2[7];
2528 
2529 		/* PortRcvErrorInfo */
2530 		struct {
2531 			u8 status_and_code;
2532 			union {
2533 				u8 raw[17];
2534 				struct {
2535 					/* EI1to12 format */
2536 					u8 packet_flit1[8];
2537 					u8 packet_flit2[8];
2538 					u8 remaining_flit_bits12;
2539 				} ei1to12;
2540 				struct {
2541 					u8 packet_bytes[8];
2542 					u8 remaining_flit_bits;
2543 				} ei13;
2544 			} ei;
2545 			u8 reserved3[6];
2546 		} __packed port_rcv_ei;
2547 
2548 		/* ExcessiveBufferOverrunInfo */
2549 		struct {
2550 			u8 status_and_sc;
2551 			u8 reserved4[7];
2552 		} __packed excessive_buffer_overrun_ei;
2553 
2554 		/* PortXmitConstraintErrorInfo */
2555 		struct {
2556 			u8 status;
2557 			u8 reserved5;
2558 			__be16 pkey;
2559 			__be32 slid;
2560 		} __packed port_xmit_constraint_ei;
2561 
2562 		/* PortRcvConstraintErrorInfo */
2563 		struct {
2564 			u8 status;
2565 			u8 reserved6;
2566 			__be16 pkey;
2567 			__be32 slid;
2568 		} __packed port_rcv_constraint_ei;
2569 
2570 		/* PortRcvSwitchRelayErrorInfo */
2571 		struct {
2572 			u8 status_and_code;
2573 			u8 reserved7[3];
2574 			__u32 error_info;
2575 		} __packed port_rcv_switch_relay_ei;
2576 
2577 		/* UncorrectableErrorInfo */
2578 		struct {
2579 			u8 status_and_code;
2580 			u8 reserved8;
2581 		} __packed uncorrectable_ei;
2582 
2583 		/* FMConfigErrorInfo */
2584 		struct {
2585 			u8 status_and_code;
2586 			u8 error_info;
2587 		} __packed fm_config_ei;
2588 		__u32 reserved9;
2589 	} port[1]; /* actual array size defined by #ports in attr modifier */
2590 };
2591 
2592 /* opa_port_error_info_msg error_info_select_mask bit definitions */
2593 enum error_info_selects {
2594 	ES_PORT_RCV_ERROR_INFO			= (1 << 31),
2595 	ES_EXCESSIVE_BUFFER_OVERRUN_INFO	= (1 << 30),
2596 	ES_PORT_XMIT_CONSTRAINT_ERROR_INFO	= (1 << 29),
2597 	ES_PORT_RCV_CONSTRAINT_ERROR_INFO	= (1 << 28),
2598 	ES_PORT_RCV_SWITCH_RELAY_ERROR_INFO	= (1 << 27),
2599 	ES_UNCORRECTABLE_ERROR_INFO		= (1 << 26),
2600 	ES_FM_CONFIG_ERROR_INFO			= (1 << 25)
2601 };
2602 
2603 static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
2604 				     struct ib_device *ibdev, u32 *resp_len)
2605 {
2606 	struct opa_class_port_info *p =
2607 		(struct opa_class_port_info *)pmp->data;
2608 
2609 	memset(pmp->data, 0, sizeof(pmp->data));
2610 
2611 	if (pmp->mad_hdr.attr_mod != 0)
2612 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2613 
2614 	p->base_version = OPA_MGMT_BASE_VERSION;
2615 	p->class_version = OPA_SM_CLASS_VERSION;
2616 	/*
2617 	 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
2618 	 */
2619 	p->cap_mask2_resp_time = cpu_to_be32(18);
2620 
2621 	if (resp_len)
2622 		*resp_len += sizeof(*p);
2623 
2624 	return reply((struct ib_mad_hdr *)pmp);
2625 }
2626 
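/*
 * The a0_* fixups below apply only when is_bx() is false (the prefix
 * suggests A-step silicon): the chip-wide PortXmitWait counter can
 * exceed the true wait time, so it is clamped to the saturating sum
 * of the per-VL xmit-wait counters.
 */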
2627 static void a0_portstatus(struct hfi1_pportdata *ppd,
2628 			  struct opa_port_status_rsp *rsp, u32 vl_select_mask)
2629 {
2630 	if (!is_bx(ppd->dd)) {
2631 		unsigned long vl;
2632 		u64 sum_vl_xmit_wait = 0;
2633 		u32 vl_all_mask = VL_MASK_ALL;
2634 
2635 		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2636 				 8 * sizeof(vl_all_mask)) {
2637 			u64 tmp = sum_vl_xmit_wait +
2638 				  read_port_cntr(ppd, C_TX_WAIT_VL,
2639 						 idx_from_vl(vl));
2640 			if (tmp < sum_vl_xmit_wait) {
2641 				/* we wrapped */
2642 				sum_vl_xmit_wait = (u64)~0;
2643 				break;
2644 			}
2645 			sum_vl_xmit_wait = tmp;
2646 		}
2647 		if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2648 			rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2649 	}
2650 }
2651 
2652 /**
2653  * tx_link_width - convert link width bitmask to an integer
2654  * value representing the actual link width.
2655  * @link_width: width of active link
2656  * @return: one-based index of the bit set in the link_width bitmask
2657  *
2658  * The function converts the bitmask and returns the one-based index
2659  * of the set bit that indicates the current link width.
2660  */
2661 u16 tx_link_width(u16 link_width)
2662 {
2663 	int n = LINK_WIDTH_DEFAULT;
2664 	u16 tx_width = n;
2665 
2666 	while (link_width && n) {
2667 		if (link_width & (1 << (n - 1))) {
2668 			tx_width = n;
2669 			break;
2670 		}
2671 		n--;
2672 	}
2673 
2674 	return tx_width;
2675 }
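
/*
 * Example, assuming the usual OPA width encodings (1X = 0x1,
 * 2X = 0x2, 3X = 0x4, 4X = 0x8): tx_link_width(0x8) returns 4 and
 * tx_link_width(0x2) returns 2; a zero bitmask falls through to
 * LINK_WIDTH_DEFAULT.
 */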
2676 
2677 /**
2678  * get_xmit_wait_counters - Convert the HFI's SendWaitCnt/SendWaitVlCnt
2679  * counters from units of TXE cycle times to flit times.
2680  * @ppd: info of physical HFI port
2681  * @link_width: width of active link
2682  * @link_speed: speed of active link
2683  * @vl: VL0-VL7 or VL15 for a PortVLXmitWait counter request; if vl is
2684  * C_VL_COUNT, it represents a SendWaitCnt counter request
2685  * @return: the SendWaitCnt/SendWaitVlCnt counter value for the vl.
2686  *
2687  * Convert the SendWaitCnt/SendWaitVlCnt counter from TXE cycle times
2688  * to flit times. Call this function to sample these counters. The
2689  * delta since the previous sample is converted using the saved
2690  * ppd->prev_link_width and ppd->port_vl_xmit_wait_last values, which
2691  * are then updated to link_width and port_vl_xmit_wait_curr at the
2692  * end of the function.
2693  */
2694 u64 get_xmit_wait_counters(struct hfi1_pportdata *ppd,
2695 			   u16 link_width, u16 link_speed, int vl)
2696 {
2697 	u64 port_vl_xmit_wait_curr;
2698 	u64 delta_vl_xmit_wait;
2699 	u64 xmit_wait_val;
2700 
2701 	if (vl > C_VL_COUNT)
2702 		return  0;
2703 	if (vl < C_VL_COUNT)
2704 		port_vl_xmit_wait_curr =
2705 			read_port_cntr(ppd, C_TX_WAIT_VL, vl);
2706 	else
2707 		port_vl_xmit_wait_curr =
2708 			read_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL);
2709 
2710 	xmit_wait_val =
2711 		port_vl_xmit_wait_curr -
2712 		ppd->port_vl_xmit_wait_last[vl];
2713 	delta_vl_xmit_wait =
2714 		convert_xmit_counter(xmit_wait_val,
2715 				     ppd->prev_link_width,
2716 				     link_speed);
2717 
2718 	ppd->vl_xmit_flit_cnt[vl] += delta_vl_xmit_wait;
2719 	ppd->port_vl_xmit_wait_last[vl] = port_vl_xmit_wait_curr;
2720 	ppd->prev_link_width = link_width;
2721 
2722 	return ppd->vl_xmit_flit_cnt[vl];
2723 }
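
/*
 * Each call converts only the delta accumulated since the previous
 * sample, using the link width recorded at the previous sample, and
 * adds it to the running vl_xmit_flit_cnt[] total, so the reported
 * PortXmitWait/PortVLXmitWait values remain monotonic across link
 * width changes.
 */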
2724 
2725 static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
2726 				  struct ib_device *ibdev,
2727 				  u8 port, u32 *resp_len)
2728 {
2729 	struct opa_port_status_req *req =
2730 		(struct opa_port_status_req *)pmp->data;
2731 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2732 	struct opa_port_status_rsp *rsp;
2733 	u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
2734 	unsigned long vl;
2735 	size_t response_data_size;
2736 	u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
2737 	u8 port_num = req->port_num;
2738 	u8 num_vls = hweight32(vl_select_mask);
2739 	struct _vls_pctrs *vlinfo;
2740 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
2741 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2742 	int vfi;
2743 	u64 tmp, tmp2;
2744 	u16 link_width;
2745 	u16 link_speed;
2746 
2747 	response_data_size = sizeof(struct opa_port_status_rsp) +
2748 				num_vls * sizeof(struct _vls_pctrs);
2749 	if (response_data_size > sizeof(pmp->data)) {
2750 		pmp->mad_hdr.status |= OPA_PM_STATUS_REQUEST_TOO_LARGE;
2751 		return reply((struct ib_mad_hdr *)pmp);
2752 	}
2753 
2754 	if (nports != 1 || (port_num && port_num != port) ||
2755 	    num_vls > OPA_MAX_VLS || (vl_select_mask & ~VL_MASK_ALL)) {
2756 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
2757 		return reply((struct ib_mad_hdr *)pmp);
2758 	}
2759 
2760 	memset(pmp->data, 0, sizeof(pmp->data));
2761 
2762 	rsp = (struct opa_port_status_rsp *)pmp->data;
2763 	if (port_num)
2764 		rsp->port_num = port_num;
2765 	else
2766 		rsp->port_num = port;
2767 
2768 	rsp->port_rcv_constraint_errors =
2769 		cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2770 					   CNTR_INVALID_VL));
2771 
2772 	hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
2773 
2774 	rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
2775 	rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2776 					  CNTR_INVALID_VL));
2777 	rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2778 					 CNTR_INVALID_VL));
2779 	rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2780 					  CNTR_INVALID_VL));
2781 	rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2782 					 CNTR_INVALID_VL));
2783 	rsp->port_multicast_xmit_pkts =
2784 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2785 					  CNTR_INVALID_VL));
2786 	rsp->port_multicast_rcv_pkts =
2787 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2788 					  CNTR_INVALID_VL));
2789 	/*
2790 	 * Convert PortXmitWait counter from TXE cycle times
2791 	 * to flit times.
2792 	 */
2793 	link_width =
2794 		tx_link_width(ppd->link_width_downgrade_tx_active);
2795 	link_speed = get_link_speed(ppd->link_speed_active);
2796 	rsp->port_xmit_wait =
2797 		cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
2798 						   link_speed, C_VL_COUNT));
2799 	rsp->port_rcv_fecn =
2800 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
2801 	rsp->port_rcv_becn =
2802 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
2803 	rsp->port_xmit_discards =
2804 		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
2805 					   CNTR_INVALID_VL));
2806 	rsp->port_xmit_constraint_errors =
2807 		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2808 					   CNTR_INVALID_VL));
2809 	rsp->port_rcv_remote_physical_errors =
2810 		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2811 					  CNTR_INVALID_VL));
2812 	rsp->local_link_integrity_errors =
2813 		cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
2814 					  CNTR_INVALID_VL));
2815 	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2816 	tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
2817 				   CNTR_INVALID_VL);
2818 	if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
2819 		/* overflow/wrapped */
2820 		rsp->link_error_recovery = cpu_to_be32(~0);
2821 	} else {
2822 		rsp->link_error_recovery = cpu_to_be32(tmp2);
2823 	}
2824 	rsp->port_rcv_errors =
2825 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
2826 	rsp->excessive_buffer_overruns =
2827 		cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
2828 	rsp->fm_config_errors =
2829 		cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2830 					  CNTR_INVALID_VL));
2831 	rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
2832 						      CNTR_INVALID_VL));
2833 
2834 	/* rsp->uncorrectable_errors is 8 bits wide, and it pegs at 0xff */
2835 	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2836 	rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
2837 
2838 	vlinfo = &rsp->vls[0];
2839 	vfi = 0;
2840 	/* The vl_select_mask has been checked above, and we know
2841 	 * that it contains only entries which represent valid VLs.
2842 	 * So in the for_each_set_bit() loop below, we don't need
2843 	 * any additional checks for vl.
2844 	 */
2845 	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
2846 			 8 * sizeof(vl_select_mask)) {
2847 		memset(vlinfo, 0, sizeof(*vlinfo));
2848 
2849 		tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
2850 		rsp->vls[vfi].port_vl_rcv_data = cpu_to_be64(tmp);
2851 
2852 		rsp->vls[vfi].port_vl_rcv_pkts =
2853 			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
2854 						  idx_from_vl(vl)));
2855 
2856 		rsp->vls[vfi].port_vl_xmit_data =
2857 			cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
2858 						   idx_from_vl(vl)));
2859 
2860 		rsp->vls[vfi].port_vl_xmit_pkts =
2861 			cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
2862 						   idx_from_vl(vl)));
2863 		/*
2864 		 * Convert PortVlXmitWait counter from TXE cycle
2865 		 * times to flit times.
2866 		 */
2867 		rsp->vls[vfi].port_vl_xmit_wait =
2868 			cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
2869 							   link_speed,
2870 							   idx_from_vl(vl)));
2871 
2872 		rsp->vls[vfi].port_vl_rcv_fecn =
2873 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
2874 						  idx_from_vl(vl)));
2875 
2876 		rsp->vls[vfi].port_vl_rcv_becn =
2877 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
2878 						  idx_from_vl(vl)));
2879 
2880 		rsp->vls[vfi].port_vl_xmit_discards =
2881 			cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
2882 						   idx_from_vl(vl)));
2883 		vlinfo++;
2884 		vfi++;
2885 	}
2886 
2887 	a0_portstatus(ppd, rsp, vl_select_mask);
2888 
2889 	if (resp_len)
2890 		*resp_len += response_data_size;
2891 
2892 	return reply((struct ib_mad_hdr *)pmp);
2893 }
2894 
2895 static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
2896 				     u8 res_lli, u8 res_ler)
2897 {
2898 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2899 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
2900 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2901 	u64 error_counter_summary = 0, tmp;
2902 
2903 	error_counter_summary += read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
2904 						CNTR_INVALID_VL);
2905 	/* port_rcv_switch_relay_errors is 0 for HFIs */
2906 	error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_DSCD,
2907 						CNTR_INVALID_VL);
2908 	error_counter_summary += read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
2909 						CNTR_INVALID_VL);
2910 	error_counter_summary += read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
2911 					       CNTR_INVALID_VL);
2912 	/* local link integrity must be right-shifted by the lli resolution */
2913 	error_counter_summary += (read_dev_cntr(dd, C_DC_RX_REPLAY,
2914 						CNTR_INVALID_VL) >> res_lli);
2915 	/* link error recovery must be right-shifted by the ler resolution */
2916 	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
2917 	tmp += read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL);
2918 	error_counter_summary += (tmp >> res_ler);
2919 	error_counter_summary += read_dev_cntr(dd, C_DC_RCV_ERR,
2920 					       CNTR_INVALID_VL);
2921 	error_counter_summary += read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
2922 	error_counter_summary += read_dev_cntr(dd, C_DC_FM_CFG_ERR,
2923 					       CNTR_INVALID_VL);
2924 	/* ppd->link_downed is a 32-bit value */
2925 	error_counter_summary += read_port_cntr(ppd, C_SW_LINK_DOWN,
2926 						CNTR_INVALID_VL);
2927 	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
2928 	/* this is an 8-bit quantity */
2929 	error_counter_summary += tmp < 0x100 ? (tmp & 0xff) : 0xff;
2930 
2931 	return error_counter_summary;
2932 }
2933 
2934 static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
2935 			    u32 vl_select_mask)
2936 {
2937 	if (!is_bx(ppd->dd)) {
2938 		unsigned long vl;
2939 		u64 sum_vl_xmit_wait = 0;
2940 		u32 vl_all_mask = VL_MASK_ALL;
2941 
2942 		for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
2943 				 8 * sizeof(vl_all_mask)) {
2944 			u64 tmp = sum_vl_xmit_wait +
2945 				  read_port_cntr(ppd, C_TX_WAIT_VL,
2946 						 idx_from_vl(vl));
2947 			if (tmp < sum_vl_xmit_wait) {
2948 				/* we wrapped */
2949 				sum_vl_xmit_wait = (u64)~0;
2950 				break;
2951 			}
2952 			sum_vl_xmit_wait = tmp;
2953 		}
2954 		if (be64_to_cpu(rsp->port_xmit_wait) > sum_vl_xmit_wait)
2955 			rsp->port_xmit_wait = cpu_to_be64(sum_vl_xmit_wait);
2956 	}
2957 }
2958 
2959 static void pma_get_opa_port_dctrs(struct ib_device *ibdev,
2960 				   struct _port_dctrs *rsp)
2961 {
2962 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2963 
2964 	rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
2965 						CNTR_INVALID_VL));
2966 	rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
2967 						CNTR_INVALID_VL));
2968 	rsp->port_xmit_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_PKTS,
2969 						CNTR_INVALID_VL));
2970 	rsp->port_rcv_pkts = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_PKTS,
2971 						CNTR_INVALID_VL));
2972 	rsp->port_multicast_xmit_pkts =
2973 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_XMIT_PKTS,
2974 					  CNTR_INVALID_VL));
2975 	rsp->port_multicast_rcv_pkts =
2976 		cpu_to_be64(read_dev_cntr(dd, C_DC_MC_RCV_PKTS,
2977 					  CNTR_INVALID_VL));
2978 }
2979 
2980 static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
2981 				    struct ib_device *ibdev,
2982 				    u8 port, u32 *resp_len)
2983 {
2984 	struct opa_port_data_counters_msg *req =
2985 		(struct opa_port_data_counters_msg *)pmp->data;
2986 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
2987 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
2988 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
2989 	struct _port_dctrs *rsp;
2990 	struct _vls_dctrs *vlinfo;
2991 	size_t response_data_size;
2992 	u32 num_ports;
2993 	u8 lq, num_vls;
2994 	u8 res_lli, res_ler;
2995 	u64 port_mask;
2996 	u8 port_num;
2997 	unsigned long vl;
2998 	u32 vl_select_mask;
2999 	int vfi;
3000 	u16 link_width;
3001 	u16 link_speed;
3002 
3003 	num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3004 	num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
3005 	vl_select_mask = be32_to_cpu(req->vl_select_mask);
3006 	res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
3007 	res_lli = res_lli ? res_lli + ADD_LLI : 0;
3008 	res_ler = (u8)(be32_to_cpu(req->resolution) & MSK_LER) >> MSK_LER_SFT;
3009 	res_ler = res_ler ? res_ler + ADD_LER : 0;
3010 
3011 	if (num_ports != 1 || (vl_select_mask & ~VL_MASK_ALL)) {
3012 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3013 		return reply((struct ib_mad_hdr *)pmp);
3014 	}
3015 
3016 	/* Sanity check */
3017 	response_data_size = sizeof(struct opa_port_data_counters_msg) +
3018 				num_vls * sizeof(struct _vls_dctrs);
3019 
3020 	if (response_data_size > sizeof(pmp->data)) {
3021 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3022 		return reply((struct ib_mad_hdr *)pmp);
3023 	}
3024 
3025 	/*
3026 	 * The bit set in the mask needs to be consistent with the
3027 	 * port the request came in on.
3028 	 */
3029 	port_mask = be64_to_cpu(req->port_select_mask[3]);
3030 	port_num = find_first_bit((unsigned long *)&port_mask,
3031 				  sizeof(port_mask) * 8);
3032 
3033 	if (port_num != port) {
3034 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3035 		return reply((struct ib_mad_hdr *)pmp);
3036 	}
3037 
3038 	rsp = &req->port[0];
3039 	memset(rsp, 0, sizeof(*rsp));
3040 
3041 	rsp->port_number = port;
3042 	/*
3043 	 * Note that link_quality_indicator is a 32 bit quantity in
3044 	 * 'datacounters' queries (as opposed to 'portinfo' queries,
3045 	 * where it's a byte).
3046 	 */
3047 	hfi1_read_link_quality(dd, &lq);
3048 	rsp->link_quality_indicator = cpu_to_be32((u32)lq);
3049 	pma_get_opa_port_dctrs(ibdev, rsp);
3050 
3051 	/*
3052 	 * Convert PortXmitWait counter from TXE
3053 	 * cycle times to flit times.
3054 	 */
3055 	link_width =
3056 		tx_link_width(ppd->link_width_downgrade_tx_active);
3057 	link_speed = get_link_speed(ppd->link_speed_active);
3058 	rsp->port_xmit_wait =
3059 		cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
3060 						   link_speed, C_VL_COUNT));
3061 	rsp->port_rcv_fecn =
3062 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL));
3063 	rsp->port_rcv_becn =
3064 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL));
3065 	rsp->port_error_counter_summary =
3066 		cpu_to_be64(get_error_counter_summary(ibdev, port,
3067 						      res_lli, res_ler));
3068 
3069 	vlinfo = &rsp->vls[0];
3070 	vfi = 0;
3071 	/* The vl_select_mask has been checked above, and we know
3072 	 * that it contains only entries which represent valid VLs.
3073 	 * So in the for_each_set_bit() loop below, we don't need
3074 	 * any additional checks for vl.
3075 	 */
3076 	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
3077 			 8 * sizeof(req->vl_select_mask)) {
3078 		memset(vlinfo, 0, sizeof(*vlinfo));
3079 
3080 		rsp->vls[vfi].port_vl_xmit_data =
3081 			cpu_to_be64(read_port_cntr(ppd, C_TX_FLIT_VL,
3082 						   idx_from_vl(vl)));
3083 
3084 		rsp->vls[vfi].port_vl_rcv_data =
3085 			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_FLIT_VL,
3086 						  idx_from_vl(vl)));
3087 
3088 		rsp->vls[vfi].port_vl_xmit_pkts =
3089 			cpu_to_be64(read_port_cntr(ppd, C_TX_PKT_VL,
3090 						   idx_from_vl(vl)));
3091 
3092 		rsp->vls[vfi].port_vl_rcv_pkts =
3093 			cpu_to_be64(read_dev_cntr(dd, C_DC_RX_PKT_VL,
3094 						  idx_from_vl(vl)));
3095 
3096 		/*
3097 		 * Convert PortVlXmitWait counter from TXE
3098 		 * cycle times to flit times.
3099 		 */
3100 		rsp->vls[vfi].port_vl_xmit_wait =
3101 			cpu_to_be64(get_xmit_wait_counters(ppd, link_width,
3102 							   link_speed,
3103 							   idx_from_vl(vl)));
3104 
3105 		rsp->vls[vfi].port_vl_rcv_fecn =
3106 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FCN_VL,
3107 						  idx_from_vl(vl)));
3108 		rsp->vls[vfi].port_vl_rcv_becn =
3109 			cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_BCN_VL,
3110 						  idx_from_vl(vl)));
3111 
3112 		/* rsp->port_vl_xmit_time_cong is 0 for HFIs */
3113 		/* rsp->port_vl_xmit_wasted_bw ??? */
3114 		/* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ???
3115 		 * does this differ from rsp->vls[vfi].port_vl_xmit_wait
3116 		 */
3117 		/*rsp->vls[vfi].port_vl_mark_fecn =
3118 		 *	cpu_to_be64(read_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT
3119 		 *		+ offset));
3120 		 */
3121 		vlinfo++;
3122 		vfi++;
3123 	}
3124 
3125 	a0_datacounters(ppd, rsp, vl_select_mask);
3126 
3127 	if (resp_len)
3128 		*resp_len += response_data_size;
3129 
3130 	return reply((struct ib_mad_hdr *)pmp);
3131 }
3132 
3133 static int pma_get_ib_portcounters_ext(struct ib_pma_mad *pmp,
3134 				       struct ib_device *ibdev, u8 port)
3135 {
3136 	struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)
3137 						pmp->data;
3138 	struct _port_dctrs rsp;
3139 
3140 	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
3141 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3142 		goto bail;
3143 	}
3144 
3145 	memset(&rsp, 0, sizeof(rsp));
3146 	pma_get_opa_port_dctrs(ibdev, &rsp);
3147 
3148 	p->port_xmit_data = rsp.port_xmit_data;
3149 	p->port_rcv_data = rsp.port_rcv_data;
3150 	p->port_xmit_packets = rsp.port_xmit_pkts;
3151 	p->port_rcv_packets = rsp.port_rcv_pkts;
3152 	p->port_unicast_xmit_packets = 0;
3153 	p->port_unicast_rcv_packets = 0;
3154 	p->port_multicast_xmit_packets = rsp.port_multicast_xmit_pkts;
3155 	p->port_multicast_rcv_packets = rsp.port_multicast_rcv_pkts;
3156 
3157 bail:
3158 	return reply((struct ib_mad_hdr *)pmp);
3159 }
3160 
3161 static void pma_get_opa_port_ectrs(struct ib_device *ibdev,
3162 				   struct _port_ectrs *rsp, u8 port)
3163 {
3164 	u64 tmp, tmp2;
3165 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3166 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3167 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3168 
3169 	tmp = read_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL);
3170 	tmp2 = tmp + read_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
3171 					CNTR_INVALID_VL);
3172 	if (tmp2 > (u32)UINT_MAX || tmp2 < tmp) {
3173 		/* overflow/wrapped */
3174 		rsp->link_error_recovery = cpu_to_be32(~0);
3175 	} else {
3176 		rsp->link_error_recovery = cpu_to_be32(tmp2);
3177 	}
3178 
3179 	rsp->link_downed = cpu_to_be32(read_port_cntr(ppd, C_SW_LINK_DOWN,
3180 						CNTR_INVALID_VL));
3181 	rsp->port_rcv_errors =
3182 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
3183 	rsp->port_rcv_remote_physical_errors =
3184 		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
3185 					  CNTR_INVALID_VL));
3186 	rsp->port_rcv_switch_relay_errors = 0;
3187 	rsp->port_xmit_discards =
3188 		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD,
3189 					   CNTR_INVALID_VL));
3190 	rsp->port_xmit_constraint_errors =
3191 		cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_CSTR_ERR,
3192 					   CNTR_INVALID_VL));
3193 	rsp->port_rcv_constraint_errors =
3194 		cpu_to_be64(read_port_cntr(ppd, C_SW_RCV_CSTR_ERR,
3195 					   CNTR_INVALID_VL));
3196 	rsp->local_link_integrity_errors =
3197 		cpu_to_be64(read_dev_cntr(dd, C_DC_RX_REPLAY,
3198 					  CNTR_INVALID_VL));
3199 	rsp->excessive_buffer_overruns =
3200 		cpu_to_be64(read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL));
3201 }
3202 
3203 static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
3204 				  struct ib_device *ibdev,
3205 				  u8 port, u32 *resp_len)
3206 {
3207 	size_t response_data_size;
3208 	struct _port_ectrs *rsp;
3209 	u8 port_num;
3210 	struct opa_port_error_counters64_msg *req;
3211 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3212 	u32 num_ports;
3213 	u8 num_pslm;
3214 	u8 num_vls;
3215 	struct hfi1_ibport *ibp;
3216 	struct hfi1_pportdata *ppd;
3217 	struct _vls_ectrs *vlinfo;
3218 	unsigned long vl;
3219 	u64 port_mask, tmp;
3220 	u32 vl_select_mask;
3221 	int vfi;
3222 
3223 	req = (struct opa_port_error_counters64_msg *)pmp->data;
3224 
3225 	num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3226 
3227 	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3228 	num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
3229 
3230 	if (num_ports != 1 || num_ports != num_pslm) {
3231 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3232 		return reply((struct ib_mad_hdr *)pmp);
3233 	}
3234 
3235 	response_data_size = sizeof(struct opa_port_error_counters64_msg) +
3236 				num_vls * sizeof(struct _vls_ectrs);
3237 
3238 	if (response_data_size > sizeof(pmp->data)) {
3239 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3240 		return reply((struct ib_mad_hdr *)pmp);
3241 	}
3242 	/*
3243 	 * The bit set in the mask needs to be consistent with the
3244 	 * port the request came in on.
3245 	 */
3246 	port_mask = be64_to_cpu(req->port_select_mask[3]);
3247 	port_num = find_first_bit((unsigned long *)&port_mask,
3248 				  sizeof(port_mask) * 8);
3249 
3250 	if (port_num != port) {
3251 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3252 		return reply((struct ib_mad_hdr *)pmp);
3253 	}
3254 
3255 	rsp = &req->port[0];
3256 
3257 	ibp = to_iport(ibdev, port_num);
3258 	ppd = ppd_from_ibp(ibp);
3259 
3260 	memset(rsp, 0, sizeof(*rsp));
3261 	rsp->port_number = port_num;
3262 
3263 	pma_get_opa_port_ectrs(ibdev, rsp, port_num);
3264 
3265 	rsp->port_rcv_remote_physical_errors =
3266 		cpu_to_be64(read_dev_cntr(dd, C_DC_RMT_PHY_ERR,
3267 					  CNTR_INVALID_VL));
3268 	rsp->fm_config_errors =
3269 		cpu_to_be64(read_dev_cntr(dd, C_DC_FM_CFG_ERR,
3270 					  CNTR_INVALID_VL));
3271 	tmp = read_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL);
3272 
3273 	rsp->uncorrectable_errors = tmp < 0x100 ? (tmp & 0xff) : 0xff;
3274 	rsp->port_rcv_errors =
3275 		cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL));
3276 	vlinfo = &rsp->vls[0];
3277 	vfi = 0;
3278 	vl_select_mask = be32_to_cpu(req->vl_select_mask);
3279 	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
3280 			 8 * sizeof(req->vl_select_mask)) {
3281 		memset(vlinfo, 0, sizeof(*vlinfo));
3282 		rsp->vls[vfi].port_vl_xmit_discards =
3283 			cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
3284 						   idx_from_vl(vl)));
3285 		vlinfo += 1;
3286 		vfi++;
3287 	}
3288 
3289 	if (resp_len)
3290 		*resp_len += response_data_size;
3291 
3292 	return reply((struct ib_mad_hdr *)pmp);
3293 }
3294 
3295 static int pma_get_ib_portcounters(struct ib_pma_mad *pmp,
3296 				   struct ib_device *ibdev, u8 port)
3297 {
3298 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
3299 		pmp->data;
3300 	struct _port_ectrs rsp;
3301 	u64 temp_link_overrun_errors;
3302 	u64 temp_64;
3303 	u32 temp_32;
3304 
3305 	memset(&rsp, 0, sizeof(rsp));
3306 	pma_get_opa_port_ectrs(ibdev, &rsp, port);
3307 
3308 	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
3309 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3310 		goto bail;
3311 	}
3312 
3313 	p->symbol_error_counter = 0; /* N/A for OPA */
3314 
3315 	temp_32 = be32_to_cpu(rsp.link_error_recovery);
3316 	if (temp_32 > 0xFFUL)
3317 		p->link_error_recovery_counter = 0xFF;
3318 	else
3319 		p->link_error_recovery_counter = (u8)temp_32;
3320 
3321 	temp_32 = be32_to_cpu(rsp.link_downed);
3322 	if (temp_32 > 0xFFUL)
3323 		p->link_downed_counter = 0xFF;
3324 	else
3325 		p->link_downed_counter = (u8)temp_32;
3326 
3327 	temp_64 = be64_to_cpu(rsp.port_rcv_errors);
3328 	if (temp_64 > 0xFFFFUL)
3329 		p->port_rcv_errors = cpu_to_be16(0xFFFF);
3330 	else
3331 		p->port_rcv_errors = cpu_to_be16((u16)temp_64);
3332 
3333 	temp_64 = be64_to_cpu(rsp.port_rcv_remote_physical_errors);
3334 	if (temp_64 > 0xFFFFUL)
3335 		p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
3336 	else
3337 		p->port_rcv_remphys_errors = cpu_to_be16((u16)temp_64);
3338 
3339 	temp_64 = be64_to_cpu(rsp.port_rcv_switch_relay_errors);
3340 	p->port_rcv_switch_relay_errors = cpu_to_be16((u16)temp_64);
3341 
3342 	temp_64 = be64_to_cpu(rsp.port_xmit_discards);
3343 	if (temp_64 > 0xFFFFUL)
3344 		p->port_xmit_discards = cpu_to_be16(0xFFFF);
3345 	else
3346 		p->port_xmit_discards = cpu_to_be16((u16)temp_64);
3347 
3348 	temp_64 = be64_to_cpu(rsp.port_xmit_constraint_errors);
3349 	if (temp_64 > 0xFFUL)
3350 		p->port_xmit_constraint_errors = 0xFF;
3351 	else
3352 		p->port_xmit_constraint_errors = (u8)temp_64;
3353 
3354 	temp_64 = be64_to_cpu(rsp.port_rcv_constraint_errors);
3355 	if (temp_64 > 0xFFUL)
3356 		p->port_rcv_constraint_errors = 0xFF;
3357 	else
3358 		p->port_rcv_constraint_errors = (u8)temp_64;
3359 
3360 	/* LocalLink: 7:4, BufferOverrun: 3:0 */
3361 	temp_64 = be64_to_cpu(rsp.local_link_integrity_errors);
3362 	if (temp_64 > 0xFUL)
3363 		temp_64 = 0xFUL;
3364 
3365 	temp_link_overrun_errors = temp_64 << 4;
3366 
3367 	temp_64 = be64_to_cpu(rsp.excessive_buffer_overruns);
3368 	if (temp_64 > 0xFUL)
3369 		temp_64 = 0xFUL;
3370 	temp_link_overrun_errors |= temp_64;
3371 
3372 	p->link_overrun_errors = (u8)temp_link_overrun_errors;
3373 
3374 	p->vl15_dropped = 0; /* N/A for OPA */
3375 
3376 bail:
3377 	return reply((struct ib_mad_hdr *)pmp);
3378 }
3379 
3380 static int pma_get_opa_errorinfo(struct opa_pma_mad *pmp,
3381 				 struct ib_device *ibdev,
3382 				 u8 port, u32 *resp_len)
3383 {
3384 	size_t response_data_size;
3385 	struct _port_ei *rsp;
3386 	struct opa_port_error_info_msg *req;
3387 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3388 	u64 port_mask;
3389 	u32 num_ports;
3390 	u8 port_num;
3391 	u8 num_pslm;
3392 	u64 reg;
3393 
3394 	req = (struct opa_port_error_info_msg *)pmp->data;
3395 	rsp = &req->port[0];
3396 
3397 	num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3398 	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3399 
3400 	memset(rsp, 0, sizeof(*rsp));
3401 
3402 	if (num_ports != 1 || num_ports != num_pslm) {
3403 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3404 		return reply((struct ib_mad_hdr *)pmp);
3405 	}
3406 
3407 	/* Sanity check */
3408 	response_data_size = sizeof(struct opa_port_error_info_msg);
3409 
3410 	if (response_data_size > sizeof(pmp->data)) {
3411 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3412 		return reply((struct ib_mad_hdr *)pmp);
3413 	}
3414 
3415 	/*
3416 	 * The bit set in the mask needs to be consistent with the port
3417 	 * the request came in on.
3418 	 */
3419 	port_mask = be64_to_cpu(req->port_select_mask[3]);
3420 	port_num = find_first_bit((unsigned long *)&port_mask,
3421 				  sizeof(port_mask) * 8);
3422 
3423 	if (port_num != port) {
3424 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3425 		return reply((struct ib_mad_hdr *)pmp);
3426 	}
3427 	rsp->port_number = port;
3428 
3429 	/* PortRcvErrorInfo */
3430 	rsp->port_rcv_ei.status_and_code =
3431 		dd->err_info_rcvport.status_and_code;
3432 	memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit1,
3433 	       &dd->err_info_rcvport.packet_flit1, sizeof(u64));
3434 	memcpy(&rsp->port_rcv_ei.ei.ei1to12.packet_flit2,
3435 	       &dd->err_info_rcvport.packet_flit2, sizeof(u64));
3436 
3437 	/* ExcessiveBufferOverrunInfo */
3438 	reg = read_csr(dd, RCV_ERR_INFO);
3439 	if (reg & RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK) {
3440 		/*
3441 		 * if the RcvExcessBufferOverrun bit is set, save SC of
3442 		 * first pkt that encountered an excess buffer overrun
3443 		 */
3444 		u8 tmp = (u8)reg;
3445 
3446 		tmp &= RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SC_SMASK;
3447 		tmp <<= 2;
3448 		rsp->excessive_buffer_overrun_ei.status_and_sc = tmp;
3449 		/* set the status bit */
3450 		rsp->excessive_buffer_overrun_ei.status_and_sc |= 0x80;
3451 	}
3452 
3453 	rsp->port_xmit_constraint_ei.status =
3454 		dd->err_info_xmit_constraint.status;
3455 	rsp->port_xmit_constraint_ei.pkey =
3456 		cpu_to_be16(dd->err_info_xmit_constraint.pkey);
3457 	rsp->port_xmit_constraint_ei.slid =
3458 		cpu_to_be32(dd->err_info_xmit_constraint.slid);
3459 
3460 	rsp->port_rcv_constraint_ei.status =
3461 		dd->err_info_rcv_constraint.status;
3462 	rsp->port_rcv_constraint_ei.pkey =
3463 		cpu_to_be16(dd->err_info_rcv_constraint.pkey);
3464 	rsp->port_rcv_constraint_ei.slid =
3465 		cpu_to_be32(dd->err_info_rcv_constraint.slid);
3466 
3467 	/* UncorrectableErrorInfo */
3468 	rsp->uncorrectable_ei.status_and_code = dd->err_info_uncorrectable;
3469 
3470 	/* FMConfigErrorInfo */
3471 	rsp->fm_config_ei.status_and_code = dd->err_info_fmconfig;
3472 
3473 	if (resp_len)
3474 		*resp_len += response_data_size;
3475 
3476 	return reply((struct ib_mad_hdr *)pmp);
3477 }
3478 
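/*
 * Editor's note: both ErrorInfo handlers validate the request the same
 * way: the attribute modifier must select exactly one port, the
 * port-select mask must have exactly one bit set, and that bit must
 * match the ingress port.  Condensed into one sketch (names as used
 * above; illustrative only):
 *
 *	u64 mask = be64_to_cpu(req->port_select_mask[3]);
 *
 *	if (OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod)) != 1 ||
 *	    hweight64(mask) != 1 ||
 *	    find_first_bit((unsigned long *)&mask, 64) != port) {
 *		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 *		return reply((struct ib_mad_hdr *)pmp);
 *	}
 */
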
3479 static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
3480 				  struct ib_device *ibdev,
3481 				  u8 port, u32 *resp_len)
3482 {
3483 	struct opa_clear_port_status *req =
3484 		(struct opa_clear_port_status *)pmp->data;
3485 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3486 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3487 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3488 	u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
3489 	u64 portn = be64_to_cpu(req->port_select_mask[3]);
3490 	u32 counter_select = be32_to_cpu(req->counter_select_mask);
3491 	u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
3492 	unsigned long vl;
3493 
3494 	if ((nports != 1) || (portn != 1 << port)) {
3495 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3496 		return reply((struct ib_mad_hdr *)pmp);
3497 	}
3498 	/*
3499 	 * only counters returned by pma_get_opa_portstatus() are
3500 	 * handled, so when pma_get_opa_portstatus() gets a fix,
3501 	 * the corresponding change should be made here as well.
3502 	 */
3503 
3504 	if (counter_select & CS_PORT_XMIT_DATA)
3505 		write_dev_cntr(dd, C_DC_XMIT_FLITS, CNTR_INVALID_VL, 0);
3506 
3507 	if (counter_select & CS_PORT_RCV_DATA)
3508 		write_dev_cntr(dd, C_DC_RCV_FLITS, CNTR_INVALID_VL, 0);
3509 
3510 	if (counter_select & CS_PORT_XMIT_PKTS)
3511 		write_dev_cntr(dd, C_DC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3512 
3513 	if (counter_select & CS_PORT_RCV_PKTS)
3514 		write_dev_cntr(dd, C_DC_RCV_PKTS, CNTR_INVALID_VL, 0);
3515 
3516 	if (counter_select & CS_PORT_MCAST_XMIT_PKTS)
3517 		write_dev_cntr(dd, C_DC_MC_XMIT_PKTS, CNTR_INVALID_VL, 0);
3518 
3519 	if (counter_select & CS_PORT_MCAST_RCV_PKTS)
3520 		write_dev_cntr(dd, C_DC_MC_RCV_PKTS, CNTR_INVALID_VL, 0);
3521 
3522 	if (counter_select & CS_PORT_XMIT_WAIT) {
3523 		write_port_cntr(ppd, C_TX_WAIT, CNTR_INVALID_VL, 0);
3524 		ppd->port_vl_xmit_wait_last[C_VL_COUNT] = 0;
3525 		ppd->vl_xmit_flit_cnt[C_VL_COUNT] = 0;
3526 	}
3527 	/* ignore cs_sw_portCongestion for HFIs */
3528 
3529 	if (counter_select & CS_PORT_RCV_FECN)
3530 		write_dev_cntr(dd, C_DC_RCV_FCN, CNTR_INVALID_VL, 0);
3531 
3532 	if (counter_select & CS_PORT_RCV_BECN)
3533 		write_dev_cntr(dd, C_DC_RCV_BCN, CNTR_INVALID_VL, 0);
3534 
3535 	/* ignore cs_port_xmit_time_cong for HFIs */
3536 	/* ignore cs_port_xmit_wasted_bw for now */
3537 	/* ignore cs_port_xmit_wait_data for now */
3538 	if (counter_select & CS_PORT_RCV_BUBBLE)
3539 		write_dev_cntr(dd, C_DC_RCV_BBL, CNTR_INVALID_VL, 0);
3540 
3541 	/* Only applicable for switch */
3542 	/* if (counter_select & CS_PORT_MARK_FECN)
3543 	 *	write_csr(dd, DCC_PRF_PORT_MARK_FECN_CNT, 0);
3544 	 */
3545 
3546 	if (counter_select & CS_PORT_RCV_CONSTRAINT_ERRORS)
3547 		write_port_cntr(ppd, C_SW_RCV_CSTR_ERR, CNTR_INVALID_VL, 0);
3548 
3549 	/* ignore cs_port_rcv_switch_relay_errors for HFIs */
3550 	if (counter_select & CS_PORT_XMIT_DISCARDS)
3551 		write_port_cntr(ppd, C_SW_XMIT_DSCD, CNTR_INVALID_VL, 0);
3552 
3553 	if (counter_select & CS_PORT_XMIT_CONSTRAINT_ERRORS)
3554 		write_port_cntr(ppd, C_SW_XMIT_CSTR_ERR, CNTR_INVALID_VL, 0);
3555 
3556 	if (counter_select & CS_PORT_RCV_REMOTE_PHYSICAL_ERRORS)
3557 		write_dev_cntr(dd, C_DC_RMT_PHY_ERR, CNTR_INVALID_VL, 0);
3558 
3559 	if (counter_select & CS_LOCAL_LINK_INTEGRITY_ERRORS)
3560 		write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
3561 
3562 	if (counter_select & CS_LINK_ERROR_RECOVERY) {
3563 		write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
3564 		write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT,
3565 			       CNTR_INVALID_VL, 0);
3566 	}
3567 
3568 	if (counter_select & CS_PORT_RCV_ERRORS)
3569 		write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
3570 
3571 	if (counter_select & CS_EXCESSIVE_BUFFER_OVERRUNS) {
3572 		write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
3573 		dd->rcv_ovfl_cnt = 0;
3574 	}
3575 
3576 	if (counter_select & CS_FM_CONFIG_ERRORS)
3577 		write_dev_cntr(dd, C_DC_FM_CFG_ERR, CNTR_INVALID_VL, 0);
3578 
3579 	if (counter_select & CS_LINK_DOWNED)
3580 		write_port_cntr(ppd, C_SW_LINK_DOWN, CNTR_INVALID_VL, 0);
3581 
3582 	if (counter_select & CS_UNCORRECTABLE_ERRORS)
3583 		write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
3584 
3585 	for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
3586 			 8 * sizeof(vl_select_mask)) {
3587 		if (counter_select & CS_PORT_XMIT_DATA)
3588 			write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
3589 
3590 		if (counter_select & CS_PORT_RCV_DATA)
3591 			write_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl), 0);
3592 
3593 		if (counter_select & CS_PORT_XMIT_PKTS)
3594 			write_port_cntr(ppd, C_TX_PKT_VL, idx_from_vl(vl), 0);
3595 
3596 		if (counter_select & CS_PORT_RCV_PKTS)
3597 			write_dev_cntr(dd, C_DC_RX_PKT_VL, idx_from_vl(vl), 0);
3598 
3599 		if (counter_select & CS_PORT_XMIT_WAIT) {
3600 			write_port_cntr(ppd, C_TX_WAIT_VL, idx_from_vl(vl), 0);
3601 			ppd->port_vl_xmit_wait_last[idx_from_vl(vl)] = 0;
3602 			ppd->vl_xmit_flit_cnt[idx_from_vl(vl)] = 0;
3603 		}
3604 
3605 		/* sw_port_vl_congestion is 0 for HFIs */
3606 		if (counter_select & CS_PORT_RCV_FECN)
3607 			write_dev_cntr(dd, C_DC_RCV_FCN_VL, idx_from_vl(vl), 0);
3608 
3609 		if (counter_select & CS_PORT_RCV_BECN)
3610 			write_dev_cntr(dd, C_DC_RCV_BCN_VL, idx_from_vl(vl), 0);
3611 
3612 		/* port_vl_xmit_time_cong is 0 for HFIs */
3613 		/* port_vl_xmit_wasted_bw ??? */
3614 		/* port_vl_xmit_wait_data - TXE (table 13-9 HFI spec) ??? */
3615 		if (counter_select & CS_PORT_RCV_BUBBLE)
3616 			write_dev_cntr(dd, C_DC_RCV_BBL_VL, idx_from_vl(vl), 0);
3617 
3618 		/* if (counter_select & CS_PORT_MARK_FECN)
3619 		 *     write_csr(dd, DCC_PRF_PORT_VL_MARK_FECN_CNT + offset, 0);
3620 		 */
3621 		if (counter_select & CS_PORT_XMIT_DISCARDS)
3622 			write_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
3623 					idx_from_vl(vl), 0);
3624 	}
3625 
3626 	if (resp_len)
3627 		*resp_len += sizeof(*req);
3628 
3629 	return reply((struct ib_mad_hdr *)pmp);
3630 }
3631 
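/*
 * Editor's note: CounterSelect in ClearPortStatus is a plain bitmask,
 * so a requester clears a subset of counters by OR-ing CS_* bits into
 * the request.  A hypothetical requester-side sketch (assuming the
 * CS_* and struct definitions from this driver's mad.h):
 *
 *	struct opa_clear_port_status *req = ...;
 *
 *	req->port_select_mask[3] = cpu_to_be64(1ULL << port);
 *	req->counter_select_mask =
 *		cpu_to_be32(CS_PORT_XMIT_DATA | CS_PORT_RCV_DATA |
 *			    CS_PORT_RCV_ERRORS);
 */
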
3632 static int pma_set_opa_errorinfo(struct opa_pma_mad *pmp,
3633 				 struct ib_device *ibdev,
3634 				 u8 port, u32 *resp_len)
3635 {
3636 	struct _port_ei *rsp;
3637 	struct opa_port_error_info_msg *req;
3638 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
3639 	u64 port_mask;
3640 	u32 num_ports;
3641 	u8 port_num;
3642 	u8 num_pslm;
3643 	u32 error_info_select;
3644 
3645 	req = (struct opa_port_error_info_msg *)pmp->data;
3646 	rsp = &req->port[0];
3647 
3648 	num_ports = OPA_AM_NPORT(be32_to_cpu(pmp->mad_hdr.attr_mod));
3649 	num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
3650 
3651 	memset(rsp, 0, sizeof(*rsp));
3652 
3653 	if (num_ports != 1 || num_ports != num_pslm) {
3654 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3655 		return reply((struct ib_mad_hdr *)pmp);
3656 	}
3657 
3658 	/*
3659 	 * The bit set in the mask needs to be consistent with the port
3660 	 * the request came in on.
3661 	 */
3662 	port_mask = be64_to_cpu(req->port_select_mask[3]);
3663 	port_num = find_first_bit((unsigned long *)&port_mask,
3664 				  sizeof(port_mask) * 8);
3665 
3666 	if (port_num != port) {
3667 		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
3668 		return reply((struct ib_mad_hdr *)pmp);
3669 	}
3670 
3671 	error_info_select = be32_to_cpu(req->error_info_select_mask);
3672 
3673 	/* PortRcvErrorInfo */
3674 	if (error_info_select & ES_PORT_RCV_ERROR_INFO)
3675 		/* turn off status bit */
3676 		dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
3677 
3678 	/* ExcessiveBufferOverrunInfo */
3679 	if (error_info_select & ES_EXCESSIVE_BUFFER_OVERRUN_INFO)
3680 		/*
3681 		 * status bit is essentially kept in the h/w - bit 5 of
3682 		 * RCV_ERR_INFO
3683 		 */
3684 		write_csr(dd, RCV_ERR_INFO,
3685 			  RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
3686 
3687 	if (error_info_select & ES_PORT_XMIT_CONSTRAINT_ERROR_INFO)
3688 		dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
3689 
3690 	if (error_info_select & ES_PORT_RCV_CONSTRAINT_ERROR_INFO)
3691 		dd->err_info_rcv_constraint.status &= ~OPA_EI_STATUS_SMASK;
3692 
3693 	/* UncorrectableErrorInfo */
3694 	if (error_info_select & ES_UNCORRECTABLE_ERROR_INFO)
3695 		/* turn off status bit */
3696 		dd->err_info_uncorrectable &= ~OPA_EI_STATUS_SMASK;
3697 
3698 	/* FMConfigErrorInfo */
3699 	if (error_info_select & ES_FM_CONFIG_ERROR_INFO)
3700 		/* turn off status bit */
3701 		dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
3702 
3703 	if (resp_len)
3704 		*resp_len += sizeof(*req);
3705 
3706 	return reply((struct ib_mad_hdr *)pmp);
3707 }
3708 
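/*
 * Editor's note: most ErrorInfo status bits are shadowed in driver
 * state and cleared with a simple mask, e.g.:
 *
 *	dd->err_info_fmconfig &= ~OPA_EI_STATUS_SMASK;
 *
 * ExcessiveBufferOverrun is the exception: its status lives in the
 * RCV_ERR_INFO CSR, so the Set handler clears it by writing the
 * overrun bit back to the register (the CSR is treated as
 * write-1-to-clear here), as above.
 */
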
3709 struct opa_congestion_info_attr {
3710 	__be16 congestion_info;
3711 	u8 control_table_cap;	/* in units of 64-entry CCT blocks */
3712 	u8 congestion_log_length;
3713 } __packed;
3714 
3715 static int __subn_get_opa_cong_info(struct opa_smp *smp, u32 am, u8 *data,
3716 				    struct ib_device *ibdev, u8 port,
3717 				    u32 *resp_len, u32 max_len)
3718 {
3719 	struct opa_congestion_info_attr *p =
3720 		(struct opa_congestion_info_attr *)data;
3721 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3722 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3723 
3724 	if (smp_length_check(sizeof(*p), max_len)) {
3725 		smp->status |= IB_SMP_INVALID_FIELD;
3726 		return reply((struct ib_mad_hdr *)smp);
3727 	}
3728 
3729 	p->congestion_info = 0;
3730 	p->control_table_cap = ppd->cc_max_table_entries;
3731 	p->congestion_log_length = OPA_CONG_LOG_ELEMS;
3732 
3733 	if (resp_len)
3734 		*resp_len += sizeof(*p);
3735 
3736 	return reply((struct ib_mad_hdr *)smp);
3737 }
3738 
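/*
 * Editor's note: control_table_cap is reported in 64-entry CCT blocks
 * (see the struct comment above), so a consumer would derive the total
 * entry capacity as, in sketch form (assuming IB_CCT_ENTRIES == 64):
 *
 *	max_cct_entries = p->control_table_cap * IB_CCT_ENTRIES;
 */
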
3739 static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
3740 				       u8 *data, struct ib_device *ibdev,
3741 				       u8 port, u32 *resp_len, u32 max_len)
3742 {
3743 	int i;
3744 	struct opa_congestion_setting_attr *p =
3745 		(struct opa_congestion_setting_attr *)data;
3746 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3747 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3748 	struct opa_congestion_setting_entry_shadow *entries;
3749 	struct cc_state *cc_state;
3750 
3751 	if (smp_length_check(sizeof(*p), max_len)) {
3752 		smp->status |= IB_SMP_INVALID_FIELD;
3753 		return reply((struct ib_mad_hdr *)smp);
3754 	}
3755 
3756 	rcu_read_lock();
3757 
3758 	cc_state = get_cc_state(ppd);
3759 
3760 	if (!cc_state) {
3761 		rcu_read_unlock();
3762 		return reply((struct ib_mad_hdr *)smp);
3763 	}
3764 
3765 	entries = cc_state->cong_setting.entries;
3766 	p->port_control = cpu_to_be16(cc_state->cong_setting.port_control);
3767 	p->control_map = cpu_to_be32(cc_state->cong_setting.control_map);
3768 	for (i = 0; i < OPA_MAX_SLS; i++) {
3769 		p->entries[i].ccti_increase = entries[i].ccti_increase;
3770 		p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
3771 		p->entries[i].trigger_threshold =
3772 			entries[i].trigger_threshold;
3773 		p->entries[i].ccti_min = entries[i].ccti_min;
3774 	}
3775 
3776 	rcu_read_unlock();
3777 
3778 	if (resp_len)
3779 		*resp_len += sizeof(*p);
3780 
3781 	return reply((struct ib_mad_hdr *)smp);
3782 }
3783 
3784 /*
3785  * Apply congestion control information stored in the ppd to the
3786  * active structure.
3787  */
3788 static void apply_cc_state(struct hfi1_pportdata *ppd)
3789 {
3790 	struct cc_state *old_cc_state, *new_cc_state;
3791 
3792 	new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
3793 	if (!new_cc_state)
3794 		return;
3795 
3796 	/*
3797 	 * Hold the lock for updating *and* to prevent ppd information
3798 	 * from changing during the update.
3799 	 */
3800 	spin_lock(&ppd->cc_state_lock);
3801 
3802 	old_cc_state = get_cc_state_protected(ppd);
3803 	if (!old_cc_state) {
3804 		/* never active, or shutting down */
3805 		spin_unlock(&ppd->cc_state_lock);
3806 		kfree(new_cc_state);
3807 		return;
3808 	}
3809 
3810 	*new_cc_state = *old_cc_state;
3811 
3812 	if (ppd->total_cct_entry)
3813 		new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
3814 	else
3815 		new_cc_state->cct.ccti_limit = 0;
3816 
3817 	memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
3818 	       ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
3819 
3820 	new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
3821 	new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
3822 	memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
3823 	       OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
3824 
3825 	rcu_assign_pointer(ppd->cc_state, new_cc_state);
3826 
3827 	spin_unlock(&ppd->cc_state_lock);
3828 
3829 	kfree_rcu(old_cc_state, rcu);
3830 }
3831 
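/*
 * Editor's note: apply_cc_state() is a textbook RCU copy-update:
 * allocate a new state, copy the old one, modify the copy under the
 * lock, publish it with rcu_assign_pointer(), and reclaim the old copy
 * with kfree_rcu() once all readers have finished.  The matching read
 * side, as used by the Get handlers in this file, is:
 *
 *	rcu_read_lock();
 *	cc_state = get_cc_state(ppd);
 *	if (cc_state)
 *		... copy out fields ...
 *	rcu_read_unlock();
 *
 * so readers always see either the complete old state or the complete
 * new one, never a partial update.
 */
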
3832 static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
3833 				       struct ib_device *ibdev, u8 port,
3834 				       u32 *resp_len, u32 max_len)
3835 {
3836 	struct opa_congestion_setting_attr *p =
3837 		(struct opa_congestion_setting_attr *)data;
3838 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3839 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3840 	struct opa_congestion_setting_entry_shadow *entries;
3841 	int i;
3842 
3843 	if (smp_length_check(sizeof(*p), max_len)) {
3844 		smp->status |= IB_SMP_INVALID_FIELD;
3845 		return reply((struct ib_mad_hdr *)smp);
3846 	}
3847 
3848 	/*
3849 	 * Save details from packet into the ppd.  Hold the cc_state_lock so
3850 	 * our information is consistent with anyone trying to apply the state.
3851 	 */
3852 	spin_lock(&ppd->cc_state_lock);
3853 	ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
3854 
3855 	entries = ppd->congestion_entries;
3856 	for (i = 0; i < OPA_MAX_SLS; i++) {
3857 		entries[i].ccti_increase = p->entries[i].ccti_increase;
3858 		entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer);
3859 		entries[i].trigger_threshold =
3860 			p->entries[i].trigger_threshold;
3861 		entries[i].ccti_min = p->entries[i].ccti_min;
3862 	}
3863 	spin_unlock(&ppd->cc_state_lock);
3864 
3865 	/* now apply the information */
3866 	apply_cc_state(ppd);
3867 
3868 	return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
3869 					   resp_len, max_len);
3870 }
3871 
3872 static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
3873 					u8 *data, struct ib_device *ibdev,
3874 					u8 port, u32 *resp_len, u32 max_len)
3875 {
3876 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3877 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3878 	struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
3879 	u64 ts;
3880 	int i;
3881 
3882 	if (am || smp_length_check(sizeof(*cong_log), max_len)) {
3883 		smp->status |= IB_SMP_INVALID_FIELD;
3884 		return reply((struct ib_mad_hdr *)smp);
3885 	}
3886 
3887 	spin_lock_irq(&ppd->cc_log_lock);
3888 
3889 	cong_log->log_type = OPA_CC_LOG_TYPE_HFI;
3890 	cong_log->congestion_flags = 0;
3891 	cong_log->threshold_event_counter =
3892 		cpu_to_be16(ppd->threshold_event_counter);
3893 	memcpy(cong_log->threshold_cong_event_map,
3894 	       ppd->threshold_cong_event_map,
3895 	       sizeof(cong_log->threshold_cong_event_map));
3896 	/* keep timestamp in units of 1.024 usec */
3897 	ts = ktime_get_ns() / 1024;
3898 	cong_log->current_time_stamp = cpu_to_be32(ts);
3899 	for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
3900 		struct opa_hfi1_cong_log_event_internal *cce =
3901 			&ppd->cc_events[ppd->cc_mad_idx++];
3902 		if (ppd->cc_mad_idx == OPA_CONG_LOG_ELEMS)
3903 			ppd->cc_mad_idx = 0;
3904 		/*
3905 		 * Entries which are older than twice the time
3906 		 * required to wrap the counter are supposed to
3907 		 * be zeroed (CA10-49 IBTA, release 1.2.1, V1).
3908 		 */
3909 		if ((ts - cce->timestamp) / 2 > U32_MAX)
3910 			continue;
3911 		memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
3912 		memcpy(cong_log->events[i].remote_qp_number_cn_entry,
3913 		       &cce->rqpn, 3);
3914 		cong_log->events[i].sl_svc_type_cn_entry =
3915 			((cce->sl & 0x1f) << 3) | (cce->svc_type & 0x7);
3916 		cong_log->events[i].remote_lid_cn_entry =
3917 			cpu_to_be32(cce->rlid);
3918 		cong_log->events[i].timestamp_cn_entry =
3919 			cpu_to_be32(cce->timestamp);
3920 	}
3921 
3922 	/*
3923 	 * Reset threshold_cong_event_map and threshold_event_counter
3924 	 * to 0 when the log is read.
3925 	 */
3926 	memset(ppd->threshold_cong_event_map, 0x0,
3927 	       sizeof(ppd->threshold_cong_event_map));
3928 	ppd->threshold_event_counter = 0;
3929 
3930 	spin_unlock_irq(&ppd->cc_log_lock);
3931 
3932 	if (resp_len)
3933 		*resp_len += sizeof(struct opa_hfi1_cong_log);
3934 
3935 	return reply((struct ib_mad_hdr *)smp);
3936 }
3937 
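/*
 * Editor's note: cc_events is used as a ring buffer indexed by
 * cc_mad_idx, so the loop above reports events oldest-first by
 * starting at the current index and wrapping.  Timestamps are kept in
 * 1.024 us units to fit the 32-bit OPA fields; a hypothetical consumer
 * converts back to nanoseconds with:
 *
 *	u64 ns = (u64)be32_to_cpu(ev->timestamp_cn_entry) * 1024;
 */
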
3938 static int __subn_get_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3939 				   struct ib_device *ibdev, u8 port,
3940 				   u32 *resp_len, u32 max_len)
3941 {
3942 	struct ib_cc_table_attr *cc_table_attr =
3943 		(struct ib_cc_table_attr *)data;
3944 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3945 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3946 	u32 start_block = OPA_AM_START_BLK(am);
3947 	u32 n_blocks = OPA_AM_NBLK(am);
3948 	struct ib_cc_table_entry_shadow *entries;
3949 	int i, j;
3950 	u32 sentry, eentry;
3951 	struct cc_state *cc_state;
3952 	u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
3953 
3954 	/* sanity check n_blocks, start_block */
3955 	if (n_blocks == 0 || smp_length_check(size, max_len) ||
3956 	    start_block + n_blocks > ppd->cc_max_table_entries) {
3957 		smp->status |= IB_SMP_INVALID_FIELD;
3958 		return reply((struct ib_mad_hdr *)smp);
3959 	}
3960 
3961 	rcu_read_lock();
3962 
3963 	cc_state = get_cc_state(ppd);
3964 
3965 	if (!cc_state) {
3966 		rcu_read_unlock();
3967 		return reply((struct ib_mad_hdr *)smp);
3968 	}
3969 
3970 	sentry = start_block * IB_CCT_ENTRIES;
3971 	eentry = sentry + (IB_CCT_ENTRIES * n_blocks);
3972 
3973 	cc_table_attr->ccti_limit = cpu_to_be16(cc_state->cct.ccti_limit);
3974 
3975 	entries = cc_state->cct.entries;
3976 
3977 	/* return n_blocks, though the last block may not be full */
3978 	for (j = 0, i = sentry; i < eentry; j++, i++)
3979 		cc_table_attr->ccti_entries[j].entry =
3980 			cpu_to_be16(entries[i].entry);
3981 
3982 	rcu_read_unlock();
3983 
3984 	if (resp_len)
3985 		*resp_len += size;
3986 
3987 	return reply((struct ib_mad_hdr *)smp);
3988 }
3989 
3990 static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
3991 				   struct ib_device *ibdev, u8 port,
3992 				   u32 *resp_len, u32 max_len)
3993 {
3994 	struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)data;
3995 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
3996 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
3997 	u32 start_block = OPA_AM_START_BLK(am);
3998 	u32 n_blocks = OPA_AM_NBLK(am);
3999 	struct ib_cc_table_entry_shadow *entries;
4000 	int i, j;
4001 	u32 sentry, eentry;
4002 	u16 ccti_limit;
4003 	u32 size = sizeof(u16) * (IB_CCT_ENTRIES * n_blocks + 1);
4004 
4005 	/* sanity check n_blocks, start_block */
4006 	if (n_blocks == 0 || smp_length_check(size, max_len) ||
4007 	    start_block + n_blocks > ppd->cc_max_table_entries) {
4008 		smp->status |= IB_SMP_INVALID_FIELD;
4009 		return reply((struct ib_mad_hdr *)smp);
4010 	}
4011 
4012 	sentry = start_block * IB_CCT_ENTRIES;
4013 	eentry = sentry + ((n_blocks - 1) * IB_CCT_ENTRIES) +
4014 		 (be16_to_cpu(p->ccti_limit)) % IB_CCT_ENTRIES + 1;
4015 
4016 	/* sanity check ccti_limit */
4017 	ccti_limit = be16_to_cpu(p->ccti_limit);
4018 	if (ccti_limit + 1 > eentry) {
4019 		smp->status |= IB_SMP_INVALID_FIELD;
4020 		return reply((struct ib_mad_hdr *)smp);
4021 	}
4022 
4023 	/*
4024 	 * Save details from packet into the ppd.  Hold the cc_state_lock so
4025 	 * our information is consistent with anyone trying to apply the state.
4026 	 */
4027 	spin_lock(&ppd->cc_state_lock);
4028 	ppd->total_cct_entry = ccti_limit + 1;
4029 	entries = ppd->ccti_entries;
4030 	for (j = 0, i = sentry; i < eentry; j++, i++)
4031 		entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
4032 	spin_unlock(&ppd->cc_state_lock);
4033 
4034 	/* now apply the information */
4035 	apply_cc_state(ppd);
4036 
4037 	return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len,
4038 				       max_len);
4039 }
4040 
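/*
 * Editor's note: the eentry arithmetic above lands on the index one
 * past the last valid entry of a partially filled final block.
 * Worked example (assuming IB_CCT_ENTRIES == 64): start_block = 0,
 * n_blocks = 2 and ccti_limit = 100 give
 *
 *	sentry = 0;
 *	eentry = 0 + (2 - 1) * 64 + (100 % 64) + 1 = 101;
 *
 * so entries 0..100 are copied and total_cct_entry becomes
 * ccti_limit + 1 = 101.
 */
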
4041 struct opa_led_info {
4042 	__be32 rsvd_led_mask;
4043 	__be32 rsvd;
4044 };
4045 
4046 #define OPA_LED_SHIFT	31
4047 #define OPA_LED_MASK	BIT(OPA_LED_SHIFT)
4048 
4049 static int __subn_get_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
4050 				   struct ib_device *ibdev, u8 port,
4051 				   u32 *resp_len, u32 max_len)
4052 {
4053 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
4054 	struct hfi1_pportdata *ppd = dd->pport;
4055 	struct opa_led_info *p = (struct opa_led_info *)data;
4056 	u32 nport = OPA_AM_NPORT(am);
4057 	u32 is_beaconing_active;
4058 
4059 	if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
4060 		smp->status |= IB_SMP_INVALID_FIELD;
4061 		return reply((struct ib_mad_hdr *)smp);
4062 	}
4063 
4064 	/*
4065 	 * This pairs with the memory barrier in hfi1_start_led_override to
4066 	 * ensure that we read the correct state of LED beaconing represented
4067 	 * by led_override_timer_active
4068 	 */
4069 	smp_rmb();
4070 	is_beaconing_active = !!atomic_read(&ppd->led_override_timer_active);
4071 	p->rsvd_led_mask = cpu_to_be32(is_beaconing_active << OPA_LED_SHIFT);
4072 
4073 	if (resp_len)
4074 		*resp_len += sizeof(struct opa_led_info);
4075 
4076 	return reply((struct ib_mad_hdr *)smp);
4077 }
4078 
4079 static int __subn_set_opa_led_info(struct opa_smp *smp, u32 am, u8 *data,
4080 				   struct ib_device *ibdev, u8 port,
4081 				   u32 *resp_len, u32 max_len)
4082 {
4083 	struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
4084 	struct opa_led_info *p = (struct opa_led_info *)data;
4085 	u32 nport = OPA_AM_NPORT(am);
4086 	int on = !!(be32_to_cpu(p->rsvd_led_mask) & OPA_LED_MASK);
4087 
4088 	if (nport != 1 || smp_length_check(sizeof(*p), max_len)) {
4089 		smp->status |= IB_SMP_INVALID_FIELD;
4090 		return reply((struct ib_mad_hdr *)smp);
4091 	}
4092 
4093 	if (on)
4094 		hfi1_start_led_override(dd->pport, 2000, 1500);
4095 	else
4096 		shutdown_led_override(dd->pport);
4097 
4098 	return __subn_get_opa_led_info(smp, am, data, ibdev, port, resp_len,
4099 				       max_len);
4100 }
4101 
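/*
 * Editor's note: LedInfo carries a single active-high beaconing flag
 * in the top bit of rsvd_led_mask.  A hypothetical requester enables
 * the beacon with:
 *
 *	p->rsvd_led_mask = cpu_to_be32(OPA_LED_MASK);
 *
 * and disables it by writing 0; the Set handler maps this onto the
 * driver's LED override timer with the on/off periods shown above.
 */
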
4102 static int subn_get_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
4103 			    u8 *data, struct ib_device *ibdev, u8 port,
4104 			    u32 *resp_len, u32 max_len)
4105 {
4106 	int ret;
4107 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
4108 
4109 	switch (attr_id) {
4110 	case IB_SMP_ATTR_NODE_DESC:
4111 		ret = __subn_get_opa_nodedesc(smp, am, data, ibdev, port,
4112 					      resp_len, max_len);
4113 		break;
4114 	case IB_SMP_ATTR_NODE_INFO:
4115 		ret = __subn_get_opa_nodeinfo(smp, am, data, ibdev, port,
4116 					      resp_len, max_len);
4117 		break;
4118 	case IB_SMP_ATTR_PORT_INFO:
4119 		ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port,
4120 					      resp_len, max_len);
4121 		break;
4122 	case IB_SMP_ATTR_PKEY_TABLE:
4123 		ret = __subn_get_opa_pkeytable(smp, am, data, ibdev, port,
4124 					       resp_len, max_len);
4125 		break;
4126 	case OPA_ATTRIB_ID_SL_TO_SC_MAP:
4127 		ret = __subn_get_opa_sl_to_sc(smp, am, data, ibdev, port,
4128 					      resp_len, max_len);
4129 		break;
4130 	case OPA_ATTRIB_ID_SC_TO_SL_MAP:
4131 		ret = __subn_get_opa_sc_to_sl(smp, am, data, ibdev, port,
4132 					      resp_len, max_len);
4133 		break;
4134 	case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
4135 		ret = __subn_get_opa_sc_to_vlt(smp, am, data, ibdev, port,
4136 					       resp_len, max_len);
4137 		break;
4138 	case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
4139 		ret = __subn_get_opa_sc_to_vlnt(smp, am, data, ibdev, port,
4140 						resp_len, max_len);
4141 		break;
4142 	case OPA_ATTRIB_ID_PORT_STATE_INFO:
4143 		ret = __subn_get_opa_psi(smp, am, data, ibdev, port,
4144 					 resp_len, max_len);
4145 		break;
4146 	case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
4147 		ret = __subn_get_opa_bct(smp, am, data, ibdev, port,
4148 					 resp_len, max_len);
4149 		break;
4150 	case OPA_ATTRIB_ID_CABLE_INFO:
4151 		ret = __subn_get_opa_cable_info(smp, am, data, ibdev, port,
4152 						resp_len, max_len);
4153 		break;
4154 	case IB_SMP_ATTR_VL_ARB_TABLE:
4155 		ret = __subn_get_opa_vl_arb(smp, am, data, ibdev, port,
4156 					    resp_len, max_len);
4157 		break;
4158 	case OPA_ATTRIB_ID_CONGESTION_INFO:
4159 		ret = __subn_get_opa_cong_info(smp, am, data, ibdev, port,
4160 					       resp_len, max_len);
4161 		break;
4162 	case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
4163 		ret = __subn_get_opa_cong_setting(smp, am, data, ibdev,
4164 						  port, resp_len, max_len);
4165 		break;
4166 	case OPA_ATTRIB_ID_HFI_CONGESTION_LOG:
4167 		ret = __subn_get_opa_hfi1_cong_log(smp, am, data, ibdev,
4168 						   port, resp_len, max_len);
4169 		break;
4170 	case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
4171 		ret = __subn_get_opa_cc_table(smp, am, data, ibdev, port,
4172 					      resp_len, max_len);
4173 		break;
4174 	case IB_SMP_ATTR_LED_INFO:
4175 		ret = __subn_get_opa_led_info(smp, am, data, ibdev, port,
4176 					      resp_len, max_len);
4177 		break;
4178 	case IB_SMP_ATTR_SM_INFO:
4179 		if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
4180 			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
4181 		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
4182 			return IB_MAD_RESULT_SUCCESS;
4183 		/* FALLTHROUGH */
4184 	default:
4185 		smp->status |= IB_SMP_UNSUP_METH_ATTR;
4186 		ret = reply((struct ib_mad_hdr *)smp);
4187 		break;
4188 	}
4189 	return ret;
4190 }
4191 
4192 static int subn_set_opa_sma(__be16 attr_id, struct opa_smp *smp, u32 am,
4193 			    u8 *data, struct ib_device *ibdev, u8 port,
4194 			    u32 *resp_len, u32 max_len, int local_mad)
4195 {
4196 	int ret;
4197 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
4198 
4199 	switch (attr_id) {
4200 	case IB_SMP_ATTR_PORT_INFO:
4201 		ret = __subn_set_opa_portinfo(smp, am, data, ibdev, port,
4202 					      resp_len, max_len, local_mad);
4203 		break;
4204 	case IB_SMP_ATTR_PKEY_TABLE:
4205 		ret = __subn_set_opa_pkeytable(smp, am, data, ibdev, port,
4206 					       resp_len, max_len);
4207 		break;
4208 	case OPA_ATTRIB_ID_SL_TO_SC_MAP:
4209 		ret = __subn_set_opa_sl_to_sc(smp, am, data, ibdev, port,
4210 					      resp_len, max_len);
4211 		break;
4212 	case OPA_ATTRIB_ID_SC_TO_SL_MAP:
4213 		ret = __subn_set_opa_sc_to_sl(smp, am, data, ibdev, port,
4214 					      resp_len, max_len);
4215 		break;
4216 	case OPA_ATTRIB_ID_SC_TO_VLT_MAP:
4217 		ret = __subn_set_opa_sc_to_vlt(smp, am, data, ibdev, port,
4218 					       resp_len, max_len);
4219 		break;
4220 	case OPA_ATTRIB_ID_SC_TO_VLNT_MAP:
4221 		ret = __subn_set_opa_sc_to_vlnt(smp, am, data, ibdev, port,
4222 						resp_len, max_len);
4223 		break;
4224 	case OPA_ATTRIB_ID_PORT_STATE_INFO:
4225 		ret = __subn_set_opa_psi(smp, am, data, ibdev, port,
4226 					 resp_len, max_len, local_mad);
4227 		break;
4228 	case OPA_ATTRIB_ID_BUFFER_CONTROL_TABLE:
4229 		ret = __subn_set_opa_bct(smp, am, data, ibdev, port,
4230 					 resp_len, max_len);
4231 		break;
4232 	case IB_SMP_ATTR_VL_ARB_TABLE:
4233 		ret = __subn_set_opa_vl_arb(smp, am, data, ibdev, port,
4234 					    resp_len, max_len);
4235 		break;
4236 	case OPA_ATTRIB_ID_HFI_CONGESTION_SETTING:
4237 		ret = __subn_set_opa_cong_setting(smp, am, data, ibdev,
4238 						  port, resp_len, max_len);
4239 		break;
4240 	case OPA_ATTRIB_ID_CONGESTION_CONTROL_TABLE:
4241 		ret = __subn_set_opa_cc_table(smp, am, data, ibdev, port,
4242 					      resp_len, max_len);
4243 		break;
4244 	case IB_SMP_ATTR_LED_INFO:
4245 		ret = __subn_set_opa_led_info(smp, am, data, ibdev, port,
4246 					      resp_len, max_len);
4247 		break;
4248 	case IB_SMP_ATTR_SM_INFO:
4249 		if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED)
4250 			return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
4251 		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
4252 			return IB_MAD_RESULT_SUCCESS;
4253 		/* FALLTHROUGH */
4254 	default:
4255 		smp->status |= IB_SMP_UNSUP_METH_ATTR;
4256 		ret = reply((struct ib_mad_hdr *)smp);
4257 		break;
4258 	}
4259 	return ret;
4260 }
4261 
4262 static inline void set_aggr_error(struct opa_aggregate *ag)
4263 {
4264 	ag->err_reqlength |= cpu_to_be16(0x8000);
4265 }
4266 
4267 static int subn_get_opa_aggregate(struct opa_smp *smp,
4268 				  struct ib_device *ibdev, u8 port,
4269 				  u32 *resp_len)
4270 {
4271 	int i;
4272 	u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
4273 	u8 *next_smp = opa_get_smp_data(smp);
4274 
4275 	if (num_attr < 1 || num_attr > 117) {
4276 		smp->status |= IB_SMP_INVALID_FIELD;
4277 		return reply((struct ib_mad_hdr *)smp);
4278 	}
4279 
4280 	for (i = 0; i < num_attr; i++) {
4281 		struct opa_aggregate *agg;
4282 		size_t agg_data_len;
4283 		size_t agg_size;
4284 		u32 am;
4285 
4286 		agg = (struct opa_aggregate *)next_smp;
4287 		agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
4288 		agg_size = sizeof(*agg) + agg_data_len;
4289 		am = be32_to_cpu(agg->attr_mod);
4290 
4291 		*resp_len += agg_size;
4292 
4293 		if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
4294 			smp->status |= IB_SMP_INVALID_FIELD;
4295 			return reply((struct ib_mad_hdr *)smp);
4296 		}
4297 
4298 		/* zero the payload for this segment */
4299 		memset(next_smp + sizeof(*agg), 0, agg_data_len);
4300 
4301 		(void)subn_get_opa_sma(agg->attr_id, smp, am, agg->data,
4302 				       ibdev, port, NULL, (u32)agg_data_len);
4303 
4304 		if (smp->status & IB_SMP_INVALID_FIELD)
4305 			break;
4306 		if (smp->status & ~IB_SMP_DIRECTION) {
4307 			set_aggr_error(agg);
4308 			return reply((struct ib_mad_hdr *)smp);
4309 		}
4310 		next_smp += agg_size;
4311 	}
4312 
4313 	return reply((struct ib_mad_hdr *)smp);
4314 }
4315 
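/*
 * Editor's note: each opa_aggregate segment encodes its payload length
 * in the low 7 bits of err_reqlength, in units of 8 bytes, with the
 * top bit reserved as the per-segment error flag that set_aggr_error()
 * raises.  Walking a segment therefore looks like:
 *
 *	len  = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
 *	err  = be16_to_cpu(agg->err_reqlength) & 0x8000;
 *	next = (u8 *)agg + sizeof(*agg) + len;
 */
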
4316 static int subn_set_opa_aggregate(struct opa_smp *smp,
4317 				  struct ib_device *ibdev, u8 port,
4318 				  u32 *resp_len, int local_mad)
4319 {
4320 	int i;
4321 	u32 num_attr = be32_to_cpu(smp->attr_mod) & 0x000000ff;
4322 	u8 *next_smp = opa_get_smp_data(smp);
4323 
4324 	if (num_attr < 1 || num_attr > 117) {
4325 		smp->status |= IB_SMP_INVALID_FIELD;
4326 		return reply((struct ib_mad_hdr *)smp);
4327 	}
4328 
4329 	for (i = 0; i < num_attr; i++) {
4330 		struct opa_aggregate *agg;
4331 		size_t agg_data_len;
4332 		size_t agg_size;
4333 		u32 am;
4334 
4335 		agg = (struct opa_aggregate *)next_smp;
4336 		agg_data_len = (be16_to_cpu(agg->err_reqlength) & 0x007f) * 8;
4337 		agg_size = sizeof(*agg) + agg_data_len;
4338 		am = be32_to_cpu(agg->attr_mod);
4339 
4340 		*resp_len += agg_size;
4341 
4342 		if (next_smp + agg_size > ((u8 *)smp) + sizeof(*smp)) {
4343 			smp->status |= IB_SMP_INVALID_FIELD;
4344 			return reply((struct ib_mad_hdr *)smp);
4345 		}
4346 
4347 		(void)subn_set_opa_sma(agg->attr_id, smp, am, agg->data,
4348 				       ibdev, port, NULL, (u32)agg_data_len,
4349 				       local_mad);
4350 
4351 		if (smp->status & IB_SMP_INVALID_FIELD)
4352 			break;
4353 		if (smp->status & ~IB_SMP_DIRECTION) {
4354 			set_aggr_error(agg);
4355 			return reply((struct ib_mad_hdr *)smp);
4356 		}
4357 		next_smp += agg_size;
4358 	}
4359 
4360 	return reply((struct ib_mad_hdr *)smp);
4361 }
4362 
4363 /*
4364  * OPAv1 specifies that, on the transition to link up, these counters
4365  * are cleared:
4366  *   PortRcvErrors [*]
4367  *   LinkErrorRecovery
4368  *   LocalLinkIntegrityErrors
4369  *   ExcessiveBufferOverruns [*]
4370  *
4371  * [*] Error info associated with these counters is retained, but the
4372  * error info status is reset to 0.
4373  */
4374 void clear_linkup_counters(struct hfi1_devdata *dd)
4375 {
4376 	/* PortRcvErrors */
4377 	write_dev_cntr(dd, C_DC_RCV_ERR, CNTR_INVALID_VL, 0);
4378 	dd->err_info_rcvport.status_and_code &= ~OPA_EI_STATUS_SMASK;
4379 	/* LinkErrorRecovery */
4380 	write_dev_cntr(dd, C_DC_SEQ_CRC_CNT, CNTR_INVALID_VL, 0);
4381 	write_dev_cntr(dd, C_DC_REINIT_FROM_PEER_CNT, CNTR_INVALID_VL, 0);
4382 	/* LocalLinkIntegrityErrors */
4383 	write_dev_cntr(dd, C_DC_RX_REPLAY, CNTR_INVALID_VL, 0);
4384 	/* ExcessiveBufferOverruns */
4385 	write_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL, 0);
4386 	dd->rcv_ovfl_cnt = 0;
4387 	dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
4388 }
4389 
4390 static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
4391 {
4392 	unsigned int i;
4393 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
4394 
4395 	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
4396 		if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
4397 			return 1;
4398 
4399 	return 0;
4400 }
4401 
4402 /*
4403  * is_local_mad() returns 1 if 'mad' is sent from, and destined to,
4404  * the local node; 0 otherwise.
4405  */
4406 static int is_local_mad(struct hfi1_ibport *ibp, const struct opa_mad *mad,
4407 			const struct ib_wc *in_wc)
4408 {
4409 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
4410 	const struct opa_smp *smp = (const struct opa_smp *)mad;
4411 
4412 	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
4413 		return (smp->hop_cnt == 0 &&
4414 			smp->route.dr.dr_slid == OPA_LID_PERMISSIVE &&
4415 			smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE);
4416 	}
4417 
4418 	return (in_wc->slid == ppd->lid);
4419 }
4420 
4421 /*
4422  * opa_local_smp_check() should only be called on MADs for which
4423  * is_local_mad() returns true. It applies the SMP checks that are
4424  * specific to SMPs which are sent from, and destined to this node.
4425  * opa_local_smp_check() returns 0 if the SMP passes its checks, 1
4426  * otherwise.
4427  *
4428  * SMPs which arrive from other nodes are instead checked by
4429  * opa_smp_check().
4430  */
4431 static int opa_local_smp_check(struct hfi1_ibport *ibp,
4432 			       const struct ib_wc *in_wc)
4433 {
4434 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
4435 	u16 pkey;
4436 
4437 	if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
4438 		return 1;
4439 
4440 	pkey = ppd->pkeys[in_wc->pkey_index];
4441 	/*
4442 	 * We need to do the "node-local" checks specified in OPAv1,
4443 	 * rev 0.90, section 9.10.26, which are:
4444 	 *   - pkey is 0x7fff, or 0xffff
4445 	 *   - Source QPN == 0 || Destination QPN == 0
4446 	 *   - the MAD header's management class is either
4447 	 *     IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE or
4448 	 *     IB_MGMT_CLASS_SUBN_LID_ROUTED
4449 	 *   - SLID != 0
4450 	 *
4451 	 * However, we know (and so don't need to check again) that,
4452 	 * for local SMPs, the MAD stack passes MADs with:
4453 	 *   - Source QPN of 0
4454 	 *   - MAD mgmt_class is IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
4455 	 *   - SLID is either: OPA_LID_PERMISSIVE (0xFFFFFFFF), or
4456 	 *     our own port's lid
4457 	 *
4458 	 */
4459 	if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
4460 		return 0;
4461 	ingress_pkey_table_fail(ppd, pkey, in_wc->slid);
4462 	return 1;
4463 }
4464 
4465 /**
4466  * hfi1_pkey_validation_pma - validate pkeys for incoming PMA MAD packets
4467  * @ibp: IB port data
4468  * @in_mad: MAD packet with header and data
4469  * @in_wc: Work completion data such as source LID, port number, etc.
4470  *
4471  * These are all the possible logic rules for validating a pkey:
4472  *
4473  * a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
4474  *    and NOT self-originated packet:
4475  *     Drop MAD packet as it should always be part of the
4476  *     management partition unless it's a self-originated packet.
4477  *
4478  * b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
4479  *     The packet is coming from a management node and the receiving node
4480  *     is also a management node, so it is safe for the packet to go through.
4481  *
4482  * c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
4483  *     Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
4484  *     It could be an FM misconfiguration.
4485  *
4486  * d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
4487  *     It is safe for the packet to go through since a non-management node is
4488  *     talking to another non-management node.
4489  *
4490  * e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
4491  *     Drop the packet because a non-management node is talking to a
4492  *     management node, and it could be an attack.
4493  *
4494  * For the implementation, these rules can be simplified to only
4495  * checking for (a) and (e). There's no need to check for rules (b)
4496  * and (d) as those packets are allowed through. Rule (c) is not
4497  * possible in the driver as LIM_MGMT_P_KEY is always in the pkey table.
4498  *
4499  * Return:
4500  * 0 - pkey is okay, -EINVAL it's a bad pkey
4501  * 0 if the pkey is okay, -EINVAL if it's a bad pkey
4502 static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
4503 				    const struct opa_mad *in_mad,
4504 				    const struct ib_wc *in_wc)
4505 {
4506 	u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);
4507 
4508 	/* Rule (a) from above */
4509 	if (!is_local_mad(ibp, in_mad, in_wc) &&
4510 	    pkey_value != LIM_MGMT_P_KEY &&
4511 	    pkey_value != FULL_MGMT_P_KEY)
4512 		return -EINVAL;
4513 
4514 	/* Rule (e) from above */
4515 	if (pkey_value == LIM_MGMT_P_KEY &&
4516 	    is_full_mgmt_pkey_in_table(ibp))
4517 		return -EINVAL;
4518 
4519 	return 0;
4520 }
4521 
4522 static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
4523 			    u8 port, const struct opa_mad *in_mad,
4524 			    struct opa_mad *out_mad,
4525 			    u32 *resp_len, int local_mad)
4526 {
4527 	struct opa_smp *smp = (struct opa_smp *)out_mad;
4528 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
4529 	u8 *data;
4530 	u32 am, data_size;
4531 	__be16 attr_id;
4532 	int ret;
4533 
4534 	*out_mad = *in_mad;
4535 	data = opa_get_smp_data(smp);
4536 	data_size = (u32)opa_get_smp_data_size(smp);
4537 
4538 	am = be32_to_cpu(smp->attr_mod);
4539 	attr_id = smp->attr_id;
4540 	if (smp->class_version != OPA_SM_CLASS_VERSION) {
4541 		smp->status |= IB_SMP_UNSUP_VERSION;
4542 		ret = reply((struct ib_mad_hdr *)smp);
4543 		return ret;
4544 	}
4545 	ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags, smp->mkey,
4546 			 smp->route.dr.dr_slid, smp->route.dr.return_path,
4547 			 smp->hop_cnt);
4548 	if (ret) {
4549 		u32 port_num = be32_to_cpu(smp->attr_mod);
4550 
4551 		/*
4552 		 * If this is a get/set portinfo, we already check the
4553 		 * M_Key if the MAD is for another port and the M_Key
4554 		 * is OK on the receiving port. This check is needed
4555 		 * to increment the error counters when the M_Key
4556 		 * fails to match on *both* ports.
4557 		 */
4558 		if (attr_id == IB_SMP_ATTR_PORT_INFO &&
4559 		    (smp->method == IB_MGMT_METHOD_GET ||
4560 		     smp->method == IB_MGMT_METHOD_SET) &&
4561 		    port_num && port_num <= ibdev->phys_port_cnt &&
4562 		    port != port_num)
4563 			(void)check_mkey(to_iport(ibdev, port_num),
4564 					 (struct ib_mad_hdr *)smp, 0,
4565 					 smp->mkey, smp->route.dr.dr_slid,
4566 					 smp->route.dr.return_path,
4567 					 smp->hop_cnt);
4568 		ret = IB_MAD_RESULT_FAILURE;
4569 		return ret;
4570 	}
4571 
4572 	*resp_len = opa_get_smp_header_size(smp);
4573 
4574 	switch (smp->method) {
4575 	case IB_MGMT_METHOD_GET:
4576 		switch (attr_id) {
4577 		default:
4578 			clear_opa_smp_data(smp);
4579 			ret = subn_get_opa_sma(attr_id, smp, am, data,
4580 					       ibdev, port, resp_len,
4581 					       data_size);
4582 			break;
4583 		case OPA_ATTRIB_ID_AGGREGATE:
4584 			ret = subn_get_opa_aggregate(smp, ibdev, port,
4585 						     resp_len);
4586 			break;
4587 		}
4588 		break;
4589 	case IB_MGMT_METHOD_SET:
4590 		switch (attr_id) {
4591 		default:
4592 			ret = subn_set_opa_sma(attr_id, smp, am, data,
4593 					       ibdev, port, resp_len,
4594 					       data_size, local_mad);
4595 			break;
4596 		case OPA_ATTRIB_ID_AGGREGATE:
4597 			ret = subn_set_opa_aggregate(smp, ibdev, port,
4598 						     resp_len, local_mad);
4599 			break;
4600 		}
4601 		break;
4602 	case IB_MGMT_METHOD_TRAP:
4603 	case IB_MGMT_METHOD_REPORT:
4604 	case IB_MGMT_METHOD_REPORT_RESP:
4605 	case IB_MGMT_METHOD_GET_RESP:
4606 		/*
4607 		 * The ib_mad module will call us to process responses
4608 		 * before checking for other consumers.
4609 		 * Just tell the caller to process it normally.
4610 		 */
4611 		ret = IB_MAD_RESULT_SUCCESS;
4612 		break;
4613 	case IB_MGMT_METHOD_TRAP_REPRESS:
4614 		subn_handle_opa_trap_repress(ibp, smp);
4615 		/* Always successful */
4616 		ret = IB_MAD_RESULT_SUCCESS;
4617 		break;
4618 	default:
4619 		smp->status |= IB_SMP_UNSUP_METHOD;
4620 		ret = reply((struct ib_mad_hdr *)smp);
4621 		break;
4622 	}
4623 
4624 	return ret;
4625 }
4626 
4627 static int process_subn(struct ib_device *ibdev, int mad_flags,
4628 			u8 port, const struct ib_mad *in_mad,
4629 			struct ib_mad *out_mad)
4630 {
4631 	struct ib_smp *smp = (struct ib_smp *)out_mad;
4632 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
4633 	int ret;
4634 
4635 	*out_mad = *in_mad;
4636 	if (smp->class_version != 1) {
4637 		smp->status |= IB_SMP_UNSUP_VERSION;
4638 		ret = reply((struct ib_mad_hdr *)smp);
4639 		return ret;
4640 	}
4641 
4642 	ret = check_mkey(ibp, (struct ib_mad_hdr *)smp, mad_flags,
4643 			 smp->mkey, (__force __be32)smp->dr_slid,
4644 			 smp->return_path, smp->hop_cnt);
4645 	if (ret) {
4646 		u32 port_num = be32_to_cpu(smp->attr_mod);
4647 
4648 		/*
4649 		 * If this is a get/set portinfo, we already check the
4650 		 * M_Key if the MAD is for another port and the M_Key
4651 		 * is OK on the receiving port. This check is needed
4652 		 * to increment the error counters when the M_Key
4653 		 * fails to match on *both* ports.
4654 		 */
4655 		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
4656 		    (smp->method == IB_MGMT_METHOD_GET ||
4657 		     smp->method == IB_MGMT_METHOD_SET) &&
4658 		    port_num && port_num <= ibdev->phys_port_cnt &&
4659 		    port != port_num)
4660 			(void)check_mkey(to_iport(ibdev, port_num),
4661 					 (struct ib_mad_hdr *)smp, 0,
4662 					 smp->mkey,
4663 					 (__force __be32)smp->dr_slid,
4664 					 smp->return_path, smp->hop_cnt);
4665 		ret = IB_MAD_RESULT_FAILURE;
4666 		return ret;
4667 	}
4668 
4669 	switch (smp->method) {
4670 	case IB_MGMT_METHOD_GET:
4671 		switch (smp->attr_id) {
4672 		case IB_SMP_ATTR_NODE_INFO:
4673 			ret = subn_get_nodeinfo(smp, ibdev, port);
4674 			break;
4675 		default:
4676 			smp->status |= IB_SMP_UNSUP_METH_ATTR;
4677 			ret = reply((struct ib_mad_hdr *)smp);
4678 			break;
4679 		}
4680 		break;
4681 	}
4682 
4683 	return ret;
4684 }
4685 
4686 static int process_perf(struct ib_device *ibdev, u8 port,
4687 			const struct ib_mad *in_mad,
4688 			struct ib_mad *out_mad)
4689 {
4690 	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
4691 	struct ib_class_port_info *cpi = (struct ib_class_port_info *)
4692 						&pmp->data;
4693 	int ret = IB_MAD_RESULT_FAILURE;
4694 
4695 	*out_mad = *in_mad;
4696 	if (pmp->mad_hdr.class_version != 1) {
4697 		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
4698 		ret = reply((struct ib_mad_hdr *)pmp);
4699 		return ret;
4700 	}
4701 
4702 	switch (pmp->mad_hdr.method) {
4703 	case IB_MGMT_METHOD_GET:
4704 		switch (pmp->mad_hdr.attr_id) {
4705 		case IB_PMA_PORT_COUNTERS:
4706 			ret = pma_get_ib_portcounters(pmp, ibdev, port);
4707 			break;
4708 		case IB_PMA_PORT_COUNTERS_EXT:
4709 			ret = pma_get_ib_portcounters_ext(pmp, ibdev, port);
4710 			break;
4711 		case IB_PMA_CLASS_PORT_INFO:
4712 			cpi->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
4713 			ret = reply((struct ib_mad_hdr *)pmp);
4714 			break;
4715 		default:
4716 			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4717 			ret = reply((struct ib_mad_hdr *)pmp);
4718 			break;
4719 		}
4720 		break;
4721 
4722 	case IB_MGMT_METHOD_SET:
4723 		if (pmp->mad_hdr.attr_id) {
4724 			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4725 			ret = reply((struct ib_mad_hdr *)pmp);
4726 		}
4727 		break;
4728 
4729 	case IB_MGMT_METHOD_TRAP:
4730 	case IB_MGMT_METHOD_GET_RESP:
4731 		/*
4732 		 * The ib_mad module will call us to process responses
4733 		 * before checking for other consumers.
4734 		 * Just tell the caller to process it normally.
4735 		 */
4736 		ret = IB_MAD_RESULT_SUCCESS;
4737 		break;
4738 
4739 	default:
4740 		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
4741 		ret = reply((struct ib_mad_hdr *)pmp);
4742 		break;
4743 	}
4744 
4745 	return ret;
4746 }
4747 
4748 static int process_perf_opa(struct ib_device *ibdev, u8 port,
4749 			    const struct opa_mad *in_mad,
4750 			    struct opa_mad *out_mad, u32 *resp_len)
4751 {
4752 	struct opa_pma_mad *pmp = (struct opa_pma_mad *)out_mad;
4753 	int ret;
4754 
4755 	*out_mad = *in_mad;
4756 
4757 	if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) {
4758 		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
4759 		return reply((struct ib_mad_hdr *)pmp);
4760 	}
4761 
4762 	*resp_len = sizeof(pmp->mad_hdr);
4763 
4764 	switch (pmp->mad_hdr.method) {
4765 	case IB_MGMT_METHOD_GET:
4766 		switch (pmp->mad_hdr.attr_id) {
4767 		case IB_PMA_CLASS_PORT_INFO:
4768 			ret = pma_get_opa_classportinfo(pmp, ibdev, resp_len);
4769 			break;
4770 		case OPA_PM_ATTRIB_ID_PORT_STATUS:
4771 			ret = pma_get_opa_portstatus(pmp, ibdev, port,
4772 						     resp_len);
4773 			break;
4774 		case OPA_PM_ATTRIB_ID_DATA_PORT_COUNTERS:
4775 			ret = pma_get_opa_datacounters(pmp, ibdev, port,
4776 						       resp_len);
4777 			break;
4778 		case OPA_PM_ATTRIB_ID_ERROR_PORT_COUNTERS:
4779 			ret = pma_get_opa_porterrors(pmp, ibdev, port,
4780 						     resp_len);
4781 			break;
4782 		case OPA_PM_ATTRIB_ID_ERROR_INFO:
4783 			ret = pma_get_opa_errorinfo(pmp, ibdev, port,
4784 						    resp_len);
4785 			break;
4786 		default:
4787 			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4788 			ret = reply((struct ib_mad_hdr *)pmp);
4789 			break;
4790 		}
4791 		break;
4792 
4793 	case IB_MGMT_METHOD_SET:
4794 		switch (pmp->mad_hdr.attr_id) {
4795 		case OPA_PM_ATTRIB_ID_CLEAR_PORT_STATUS:
4796 			ret = pma_set_opa_portstatus(pmp, ibdev, port,
4797 						     resp_len);
4798 			break;
4799 		case OPA_PM_ATTRIB_ID_ERROR_INFO:
4800 			ret = pma_set_opa_errorinfo(pmp, ibdev, port,
4801 						    resp_len);
4802 			break;
4803 		default:
4804 			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
4805 			ret = reply((struct ib_mad_hdr *)pmp);
4806 			break;
4807 		}
4808 		break;
4809 
4810 	case IB_MGMT_METHOD_TRAP:
4811 	case IB_MGMT_METHOD_GET_RESP:
4812 		/*
4813 		 * The ib_mad module will call us to process responses
4814 		 * before checking for other consumers.
4815 		 * Just tell the caller to process it normally.
4816 		 */
4817 		ret = IB_MAD_RESULT_SUCCESS;
4818 		break;
4819 
4820 	default:
4821 		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
4822 		ret = reply((struct ib_mad_hdr *)pmp);
4823 		break;
4824 	}
4825 
4826 	return ret;
4827 }
4828 
4829 static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
4830 				u8 port, const struct ib_wc *in_wc,
4831 				const struct ib_grh *in_grh,
4832 				const struct opa_mad *in_mad,
4833 				struct opa_mad *out_mad, size_t *out_mad_size,
4834 				u16 *out_mad_pkey_index)
4835 {
4836 	int ret;
4837 	int pkey_idx;
4838 	int local_mad = 0;
4839 	u32 resp_len = 0;
4840 	struct hfi1_ibport *ibp = to_iport(ibdev, port);
4841 
4842 	pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
4843 	if (pkey_idx < 0) {
4844 		pr_warn("failed to find limited mgmt pkey, defaulting to 0x%x\n",
4845 			hfi1_get_pkey(ibp, 1));
4846 		pkey_idx = 1;
4847 	}
4848 	*out_mad_pkey_index = (u16)pkey_idx;
4849 
4850 	switch (in_mad->mad_hdr.mgmt_class) {
4851 	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4852 	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4853 		local_mad = is_local_mad(ibp, in_mad, in_wc);
4854 		if (local_mad) {
4855 			ret = opa_local_smp_check(ibp, in_wc);
4856 			if (ret)
4857 				return IB_MAD_RESULT_FAILURE;
4858 		}
4859 		ret = process_subn_opa(ibdev, mad_flags, port, in_mad,
4860 				       out_mad, &resp_len, local_mad);
4861 		goto bail;
4862 	case IB_MGMT_CLASS_PERF_MGMT:
4863 		ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
4864 		if (ret)
4865 			return IB_MAD_RESULT_FAILURE;
4866 
4867 		ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
4868 		goto bail;
4869 
4870 	default:
4871 		ret = IB_MAD_RESULT_SUCCESS;
4872 	}
4873 
4874 bail:
4875 	if (ret & IB_MAD_RESULT_REPLY)
4876 		*out_mad_size = round_up(resp_len, 8);
4877 	else if (ret & IB_MAD_RESULT_SUCCESS)
4878 		*out_mad_size = in_wc->byte_len - sizeof(struct ib_grh);
4879 
4880 	return ret;
4881 }
4882 
4883 static int hfi1_process_ib_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4884 			       const struct ib_wc *in_wc,
4885 			       const struct ib_grh *in_grh,
4886 			       const struct ib_mad *in_mad,
4887 			       struct ib_mad *out_mad)
4888 {
4889 	int ret;
4890 
4891 	switch (in_mad->mad_hdr.mgmt_class) {
4892 	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
4893 	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
4894 		ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad);
4895 		break;
4896 	case IB_MGMT_CLASS_PERF_MGMT:
4897 		ret = process_perf(ibdev, port, in_mad, out_mad);
4898 		break;
4899 	default:
4900 		ret = IB_MAD_RESULT_SUCCESS;
4901 		break;
4902 	}
4903 
4904 	return ret;
4905 }
4906 
4907 /**
4908  * hfi1_process_mad - process an incoming MAD packet
4909  * @ibdev: the infiniband device this packet came in on
4910  * @mad_flags: MAD flags
4911  * @port: the port number this packet came in on
4912  * @in_wc: the work completion entry for this packet
4913  * @in_grh: the global route header for this packet
4914  * @in_mad: the incoming MAD
4915  * @out_mad: any outgoing MAD reply
4916  *
4917  * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
4918  * interested in processing.
4919  *
4920  * Note that the verbs framework has already done the MAD sanity checks,
4921  * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
4922  * MADs.
4923  *
4924  * This is called by the ib_mad module.
4925  */
4926 int hfi1_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
4927 		     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
4928 		     const struct ib_mad_hdr *in_mad, size_t in_mad_size,
4929 		     struct ib_mad_hdr *out_mad, size_t *out_mad_size,
4930 		     u16 *out_mad_pkey_index)
4931 {
4932 	switch (in_mad->base_version) {
4933 	case OPA_MGMT_BASE_VERSION:
4934 		if (unlikely(in_mad_size != sizeof(struct opa_mad))) {
4935 			dev_err(ibdev->dev.parent, "invalid in_mad_size\n");
4936 			return IB_MAD_RESULT_FAILURE;
4937 		}
4938 		return hfi1_process_opa_mad(ibdev, mad_flags, port,
4939 					    in_wc, in_grh,
4940 					    (struct opa_mad *)in_mad,
4941 					    (struct opa_mad *)out_mad,
4942 					    out_mad_size,
4943 					    out_mad_pkey_index);
4944 	case IB_MGMT_BASE_VERSION:
4945 		return hfi1_process_ib_mad(ibdev, mad_flags, port,
4946 					  in_wc, in_grh,
4947 					  (const struct ib_mad *)in_mad,
4948 					  (struct ib_mad *)out_mad);
4949 	default:
4950 		break;
4951 	}
4952 
4953 	return IB_MAD_RESULT_FAILURE;
4954 }
4955
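/*
 * Editor's note: hfi1_process_mad() is the driver's single MAD entry
 * point, invoked by the ib_mad module through the device's process_mad
 * callback.  base_version selects between the jumbo OPA path
 * (2048-byte MADs) and the legacy IB path (256-byte MADs); anything
 * else is rejected as a failure.
 */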