1 /*
2  * Copyright (c) 2012 - 2017 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * This file contains all of the code that is specific to the
36  * InfiniPath 7322 chip
37  */
38 
39 #include <linux/interrupt.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/io.h>
43 #include <linux/jiffies.h>
44 #include <linux/module.h>
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_smi.h>
47 #ifdef CONFIG_INFINIBAND_QIB_DCA
48 #include <linux/dca.h>
49 #endif
50 
51 #include "qib.h"
52 #include "qib_7322_regs.h"
53 #include "qib_qsfp.h"
54 
55 #include "qib_mad.h"
56 #include "qib_verbs.h"
57 
58 #undef pr_fmt
59 #define pr_fmt(fmt) QIB_DRV_NAME " " fmt
60 
61 static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
62 static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
63 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op);
64 static irqreturn_t qib_7322intr(int irq, void *data);
65 static irqreturn_t qib_7322bufavail(int irq, void *data);
66 static irqreturn_t sdma_intr(int irq, void *data);
67 static irqreturn_t sdma_idle_intr(int irq, void *data);
68 static irqreturn_t sdma_progress_intr(int irq, void *data);
69 static irqreturn_t sdma_cleanup_intr(int irq, void *data);
70 static void qib_7322_txchk_change(struct qib_devdata *, u32, u32, u32,
71 				  struct qib_ctxtdata *rcd);
72 static u8 qib_7322_phys_portstate(u64);
73 static u32 qib_7322_iblink_state(u64);
74 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
75 				   u16 linitcmd);
76 static void force_h1(struct qib_pportdata *);
77 static void adj_tx_serdes(struct qib_pportdata *);
78 static u32 qib_7322_setpbc_control(struct qib_pportdata *, u32, u8, u8);
79 static void qib_7322_mini_pcs_reset(struct qib_pportdata *);
80 
81 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
82 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
83 static void serdes_7322_los_enable(struct qib_pportdata *, int);
84 static int serdes_7322_init_old(struct qib_pportdata *);
85 static int serdes_7322_init_new(struct qib_pportdata *);
86 static void dump_sdma_7322_state(struct qib_pportdata *);
87 
88 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
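
/*
 * Worked example (illustration only, not part of the original source):
 * BMASK(7, 4) == ((1 << 4) - 1) << 4 == 0xf0, i.e. a mask covering bits 7..4.
 * MASK_ACROSS, defined further down, is the 64-bit equivalent with the
 * argument order reversed (lsb first).
 */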
89 
90 /* LE2 serdes values for different cases */
91 #define LE2_DEFAULT 5
92 #define LE2_5m 4
93 #define LE2_QME 0
94 
95 /* Below is special-purpose, so only really works for the IB SerDes blocks. */
96 #define IBSD(hw_pidx) (hw_pidx + 2)
97 
98 /* these are variables for documentation and experimentation purposes */
99 static const unsigned rcv_int_timeout = 375;
100 static const unsigned rcv_int_count = 16;
101 static const unsigned sdma_idle_cnt = 64;
102 
103 /* Time to stop altering Rx Equalization parameters, after link up. */
104 #define RXEQ_DISABLE_MSECS 2500
105 
106 /*
107  * Number of VLs we are configured to use (to allow for more
108  * credits per vl, etc.)
109  */
110 ushort qib_num_cfg_vls = 2;
111 module_param_named(num_vls, qib_num_cfg_vls, ushort, S_IRUGO);
112 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
113 
114 static ushort qib_chase = 1;
115 module_param_named(chase, qib_chase, ushort, S_IRUGO);
116 MODULE_PARM_DESC(chase, "Enable state chase handling");
117 
118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
120 MODULE_PARM_DESC(long_attenuation,
121 		 "attenuation cutoff (dB) for long copper cable setup");
122 
123 static ushort qib_singleport;
124 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
125 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
126 
127 static ushort qib_krcvq01_no_msi;
128 module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
129 MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
130 
131 /*
132  * Receive header queue sizes
133  */
134 static unsigned qib_rcvhdrcnt;
135 module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
136 MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
137 
138 static unsigned qib_rcvhdrsize;
139 module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
140 MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
141 
142 static unsigned qib_rcvhdrentsize;
143 module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
144 MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
145 
146 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
147 /* for read back, default index is ~5m copper cable */
148 static char txselect_list[MAX_ATTEN_LEN] = "10";
149 static struct kparam_string kp_txselect = {
150 	.string = txselect_list,
151 	.maxlen = MAX_ATTEN_LEN
152 };
153 static int  setup_txselect(const char *, const struct kernel_param *);
154 module_param_call(txselect, setup_txselect, param_get_string,
155 		  &kp_txselect, S_IWUSR | S_IRUGO);
156 MODULE_PARM_DESC(txselect,
157 		 "Tx serdes indices (for no QSFP or invalid QSFP data)");
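
/*
 * Usage sketch (an assumption about typical use, not part of the source):
 * the parameters above are set at module load time, e.g.
 *	modprobe ib_qib num_vls=4 singleport=1 rcvhdrcnt=4096
 * They are read-only through sysfs after load (S_IRUGO), except txselect,
 * which is also writable at runtime (S_IWUSR).
 */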
158 
159 #define BOARD_QME7342 5
160 #define BOARD_QMH7342 6
161 #define BOARD_QMH7360 9
162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
163 		    BOARD_QMH7342)
164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
165 		    BOARD_QME7342)
166 
167 #define KREG_IDX(regname)     (QIB_7322_##regname##_OFFS / sizeof(u64))
168 
169 #define KREG_IBPORT_IDX(regname) ((QIB_7322_##regname##_0_OFFS / sizeof(u64)))
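
/*
 * Illustration (not part of the driver): KREG_IDX turns a byte offset from
 * the generated register header into a u64 index, so a register is read as
 * dd->kregbase[KREG_IDX(Scratch)] (i.e. qib_read_kreg64(dd, kr_scratch)).
 * KREG_IBPORT_IDX does the same for the port-0 instance of a per-port
 * register; port 1 is reached through the per-port kpregbase pointer used
 * by qib_read_kreg_port()/qib_write_kreg_port().
 */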
170 
171 #define MASK_ACROSS(lsb, msb) \
172 	(((1ULL << ((msb) + 1 - (lsb))) - 1) << (lsb))
173 
174 #define SYM_RMASK(regname, fldname) ((u64)              \
175 	QIB_7322_##regname##_##fldname##_RMASK)
176 
177 #define SYM_MASK(regname, fldname) ((u64)               \
178 	QIB_7322_##regname##_##fldname##_RMASK <<       \
179 	 QIB_7322_##regname##_##fldname##_LSB)
180 
181 #define SYM_FIELD(value, regname, fldname) ((u64)	\
182 	(((value) >> SYM_LSB(regname, fldname)) &	\
183 	 SYM_RMASK(regname, fldname)))
184 
185 /* useful for things like LaFifoEmpty_0...7, TxCreditOK_0...7, etc. */
186 #define SYM_FIELD_ACROSS(value, regname, fldname, nbits) \
187 	(((value) >> SYM_LSB(regname, fldname)) & MASK_ACROSS(0, nbits))
188 
189 #define HWE_MASK(fldname) SYM_MASK(HwErrMask, fldname##Mask)
190 #define ERR_MASK(fldname) SYM_MASK(ErrMask, fldname##Mask)
191 #define ERR_MASK_N(fldname) SYM_MASK(ErrMask_0, fldname##Mask)
192 #define INT_MASK(fldname) SYM_MASK(IntMask, fldname##IntMask)
193 #define INT_MASK_P(fldname, port) SYM_MASK(IntMask, fldname##IntMask##_##port)
194 /* Below because most, but not all, fields of IntMask have that full suffix */
195 #define INT_MASK_PM(fldname, port) SYM_MASK(IntMask, fldname##Mask##_##port)
196 
197 
198 #define SYM_LSB(regname, fldname) (QIB_7322_##regname##_##fldname##_LSB)
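
/*
 * Worked example of the SYM_* helpers (illustration only): for a field with
 * LSB 5 and a 3-bit RMASK of 0x7,
 *	SYM_MASK(reg, fld)      == 0x7ULL << 5 == 0xe0
 *	SYM_FIELD(v, reg, fld)  == ((v) >> 5) & 0x7
 * i.e. SYM_MASK selects the field in place, SYM_FIELD extracts its value.
 */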
199 
200 /*
201  * the size bits give us 2^N, in KB units.  0 marks as invalid,
202  * and 7 is reserved.  We currently use only 2KB and 4KB
203  */
204 #define IBA7322_TID_SZ_SHIFT QIB_7322_RcvTIDArray0_RT_BufSize_LSB
205 #define IBA7322_TID_SZ_2K (1UL<<IBA7322_TID_SZ_SHIFT) /* 2KB */
206 #define IBA7322_TID_SZ_4K (2UL<<IBA7322_TID_SZ_SHIFT) /* 4KB */
207 #define IBA7322_TID_PA_SHIFT 11U /* TID addr in chip stored w/o low bits */
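
/*
 * Sketch of how a TID entry is composed (illustration, not the driver code):
 * the buffer's DMA address is shifted right by IBA7322_TID_PA_SHIFT (the
 * chip stores it without the low 11 bits) and OR'd with one of the size
 * codes above, e.g. for a 4KB eager buffer:
 *	entry = (pa >> IBA7322_TID_PA_SHIFT) | IBA7322_TID_SZ_4K;
 */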
208 
209 #define SendIBSLIDAssignMask \
210 	QIB_7322_SendIBSLIDAssign_0_SendIBSLIDAssign_15_0_RMASK
211 #define SendIBSLMCMask \
212 	QIB_7322_SendIBSLIDMask_0_SendIBSLIDMask_15_0_RMASK
213 
214 #define ExtLED_IB1_YEL SYM_MASK(EXTCtrl, LEDPort0YellowOn)
215 #define ExtLED_IB1_GRN SYM_MASK(EXTCtrl, LEDPort0GreenOn)
216 #define ExtLED_IB2_YEL SYM_MASK(EXTCtrl, LEDPort1YellowOn)
217 #define ExtLED_IB2_GRN SYM_MASK(EXTCtrl, LEDPort1GreenOn)
218 #define ExtLED_IB1_MASK (ExtLED_IB1_YEL | ExtLED_IB1_GRN)
219 #define ExtLED_IB2_MASK (ExtLED_IB2_YEL | ExtLED_IB2_GRN)
220 
221 #define _QIB_GPIO_SDA_NUM 1
222 #define _QIB_GPIO_SCL_NUM 0
223 #define QIB_EEPROM_WEN_NUM 14
224 #define QIB_TWSI_EEPROM_DEV 0xA2 /* All Production 7322 cards. */
225 
226 /* HW counter clock is at 4nsec */
227 #define QIB_7322_PSXMITWAIT_CHECK_RATE 4000
228 
229 /* full speed IB port 1 only */
230 #define PORT_SPD_CAP (QIB_IB_SDR | QIB_IB_DDR | QIB_IB_QDR)
231 #define PORT_SPD_CAP_SHIFT 3
232 
233 /* full speed featuremask, both ports */
234 #define DUAL_PORT_CAP (PORT_SPD_CAP | (PORT_SPD_CAP << PORT_SPD_CAP_SHIFT))
235 
236 /*
237  * This file contains almost all the chip-specific register information and
238  * access functions for the QLogic InfiniPath 7322 PCI-Express chip.
239  */
240 
241 /* Use defines to tie machine-generated names to lower-case names */
242 #define kr_contextcnt KREG_IDX(ContextCnt)
243 #define kr_control KREG_IDX(Control)
244 #define kr_counterregbase KREG_IDX(CntrRegBase)
245 #define kr_errclear KREG_IDX(ErrClear)
246 #define kr_errmask KREG_IDX(ErrMask)
247 #define kr_errstatus KREG_IDX(ErrStatus)
248 #define kr_extctrl KREG_IDX(EXTCtrl)
249 #define kr_extstatus KREG_IDX(EXTStatus)
250 #define kr_gpio_clear KREG_IDX(GPIOClear)
251 #define kr_gpio_mask KREG_IDX(GPIOMask)
252 #define kr_gpio_out KREG_IDX(GPIOOut)
253 #define kr_gpio_status KREG_IDX(GPIOStatus)
254 #define kr_hwdiagctrl KREG_IDX(HwDiagCtrl)
255 #define kr_debugportval KREG_IDX(DebugPortValueReg)
256 #define kr_fmask KREG_IDX(feature_mask)
257 #define kr_act_fmask KREG_IDX(active_feature_mask)
258 #define kr_hwerrclear KREG_IDX(HwErrClear)
259 #define kr_hwerrmask KREG_IDX(HwErrMask)
260 #define kr_hwerrstatus KREG_IDX(HwErrStatus)
261 #define kr_intclear KREG_IDX(IntClear)
262 #define kr_intmask KREG_IDX(IntMask)
263 #define kr_intredirect KREG_IDX(IntRedirect0)
264 #define kr_intstatus KREG_IDX(IntStatus)
265 #define kr_pagealign KREG_IDX(PageAlign)
266 #define kr_rcvavailtimeout KREG_IDX(RcvAvailTimeOut0)
267 #define kr_rcvctrl KREG_IDX(RcvCtrl) /* Common, but chip also has per-port */
268 #define kr_rcvegrbase KREG_IDX(RcvEgrBase)
269 #define kr_rcvegrcnt KREG_IDX(RcvEgrCnt)
270 #define kr_rcvhdrcnt KREG_IDX(RcvHdrCnt)
271 #define kr_rcvhdrentsize KREG_IDX(RcvHdrEntSize)
272 #define kr_rcvhdrsize KREG_IDX(RcvHdrSize)
273 #define kr_rcvtidbase KREG_IDX(RcvTIDBase)
274 #define kr_rcvtidcnt KREG_IDX(RcvTIDCnt)
275 #define kr_revision KREG_IDX(Revision)
276 #define kr_scratch KREG_IDX(Scratch)
277 #define kr_sendbuffererror KREG_IDX(SendBufErr0) /* and base for 1 and 2 */
278 #define kr_sendcheckmask KREG_IDX(SendCheckMask0) /* and 1, 2 */
279 #define kr_sendctrl KREG_IDX(SendCtrl)
280 #define kr_sendgrhcheckmask KREG_IDX(SendGRHCheckMask0) /* and 1, 2 */
281 #define kr_sendibpktmask KREG_IDX(SendIBPacketMask0) /* and 1, 2 */
282 #define kr_sendpioavailaddr KREG_IDX(SendBufAvailAddr)
283 #define kr_sendpiobufbase KREG_IDX(SendBufBase)
284 #define kr_sendpiobufcnt KREG_IDX(SendBufCnt)
285 #define kr_sendpiosize KREG_IDX(SendBufSize)
286 #define kr_sendregbase KREG_IDX(SendRegBase)
287 #define kr_sendbufavail0 KREG_IDX(SendBufAvail0)
288 #define kr_userregbase KREG_IDX(UserRegBase)
289 #define kr_intgranted KREG_IDX(Int_Granted)
290 #define kr_vecclr_wo_int KREG_IDX(vec_clr_without_int)
291 #define kr_intblocked KREG_IDX(IntBlocked)
292 #define kr_r_access KREG_IDX(SPC_JTAG_ACCESS_REG)
293 
294 /*
295  * per-port kernel registers.  Access only with qib_read_kreg_port()
296  * or qib_write_kreg_port()
297  */
298 #define krp_errclear KREG_IBPORT_IDX(ErrClear)
299 #define krp_errmask KREG_IBPORT_IDX(ErrMask)
300 #define krp_errstatus KREG_IBPORT_IDX(ErrStatus)
301 #define krp_highprio_0 KREG_IBPORT_IDX(HighPriority0)
302 #define krp_highprio_limit KREG_IBPORT_IDX(HighPriorityLimit)
303 #define krp_hrtbt_guid KREG_IBPORT_IDX(HRTBT_GUID)
304 #define krp_ib_pcsconfig KREG_IBPORT_IDX(IBPCSConfig)
305 #define krp_ibcctrl_a KREG_IBPORT_IDX(IBCCtrlA)
306 #define krp_ibcctrl_b KREG_IBPORT_IDX(IBCCtrlB)
307 #define krp_ibcctrl_c KREG_IBPORT_IDX(IBCCtrlC)
308 #define krp_ibcstatus_a KREG_IBPORT_IDX(IBCStatusA)
309 #define krp_ibcstatus_b KREG_IBPORT_IDX(IBCStatusB)
310 #define krp_txestatus KREG_IBPORT_IDX(TXEStatus)
311 #define krp_lowprio_0 KREG_IBPORT_IDX(LowPriority0)
312 #define krp_ncmodectrl KREG_IBPORT_IDX(IBNCModeCtrl)
313 #define krp_partitionkey KREG_IBPORT_IDX(RcvPartitionKey)
314 #define krp_psinterval KREG_IBPORT_IDX(PSInterval)
315 #define krp_psstart KREG_IBPORT_IDX(PSStart)
316 #define krp_psstat KREG_IBPORT_IDX(PSStat)
317 #define krp_rcvbthqp KREG_IBPORT_IDX(RcvBTHQP)
318 #define krp_rcvctrl KREG_IBPORT_IDX(RcvCtrl)
319 #define krp_rcvpktledcnt KREG_IBPORT_IDX(RcvPktLEDCnt)
320 #define krp_rcvqpmaptable KREG_IBPORT_IDX(RcvQPMapTableA)
321 #define krp_rxcreditvl0 KREG_IBPORT_IDX(RxCreditVL0)
322 #define krp_rxcreditvl15 (KREG_IBPORT_IDX(RxCreditVL0)+15)
323 #define krp_sendcheckcontrol KREG_IBPORT_IDX(SendCheckControl)
324 #define krp_sendctrl KREG_IBPORT_IDX(SendCtrl)
325 #define krp_senddmabase KREG_IBPORT_IDX(SendDmaBase)
326 #define krp_senddmabufmask0 KREG_IBPORT_IDX(SendDmaBufMask0)
327 #define krp_senddmabufmask1 (KREG_IBPORT_IDX(SendDmaBufMask0) + 1)
328 #define krp_senddmabufmask2 (KREG_IBPORT_IDX(SendDmaBufMask0) + 2)
329 #define krp_senddmabuf_use0 KREG_IBPORT_IDX(SendDmaBufUsed0)
330 #define krp_senddmabuf_use1 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 1)
331 #define krp_senddmabuf_use2 (KREG_IBPORT_IDX(SendDmaBufUsed0) + 2)
332 #define krp_senddmadesccnt KREG_IBPORT_IDX(SendDmaDescCnt)
333 #define krp_senddmahead KREG_IBPORT_IDX(SendDmaHead)
334 #define krp_senddmaheadaddr KREG_IBPORT_IDX(SendDmaHeadAddr)
335 #define krp_senddmaidlecnt KREG_IBPORT_IDX(SendDmaIdleCnt)
336 #define krp_senddmalengen KREG_IBPORT_IDX(SendDmaLenGen)
337 #define krp_senddmaprioritythld KREG_IBPORT_IDX(SendDmaPriorityThld)
338 #define krp_senddmareloadcnt KREG_IBPORT_IDX(SendDmaReloadCnt)
339 #define krp_senddmastatus KREG_IBPORT_IDX(SendDmaStatus)
340 #define krp_senddmatail KREG_IBPORT_IDX(SendDmaTail)
341 #define krp_sendhdrsymptom KREG_IBPORT_IDX(SendHdrErrSymptom)
342 #define krp_sendslid KREG_IBPORT_IDX(SendIBSLIDAssign)
343 #define krp_sendslidmask KREG_IBPORT_IDX(SendIBSLIDMask)
344 #define krp_ibsdtestiftx KREG_IBPORT_IDX(IB_SDTEST_IF_TX)
345 #define krp_adapt_dis_timer KREG_IBPORT_IDX(ADAPT_DISABLE_TIMER_THRESHOLD)
346 #define krp_tx_deemph_override KREG_IBPORT_IDX(IBSD_TX_DEEMPHASIS_OVERRIDE)
347 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
348 
349 /*
350  * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
351  * or qib_write_kreg_ctxt()
352  */
353 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
354 #define krc_rcvhdrtailaddr KREG_IDX(RcvHdrTailAddr0)
355 
356 /*
357  * TID Flow table, per context.  Reduces the
358  * number of hdrq updates to one per flow (or on errors).
359  * Contexts 0 and 1 share the same memory, but have distinct
360  * addresses.  Since, for now, we never use expected sends
361  * on kernel contexts, we don't worry about that (for example, we
362  * initialize those entries for ctxt 0/1 twice on driver load).
363  */
364 #define NUM_TIDFLOWS_CTXT 0x20 /* 0x20 per context; have to hardcode */
365 #define ur_rcvflowtable (KREG_IDX(RcvTIDFlowTable0) - KREG_IDX(RcvHdrTail0))
366 
367 /* these are the error bits in the tid flows, and are W1C */
368 #define TIDFLOW_ERRBITS  ( \
369 	(SYM_MASK(RcvTIDFlowTable0, GenMismatch) << \
370 	SYM_LSB(RcvTIDFlowTable0, GenMismatch)) | \
371 	(SYM_MASK(RcvTIDFlowTable0, SeqMismatch) << \
372 	SYM_LSB(RcvTIDFlowTable0, SeqMismatch)))
373 
374 /* Most (not all) counters are per-IBport.
375  * This requires that LBIntCnt be at offset 0 in the group.
376  */
377 #define CREG_IDX(regname) \
378 ((QIB_7322_##regname##_0_OFFS - QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
379 
380 #define crp_badformat CREG_IDX(RxVersionErrCnt)
381 #define crp_err_rlen CREG_IDX(RxLenErrCnt)
382 #define crp_erricrc CREG_IDX(RxICRCErrCnt)
383 #define crp_errlink CREG_IDX(RxLinkMalformCnt)
384 #define crp_errlpcrc CREG_IDX(RxLPCRCErrCnt)
385 #define crp_errpkey CREG_IDX(RxPKeyMismatchCnt)
386 #define crp_errvcrc CREG_IDX(RxVCRCErrCnt)
387 #define crp_excessbufferovfl CREG_IDX(ExcessBufferOvflCnt)
388 #define crp_iblinkdown CREG_IDX(IBLinkDownedCnt)
389 #define crp_iblinkerrrecov CREG_IDX(IBLinkErrRecoveryCnt)
390 #define crp_ibstatuschange CREG_IDX(IBStatusChangeCnt)
391 #define crp_ibsymbolerr CREG_IDX(IBSymbolErrCnt)
392 #define crp_invalidrlen CREG_IDX(RxMaxMinLenErrCnt)
393 #define crp_locallinkintegrityerr CREG_IDX(LocalLinkIntegrityErrCnt)
394 #define crp_pktrcv CREG_IDX(RxDataPktCnt)
395 #define crp_pktrcvflowctrl CREG_IDX(RxFlowPktCnt)
396 #define crp_pktsend CREG_IDX(TxDataPktCnt)
397 #define crp_pktsendflow CREG_IDX(TxFlowPktCnt)
398 #define crp_psrcvdatacount CREG_IDX(PSRcvDataCount)
399 #define crp_psrcvpktscount CREG_IDX(PSRcvPktsCount)
400 #define crp_psxmitdatacount CREG_IDX(PSXmitDataCount)
401 #define crp_psxmitpktscount CREG_IDX(PSXmitPktsCount)
402 #define crp_psxmitwaitcount CREG_IDX(PSXmitWaitCount)
403 #define crp_rcvebp CREG_IDX(RxEBPCnt)
404 #define crp_rcvflowctrlviol CREG_IDX(RxFlowCtrlViolCnt)
405 #define crp_rcvovfl CREG_IDX(RxBufOvflCnt)
406 #define crp_rxdlidfltr CREG_IDX(RxDlidFltrCnt)
407 #define crp_rxdroppkt CREG_IDX(RxDroppedPktCnt)
408 #define crp_rxotherlocalphyerr CREG_IDX(RxOtherLocalPhyErrCnt)
409 #define crp_rxqpinvalidctxt CREG_IDX(RxQPInvalidContextCnt)
410 #define crp_rxvlerr CREG_IDX(RxVlErrCnt)
411 #define crp_sendstall CREG_IDX(TxFlowStallCnt)
412 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
413 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
414 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
415 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
416 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
417 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
418 #define crp_txunsupvl CREG_IDX(TxUnsupVLErrCnt)
419 #define crp_vl15droppedpkt CREG_IDX(RxVL15DroppedPktCnt)
420 #define crp_wordrcv CREG_IDX(RxDwordCnt)
421 #define crp_wordsend CREG_IDX(TxDwordCnt)
422 #define crp_tx_creditstalls CREG_IDX(TxCreditUpToDateTimeOut)
423 
424 /* these are the (few) counters that are not port-specific */
425 #define CREG_DEVIDX(regname) ((QIB_7322_##regname##_OFFS - \
426 			QIB_7322_LBIntCnt_OFFS) / sizeof(u64))
427 #define cr_base_egrovfl CREG_DEVIDX(RxP0HdrEgrOvflCnt)
428 #define cr_lbint CREG_DEVIDX(LBIntCnt)
429 #define cr_lbstall CREG_DEVIDX(LBFlowStallCnt)
430 #define cr_pcieretrydiag CREG_DEVIDX(PcieRetryBufDiagQwordCnt)
431 #define cr_rxtidflowdrop CREG_DEVIDX(RxTidFlowDropCnt)
432 #define cr_tidfull CREG_DEVIDX(RxTIDFullErrCnt)
433 #define cr_tidinvalid CREG_DEVIDX(RxTIDValidErrCnt)
434 
435 /* no chip register for # of IB ports supported, so define */
436 #define NUM_IB_PORTS 2
437 
438 /* 1 VL15 buffer per hardware IB port, no register for this, so define */
439 #define NUM_VL15_BUFS NUM_IB_PORTS
440 
441 /*
442  * Contexts 0 and 1 are special, and there is no chip register that
443  * defines this value, so we have to define it here.
444  * These entries are all allocated to context 0 or 1 in a single-port
445  * hardware configuration; otherwise each port gets half.
446  */
447 #define KCTXT0_EGRCNT 2048
448 
449 /* values for vl and port fields in PBC, 7322-specific */
450 #define PBC_PORT_SEL_LSB 26
451 #define PBC_PORT_SEL_RMASK 1
452 #define PBC_VL_NUM_LSB 27
453 #define PBC_VL_NUM_RMASK 7
454 #define PBC_7322_VL15_SEND (1ULL << 63) /* pbc; VL15, no credit check */
455 #define PBC_7322_VL15_SEND_CTRL (1ULL << 31) /* control version of same */
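
/*
 * Sketch of how these fields are used (illustration; flush_fifo() and
 * qib_7322_setpbc_control() below are the real users): the VL and port
 * selects sit in the upper 32-bit half of the 64-bit PBC, so a VL15 send
 * on a given port builds its PBC roughly as
 *	pbc = PBC_7322_VL15_SEND |
 *	      ((u64)ppd->hw_pidx << (PBC_PORT_SEL_LSB + 32)) | pkt_dwords;
 */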
456 
457 static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
458 	[IB_RATE_2_5_GBPS] = 16,
459 	[IB_RATE_5_GBPS] = 8,
460 	[IB_RATE_10_GBPS] = 4,
461 	[IB_RATE_20_GBPS] = 2,
462 	[IB_RATE_30_GBPS] = 2,
463 	[IB_RATE_40_GBPS] = 1
464 };
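
/*
 * Note (illustration): the values above are static-rate inter-packet delay
 * factors relative to the 40 Gbps (QDR 4X) line rate, roughly 40 / rate:
 * 2.5 Gbps -> 16, 10 Gbps -> 4, 40 Gbps -> 1.
 */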
465 
466 static const char * const qib_sdma_state_names[] = {
467 	[qib_sdma_state_s00_hw_down]          = "s00_HwDown",
468 	[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
469 	[qib_sdma_state_s20_idle]             = "s20_Idle",
470 	[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
471 	[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
472 	[qib_sdma_state_s50_hw_halt_wait]     = "s50_HwHaltWait",
473 	[qib_sdma_state_s99_running]          = "s99_Running",
474 };
475 
476 #define IBA7322_LINKSPEED_SHIFT SYM_LSB(IBCStatusA_0, LinkSpeedActive)
477 #define IBA7322_LINKWIDTH_SHIFT SYM_LSB(IBCStatusA_0, LinkWidthActive)
478 
479 /* link training states, from IBC */
480 #define IB_7322_LT_STATE_DISABLED        0x00
481 #define IB_7322_LT_STATE_LINKUP          0x01
482 #define IB_7322_LT_STATE_POLLACTIVE      0x02
483 #define IB_7322_LT_STATE_POLLQUIET       0x03
484 #define IB_7322_LT_STATE_SLEEPDELAY      0x04
485 #define IB_7322_LT_STATE_SLEEPQUIET      0x05
486 #define IB_7322_LT_STATE_CFGDEBOUNCE     0x08
487 #define IB_7322_LT_STATE_CFGRCVFCFG      0x09
488 #define IB_7322_LT_STATE_CFGWAITRMT      0x0a
489 #define IB_7322_LT_STATE_CFGIDLE         0x0b
490 #define IB_7322_LT_STATE_RECOVERRETRAIN  0x0c
491 #define IB_7322_LT_STATE_TXREVLANES      0x0d
492 #define IB_7322_LT_STATE_RECOVERWAITRMT  0x0e
493 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
494 #define IB_7322_LT_STATE_CFGENH          0x10
495 #define IB_7322_LT_STATE_CFGTEST         0x11
496 #define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
497 #define IB_7322_LT_STATE_CFGWAITENH      0x13
498 
499 /* link state machine states from IBC */
500 #define IB_7322_L_STATE_DOWN             0x0
501 #define IB_7322_L_STATE_INIT             0x1
502 #define IB_7322_L_STATE_ARM              0x2
503 #define IB_7322_L_STATE_ACTIVE           0x3
504 #define IB_7322_L_STATE_ACT_DEFER        0x4
505 
506 static const u8 qib_7322_physportstate[0x20] = {
507 	[IB_7322_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
508 	[IB_7322_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
509 	[IB_7322_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
510 	[IB_7322_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
511 	[IB_7322_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
512 	[IB_7322_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
513 	[IB_7322_LT_STATE_CFGDEBOUNCE] = IB_PHYSPORTSTATE_CFG_TRAIN,
514 	[IB_7322_LT_STATE_CFGRCVFCFG] =
515 		IB_PHYSPORTSTATE_CFG_TRAIN,
516 	[IB_7322_LT_STATE_CFGWAITRMT] =
517 		IB_PHYSPORTSTATE_CFG_TRAIN,
518 	[IB_7322_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_IDLE,
519 	[IB_7322_LT_STATE_RECOVERRETRAIN] =
520 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
521 	[IB_7322_LT_STATE_RECOVERWAITRMT] =
522 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
523 	[IB_7322_LT_STATE_RECOVERIDLE] =
524 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
525 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
526 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
527 	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
528 		IB_PHYSPORTSTATE_CFG_TRAIN,
529 	[IB_7322_LT_STATE_CFGWAITENH] =
530 		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
531 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
532 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
533 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
534 	[0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
535 };
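
/*
 * Illustration (not part of the driver): qib_7322_phys_portstate(), declared
 * above, indexes this table with the LinkTrainingState field extracted from
 * IBCStatusA (a 5-bit value, hence 0x20 entries) to translate the IBC state
 * into the IBTA PhysPortState reported to management, e.g.
 * IB_7322_LT_STATE_POLLACTIVE maps to IB_PHYSPORTSTATE_POLL.
 */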
536 
537 #ifdef CONFIG_INFINIBAND_QIB_DCA
538 struct qib_irq_notify {
539 	int rcv;
540 	void *arg;
541 	struct irq_affinity_notify notify;
542 };
543 #endif
544 
545 struct qib_chip_specific {
546 	u64 __iomem *cregbase;
547 	u64 *cntrs;
548 	spinlock_t rcvmod_lock; /* protect rcvctrl shadow changes */
549 	spinlock_t gpio_lock; /* RMW of shadows/regs for ExtCtrl and GPIO */
550 	u64 main_int_mask;      /* clear bits which have dedicated handlers */
551 	u64 int_enable_mask;  /* for per port interrupts in single port mode */
552 	u64 errormask;
553 	u64 hwerrmask;
554 	u64 gpio_out; /* shadow of kr_gpio_out, for rmw ops */
555 	u64 gpio_mask; /* shadow the gpio mask register */
556 	u64 extctrl; /* shadow the gpio output enable, etc... */
557 	u32 ncntrs;
558 	u32 nportcntrs;
559 	u32 cntrnamelen;
560 	u32 portcntrnamelen;
561 	u32 numctxts;
562 	u32 rcvegrcnt;
563 	u32 updthresh; /* current AvailUpdThld */
564 	u32 updthresh_dflt; /* default AvailUpdThld */
565 	u32 r1;
566 	u32 num_msix_entries;
567 	u32 sdmabufcnt;
568 	u32 lastbuf_for_pio;
569 	u32 stay_in_freeze;
570 	u32 recovery_ports_initted;
571 #ifdef CONFIG_INFINIBAND_QIB_DCA
572 	u32 dca_ctrl;
573 	int rhdr_cpu[18];
574 	int sdma_cpu[2];
575 	u64 dca_rcvhdr_ctrl[5]; /* B, C, D, E, F */
576 #endif
577 	struct qib_msix_entry *msix_entries;
578 	unsigned long *sendchkenable;
579 	unsigned long *sendgrhchk;
580 	unsigned long *sendibchk;
581 	u32 rcvavail_timeout[18];
582 	char emsgbuf[128]; /* for device error interrupt msg buffer */
583 };
584 
585 /* Table of Tx Emphasis entries, in "human readable" form. */
586 struct txdds_ent {
587 	u8 amp;
588 	u8 pre;
589 	u8 main;
590 	u8 post;
591 };
592 
593 struct vendor_txdds_ent {
594 	u8 oui[QSFP_VOUI_LEN];
595 	u8 *partnum;
596 	struct txdds_ent sdr;
597 	struct txdds_ent ddr;
598 	struct txdds_ent qdr;
599 };
600 
601 static void write_tx_serdes_param(struct qib_pportdata *, struct txdds_ent *);
602 
603 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
604 #define TXDDS_EXTRA_SZ 18 /* number of extra tx settings entries */
605 #define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
606 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
607 
608 #define H1_FORCE_VAL 8
609 #define H1_FORCE_QME 1 /*  may be overridden via setup_txselect() */
610 #define H1_FORCE_QMH 7 /*  may be overridden via setup_txselect() */
611 
612 /* The static and dynamic registers are paired, and the pairs indexed by spd */
613 #define krp_static_adapt_dis(spd) (KREG_IBPORT_IDX(ADAPT_DISABLE_STATIC_SDR) \
614 	+ ((spd) * 2))
615 
616 #define QDR_DFE_DISABLE_DELAY 4000 /* msec after LINKUP */
617 #define QDR_STATIC_ADAPT_DOWN 0xf0f0f0f0ULL /* link down, H1-H4 QDR adapts */
618 #define QDR_STATIC_ADAPT_DOWN_R1 0ULL /* r1 link down, H1-H4 QDR adapts */
619 #define QDR_STATIC_ADAPT_INIT 0xffffffffffULL /* up, disable H0,H1-8, LE */
620 #define QDR_STATIC_ADAPT_INIT_R1 0xf0ffffffffULL /* r1 up, disable H0,H1-8 */
621 
622 struct qib_chippport_specific {
623 	u64 __iomem *kpregbase;
624 	u64 __iomem *cpregbase;
625 	u64 *portcntrs;
626 	struct qib_pportdata *ppd;
627 	wait_queue_head_t autoneg_wait;
628 	struct delayed_work autoneg_work;
629 	struct delayed_work ipg_work;
630 	struct timer_list chase_timer;
631 	/*
632 	 * these 5 fields are used to establish deltas for IB symbol
633 	 * errors and linkrecovery errors.  They can be reported on
634 	 * some chips during link negotiation prior to INIT, and with
635 	 * DDR when faking DDR negotiations with non-IBTA switches.
636 	 * The chip counters are adjusted at driver unload if there is
637 	 * a non-zero delta.
638 	 */
639 	u64 ibdeltainprog;
640 	u64 ibsymdelta;
641 	u64 ibsymsnap;
642 	u64 iblnkerrdelta;
643 	u64 iblnkerrsnap;
644 	u64 iblnkdownsnap;
645 	u64 iblnkdowndelta;
646 	u64 ibmalfdelta;
647 	u64 ibmalfsnap;
648 	u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
649 	u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
650 	unsigned long qdr_dfe_time;
651 	unsigned long chase_end;
652 	u32 autoneg_tries;
653 	u32 recovery_init;
654 	u32 qdr_dfe_on;
655 	u32 qdr_reforce;
656 	/*
657 	 * Per-bay per-channel rcv QMH H1 values and Tx values for QDR.
658 	 * entry zero is unused, to simplify indexing
659 	 */
660 	u8 h1_val;
661 	u8 no_eep;  /* txselect table index to use if no qsfp info */
662 	u8 ipg_tries;
663 	u8 ibmalfusesnap;
664 	struct qib_qsfp_data qsfp_data;
665 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
666 	char sdmamsgbuf[192]; /* for per-port sdma error messages */
667 };
668 
669 static struct {
670 	const char *name;
671 	irq_handler_t handler;
672 	int lsb;
673 	int port; /* 0 if not port-specific, else port # */
674 	int dca;
675 } irq_table[] = {
676 	{ "", qib_7322intr, -1, 0, 0 },
677 	{ " (buf avail)", qib_7322bufavail,
678 		SYM_LSB(IntStatus, SendBufAvail), 0, 0},
679 	{ " (sdma 0)", sdma_intr,
680 		SYM_LSB(IntStatus, SDmaInt_0), 1, 1 },
681 	{ " (sdma 1)", sdma_intr,
682 		SYM_LSB(IntStatus, SDmaInt_1), 2, 1 },
683 	{ " (sdmaI 0)", sdma_idle_intr,
684 		SYM_LSB(IntStatus, SDmaIdleInt_0), 1, 1},
685 	{ " (sdmaI 1)", sdma_idle_intr,
686 		SYM_LSB(IntStatus, SDmaIdleInt_1), 2, 1},
687 	{ " (sdmaP 0)", sdma_progress_intr,
688 		SYM_LSB(IntStatus, SDmaProgressInt_0), 1, 1 },
689 	{ " (sdmaP 1)", sdma_progress_intr,
690 		SYM_LSB(IntStatus, SDmaProgressInt_1), 2, 1 },
691 	{ " (sdmaC 0)", sdma_cleanup_intr,
692 		SYM_LSB(IntStatus, SDmaCleanupDone_0), 1, 0 },
693 	{ " (sdmaC 1)", sdma_cleanup_intr,
694 		SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 , 0},
695 };
696 
697 #ifdef CONFIG_INFINIBAND_QIB_DCA
698 
699 static const struct dca_reg_map {
700 	int     shadow_inx;
701 	int     lsb;
702 	u64     mask;
703 	u16     regno;
704 } dca_rcvhdr_reg_map[] = {
705 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq0DCAOPH),
706 	   ~SYM_MASK(DCACtrlB, RcvHdrq0DCAOPH) , KREG_IDX(DCACtrlB) },
707 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq1DCAOPH),
708 	   ~SYM_MASK(DCACtrlB, RcvHdrq1DCAOPH) , KREG_IDX(DCACtrlB) },
709 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq2DCAOPH),
710 	   ~SYM_MASK(DCACtrlB, RcvHdrq2DCAOPH) , KREG_IDX(DCACtrlB) },
711 	{ 0, SYM_LSB(DCACtrlB, RcvHdrq3DCAOPH),
712 	   ~SYM_MASK(DCACtrlB, RcvHdrq3DCAOPH) , KREG_IDX(DCACtrlB) },
713 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq4DCAOPH),
714 	   ~SYM_MASK(DCACtrlC, RcvHdrq4DCAOPH) , KREG_IDX(DCACtrlC) },
715 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq5DCAOPH),
716 	   ~SYM_MASK(DCACtrlC, RcvHdrq5DCAOPH) , KREG_IDX(DCACtrlC) },
717 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq6DCAOPH),
718 	   ~SYM_MASK(DCACtrlC, RcvHdrq6DCAOPH) , KREG_IDX(DCACtrlC) },
719 	{ 1, SYM_LSB(DCACtrlC, RcvHdrq7DCAOPH),
720 	   ~SYM_MASK(DCACtrlC, RcvHdrq7DCAOPH) , KREG_IDX(DCACtrlC) },
721 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq8DCAOPH),
722 	   ~SYM_MASK(DCACtrlD, RcvHdrq8DCAOPH) , KREG_IDX(DCACtrlD) },
723 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq9DCAOPH),
724 	   ~SYM_MASK(DCACtrlD, RcvHdrq9DCAOPH) , KREG_IDX(DCACtrlD) },
725 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq10DCAOPH),
726 	   ~SYM_MASK(DCACtrlD, RcvHdrq10DCAOPH) , KREG_IDX(DCACtrlD) },
727 	{ 2, SYM_LSB(DCACtrlD, RcvHdrq11DCAOPH),
728 	   ~SYM_MASK(DCACtrlD, RcvHdrq11DCAOPH) , KREG_IDX(DCACtrlD) },
729 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq12DCAOPH),
730 	   ~SYM_MASK(DCACtrlE, RcvHdrq12DCAOPH) , KREG_IDX(DCACtrlE) },
731 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq13DCAOPH),
732 	   ~SYM_MASK(DCACtrlE, RcvHdrq13DCAOPH) , KREG_IDX(DCACtrlE) },
733 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq14DCAOPH),
734 	   ~SYM_MASK(DCACtrlE, RcvHdrq14DCAOPH) , KREG_IDX(DCACtrlE) },
735 	{ 3, SYM_LSB(DCACtrlE, RcvHdrq15DCAOPH),
736 	   ~SYM_MASK(DCACtrlE, RcvHdrq15DCAOPH) , KREG_IDX(DCACtrlE) },
737 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq16DCAOPH),
738 	   ~SYM_MASK(DCACtrlF, RcvHdrq16DCAOPH) , KREG_IDX(DCACtrlF) },
739 	{ 4, SYM_LSB(DCACtrlF, RcvHdrq17DCAOPH),
740 	   ~SYM_MASK(DCACtrlF, RcvHdrq17DCAOPH) , KREG_IDX(DCACtrlF) },
741 };
742 #endif
743 
744 /* ibcctrl bits */
745 #define QLOGIC_IB_IBCC_LINKINITCMD_DISABLE 1
746 /* cycle through TS1/TS2 till OK */
747 #define QLOGIC_IB_IBCC_LINKINITCMD_POLL 2
748 /* wait for TS1, then go on */
749 #define QLOGIC_IB_IBCC_LINKINITCMD_SLEEP 3
750 #define QLOGIC_IB_IBCC_LINKINITCMD_SHIFT 16
751 
752 #define QLOGIC_IB_IBCC_LINKCMD_DOWN 1           /* move to 0x11 */
753 #define QLOGIC_IB_IBCC_LINKCMD_ARMED 2          /* move to 0x21 */
754 #define QLOGIC_IB_IBCC_LINKCMD_ACTIVE 3 /* move to 0x31 */
755 
756 #define BLOB_7322_IBCHG 0x101
757 
758 static inline void qib_write_kreg(const struct qib_devdata *dd,
759 				  const u32 regno, u64 value);
760 static inline u32 qib_read_kreg32(const struct qib_devdata *, const u32);
761 static void write_7322_initregs(struct qib_devdata *);
762 static void write_7322_init_portregs(struct qib_pportdata *);
763 static void setup_7322_link_recovery(struct qib_pportdata *, u32);
764 static void check_7322_rxe_status(struct qib_pportdata *);
765 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *, u64, u32 *);
766 #ifdef CONFIG_INFINIBAND_QIB_DCA
767 static void qib_setup_dca(struct qib_devdata *dd);
768 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum);
769 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum);
770 #endif
771 
772 /**
773  * qib_read_ureg32 - read 32-bit virtualized per-context register
774  * @dd: device
775  * @regno: register number
776  * @ctxt: context number
777  *
778  * Return the contents of a register that is virtualized to be per context.
779  * Returns 0 if the device is not present (not distinguishable from a valid
780  * value of 0 at runtime; we may add a separate error variable at some point).
781  */
782 static inline u32 qib_read_ureg32(const struct qib_devdata *dd,
783 				  enum qib_ureg regno, int ctxt)
784 {
785 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
786 		return 0;
787 	return readl(regno + (u64 __iomem *)(
788 		(dd->ureg_align * ctxt) + (dd->userbase ?
789 		 (char __iomem *)dd->userbase :
790 		 (char __iomem *)dd->kregbase + dd->uregbase)));
791 }
792 
793 /**
794  * qib_read_ureg - read virtualized per-context register
795  * @dd: device
796  * @regno: register number
797  * @ctxt: context number
798  *
799  * Return the contents of a register that is virtualized to be per context.
800  * Returns 0 if the device is not present (not distinguishable from a valid
801  * value of 0 at runtime; we may add a separate error variable at some point).
802  */
803 static inline u64 qib_read_ureg(const struct qib_devdata *dd,
804 				enum qib_ureg regno, int ctxt)
805 {
806 
807 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
808 		return 0;
809 	return readq(regno + (u64 __iomem *)(
810 		(dd->ureg_align * ctxt) + (dd->userbase ?
811 		 (char __iomem *)dd->userbase :
812 		 (char __iomem *)dd->kregbase + dd->uregbase)));
813 }
814 
815 /**
816  * qib_write_ureg - write virtualized per-context register
817  * @dd: device
818  * @regno: register number
819  * @value: value
820  * @ctxt: context
821  *
822  * Write the contents of a register that is virtualized to be per context.
823  */
824 static inline void qib_write_ureg(const struct qib_devdata *dd,
825 				  enum qib_ureg regno, u64 value, int ctxt)
826 {
827 	u64 __iomem *ubase;
828 
829 	if (dd->userbase)
830 		ubase = (u64 __iomem *)
831 			((char __iomem *) dd->userbase +
832 			 dd->ureg_align * ctxt);
833 	else
834 		ubase = (u64 __iomem *)
835 			(dd->uregbase +
836 			 (char __iomem *) dd->kregbase +
837 			 dd->ureg_align * ctxt);
838 
839 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
840 		writeq(value, &ubase[regno]);
841 }
842 
843 static inline u32 qib_read_kreg32(const struct qib_devdata *dd,
844 				  const u32 regno)
845 {
846 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
847 		return -1;
848 	return readl((u32 __iomem *) &dd->kregbase[regno]);
849 }
850 
851 static inline u64 qib_read_kreg64(const struct qib_devdata *dd,
852 				  const u32 regno)
853 {
854 	if (!dd->kregbase || !(dd->flags & QIB_PRESENT))
855 		return -1;
856 	return readq(&dd->kregbase[regno]);
857 }
858 
859 static inline void qib_write_kreg(const struct qib_devdata *dd,
860 				  const u32 regno, u64 value)
861 {
862 	if (dd->kregbase && (dd->flags & QIB_PRESENT))
863 		writeq(value, &dd->kregbase[regno]);
864 }
865 
866 /*
867  * not many sanity checks for the port-specific kernel register routines,
868  * since they are only used when it's known to be safe.
869  */
870 static inline u64 qib_read_kreg_port(const struct qib_pportdata *ppd,
871 				     const u16 regno)
872 {
873 	if (!ppd->cpspec->kpregbase || !(ppd->dd->flags & QIB_PRESENT))
874 		return 0ULL;
875 	return readq(&ppd->cpspec->kpregbase[regno]);
876 }
877 
878 static inline void qib_write_kreg_port(const struct qib_pportdata *ppd,
879 				       const u16 regno, u64 value)
880 {
881 	if (ppd->cpspec && ppd->dd && ppd->cpspec->kpregbase &&
882 	    (ppd->dd->flags & QIB_PRESENT))
883 		writeq(value, &ppd->cpspec->kpregbase[regno]);
884 }
885 
886 /**
887  * qib_write_kreg_ctxt - write a device's per-ctxt 64-bit kernel register
888  * @dd: the qlogic_ib device
889  * @regno: the register number to write
890  * @ctxt: the context containing the register
891  * @value: the value to write
892  */
893 static inline void qib_write_kreg_ctxt(const struct qib_devdata *dd,
894 				       const u16 regno, unsigned ctxt,
895 				       u64 value)
896 {
897 	qib_write_kreg(dd, regno + ctxt, value);
898 }
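
/*
 * Usage sketch (illustration; the field name is only an example): per-context
 * registers are laid out consecutively, so setting context 3's receive header
 * queue address is just
 *	qib_write_kreg_ctxt(dd, krc_rcvhdraddr, 3, rcd->rcvhdrq_phys);
 * which resolves to qib_write_kreg(dd, krc_rcvhdraddr + 3, ...).
 */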
899 
900 static inline u64 read_7322_creg(const struct qib_devdata *dd, u16 regno)
901 {
902 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
903 		return 0;
904 	return readq(&dd->cspec->cregbase[regno]);
905 
906 
907 }
908 
909 static inline u32 read_7322_creg32(const struct qib_devdata *dd, u16 regno)
910 {
911 	if (!dd->cspec->cregbase || !(dd->flags & QIB_PRESENT))
912 		return 0;
913 	return readl(&dd->cspec->cregbase[regno]);
914 
915 
916 }
917 
918 static inline void write_7322_creg_port(const struct qib_pportdata *ppd,
919 					u16 regno, u64 value)
920 {
921 	if (ppd->cpspec && ppd->cpspec->cpregbase &&
922 	    (ppd->dd->flags & QIB_PRESENT))
923 		writeq(value, &ppd->cpspec->cpregbase[regno]);
924 }
925 
926 static inline u64 read_7322_creg_port(const struct qib_pportdata *ppd,
927 				      u16 regno)
928 {
929 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
930 	    !(ppd->dd->flags & QIB_PRESENT))
931 		return 0;
932 	return readq(&ppd->cpspec->cpregbase[regno]);
933 }
934 
935 static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
936 					u16 regno)
937 {
938 	if (!ppd->cpspec || !ppd->cpspec->cpregbase ||
939 	    !(ppd->dd->flags & QIB_PRESENT))
940 		return 0;
941 	return readl(&ppd->cpspec->cpregbase[regno]);
942 }
943 
944 /* bits in Control register */
945 #define QLOGIC_IB_C_RESET SYM_MASK(Control, SyncReset)
946 #define QLOGIC_IB_C_SDMAFETCHPRIOEN SYM_MASK(Control, SDmaDescFetchPriorityEn)
947 
948 /* bits in general interrupt regs */
949 #define QIB_I_RCVURG_LSB SYM_LSB(IntMask, RcvUrg0IntMask)
950 #define QIB_I_RCVURG_RMASK MASK_ACROSS(0, 17)
951 #define QIB_I_RCVURG_MASK (QIB_I_RCVURG_RMASK << QIB_I_RCVURG_LSB)
952 #define QIB_I_RCVAVAIL_LSB SYM_LSB(IntMask, RcvAvail0IntMask)
953 #define QIB_I_RCVAVAIL_RMASK MASK_ACROSS(0, 17)
954 #define QIB_I_RCVAVAIL_MASK (QIB_I_RCVAVAIL_RMASK << QIB_I_RCVAVAIL_LSB)
955 #define QIB_I_C_ERROR INT_MASK(Err)
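
/*
 * Illustration (not in the original): the RcvUrg/RcvAvail interrupt bits are
 * one per context, packed contiguously above their LSBs, so the RcvAvail
 * interrupt bit for context "ctxt" is
 *	1ULL << (QIB_I_RCVAVAIL_LSB + ctxt)
 * which is how a receive interrupt is attributed to a context.
 */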
956 
957 #define QIB_I_SPIOSENT (INT_MASK_P(SendDone, 0) | INT_MASK_P(SendDone, 1))
958 #define QIB_I_SPIOBUFAVAIL INT_MASK(SendBufAvail)
959 #define QIB_I_GPIO INT_MASK(AssertGPIO)
960 #define QIB_I_P_SDMAINT(pidx) \
961 	(INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
962 	 INT_MASK_P(SDmaProgress, pidx) | \
963 	 INT_MASK_PM(SDmaCleanupDone, pidx))
964 
965 /* Interrupt bits that are "per port" */
966 #define QIB_I_P_BITSEXTANT(pidx) \
967 	(INT_MASK_P(Err, pidx) | INT_MASK_P(SendDone, pidx) | \
968 	INT_MASK_P(SDma, pidx) | INT_MASK_P(SDmaIdle, pidx) | \
969 	INT_MASK_P(SDmaProgress, pidx) | \
970 	INT_MASK_PM(SDmaCleanupDone, pidx))
971 
972 /* Interrupt bits that are common to a device */
973 /* currently unused: QIB_I_SPIOSENT */
974 #define QIB_I_C_BITSEXTANT \
975 	(QIB_I_RCVURG_MASK | QIB_I_RCVAVAIL_MASK | \
976 	QIB_I_SPIOSENT | \
977 	QIB_I_C_ERROR | QIB_I_SPIOBUFAVAIL | QIB_I_GPIO)
978 
979 #define QIB_I_BITSEXTANT (QIB_I_C_BITSEXTANT | \
980 	QIB_I_P_BITSEXTANT(0) | QIB_I_P_BITSEXTANT(1))
981 
982 /*
983  * Error bits that are "per port".
984  */
985 #define QIB_E_P_IBSTATUSCHANGED ERR_MASK_N(IBStatusChanged)
986 #define QIB_E_P_SHDR ERR_MASK_N(SHeadersErr)
987 #define QIB_E_P_VL15_BUF_MISUSE ERR_MASK_N(VL15BufMisuseErr)
988 #define QIB_E_P_SND_BUF_MISUSE ERR_MASK_N(SendBufMisuseErr)
989 #define QIB_E_P_SUNSUPVL ERR_MASK_N(SendUnsupportedVLErr)
990 #define QIB_E_P_SUNEXP_PKTNUM ERR_MASK_N(SendUnexpectedPktNumErr)
991 #define QIB_E_P_SDROP_DATA ERR_MASK_N(SendDroppedDataPktErr)
992 #define QIB_E_P_SDROP_SMP ERR_MASK_N(SendDroppedSmpPktErr)
993 #define QIB_E_P_SPKTLEN ERR_MASK_N(SendPktLenErr)
994 #define QIB_E_P_SUNDERRUN ERR_MASK_N(SendUnderRunErr)
995 #define QIB_E_P_SMAXPKTLEN ERR_MASK_N(SendMaxPktLenErr)
996 #define QIB_E_P_SMINPKTLEN ERR_MASK_N(SendMinPktLenErr)
997 #define QIB_E_P_RIBLOSTLINK ERR_MASK_N(RcvIBLostLinkErr)
998 #define QIB_E_P_RHDR ERR_MASK_N(RcvHdrErr)
999 #define QIB_E_P_RHDRLEN ERR_MASK_N(RcvHdrLenErr)
1000 #define QIB_E_P_RBADTID ERR_MASK_N(RcvBadTidErr)
1001 #define QIB_E_P_RBADVERSION ERR_MASK_N(RcvBadVersionErr)
1002 #define QIB_E_P_RIBFLOW ERR_MASK_N(RcvIBFlowErr)
1003 #define QIB_E_P_REBP ERR_MASK_N(RcvEBPErr)
1004 #define QIB_E_P_RUNSUPVL ERR_MASK_N(RcvUnsupportedVLErr)
1005 #define QIB_E_P_RUNEXPCHAR ERR_MASK_N(RcvUnexpectedCharErr)
1006 #define QIB_E_P_RSHORTPKTLEN ERR_MASK_N(RcvShortPktLenErr)
1007 #define QIB_E_P_RLONGPKTLEN ERR_MASK_N(RcvLongPktLenErr)
1008 #define QIB_E_P_RMAXPKTLEN ERR_MASK_N(RcvMaxPktLenErr)
1009 #define QIB_E_P_RMINPKTLEN ERR_MASK_N(RcvMinPktLenErr)
1010 #define QIB_E_P_RICRC ERR_MASK_N(RcvICRCErr)
1011 #define QIB_E_P_RVCRC ERR_MASK_N(RcvVCRCErr)
1012 #define QIB_E_P_RFORMATERR ERR_MASK_N(RcvFormatErr)
1013 
1014 #define QIB_E_P_SDMA1STDESC ERR_MASK_N(SDma1stDescErr)
1015 #define QIB_E_P_SDMABASE ERR_MASK_N(SDmaBaseErr)
1016 #define QIB_E_P_SDMADESCADDRMISALIGN ERR_MASK_N(SDmaDescAddrMisalignErr)
1017 #define QIB_E_P_SDMADWEN ERR_MASK_N(SDmaDwEnErr)
1018 #define QIB_E_P_SDMAGENMISMATCH ERR_MASK_N(SDmaGenMismatchErr)
1019 #define QIB_E_P_SDMAHALT ERR_MASK_N(SDmaHaltErr)
1020 #define QIB_E_P_SDMAMISSINGDW ERR_MASK_N(SDmaMissingDwErr)
1021 #define QIB_E_P_SDMAOUTOFBOUND ERR_MASK_N(SDmaOutOfBoundErr)
1022 #define QIB_E_P_SDMARPYTAG ERR_MASK_N(SDmaRpyTagErr)
1023 #define QIB_E_P_SDMATAILOUTOFBOUND ERR_MASK_N(SDmaTailOutOfBoundErr)
1024 #define QIB_E_P_SDMAUNEXPDATA ERR_MASK_N(SDmaUnexpDataErr)
1025 
1026 /* Error bits that are common to a device */
1027 #define QIB_E_RESET ERR_MASK(ResetNegated)
1028 #define QIB_E_HARDWARE ERR_MASK(HardwareErr)
1029 #define QIB_E_INVALIDADDR ERR_MASK(InvalidAddrErr)
1030 
1031 
1032 /*
1033  * Per chip (rather than per-port) errors.  Most either do
1034  * nothing but trigger a print (because they self-recover, or
1035  * always occur in tandem with other errors that handle the
1036  * issue), or indicate errors with no recovery, which we still
1037  * want to know about when they happen.
1038  */
1039 #define QIB_E_SBUF_VL15_MISUSE ERR_MASK(SBufVL15MisUseErr)
1040 #define QIB_E_BADEEP ERR_MASK(InvalidEEPCmd)
1041 #define QIB_E_VLMISMATCH ERR_MASK(SendVLMismatchErr)
1042 #define QIB_E_ARMLAUNCH ERR_MASK(SendArmLaunchErr)
1043 #define QIB_E_SPCLTRIG ERR_MASK(SendSpecialTriggerErr)
1044 #define QIB_E_RRCVHDRFULL ERR_MASK(RcvHdrFullErr)
1045 #define QIB_E_RRCVEGRFULL ERR_MASK(RcvEgrFullErr)
1046 #define QIB_E_RCVCTXTSHARE ERR_MASK(RcvContextShareErr)
1047 
1048 /* SDMA chip errors (not per port)
1049  * QIB_E_SDMA_BUF_DUP needs no special handling, because we will also get
1050  * the SDMAHALT error immediately, so we just print the dup error via the
1051  * E_AUTO mechanism.  This is true of most of the per-port fatal errors
1052  * as well, but since this is port-independent, by definition, it's
1053  * handled a bit differently.  SDMA_VL15 and SDMA_WRONG_PORT are per
1054  * packet send errors, and so are handled in the same manner as other
1055  * per-packet errors.
1056  */
1057 #define QIB_E_SDMA_VL15 ERR_MASK(SDmaVL15Err)
1058 #define QIB_E_SDMA_WRONG_PORT ERR_MASK(SDmaWrongPortErr)
1059 #define QIB_E_SDMA_BUF_DUP ERR_MASK(SDmaBufMaskDuplicateErr)
1060 
1061 /*
1062  * The set below is functionally equivalent to the legacy QLOGIC_IB_E_PKTERRS;
1063  * it is used to print "common" packet errors.
1064  */
1065 #define QIB_E_P_PKTERRS (QIB_E_P_SPKTLEN |\
1066 	QIB_E_P_SDROP_DATA | QIB_E_P_RVCRC |\
1067 	QIB_E_P_RICRC | QIB_E_P_RSHORTPKTLEN |\
1068 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1069 	QIB_E_P_REBP)
1070 
1071 /* Error bits that are packet-related (Receive, per-port) */
1072 #define QIB_E_P_RPKTERRS (\
1073 	QIB_E_P_RHDRLEN | QIB_E_P_RBADTID | \
1074 	QIB_E_P_RBADVERSION | QIB_E_P_RHDR | \
1075 	QIB_E_P_RLONGPKTLEN | QIB_E_P_RSHORTPKTLEN |\
1076 	QIB_E_P_RMAXPKTLEN | QIB_E_P_RMINPKTLEN | \
1077 	QIB_E_P_RFORMATERR | QIB_E_P_RUNSUPVL | \
1078 	QIB_E_P_RUNEXPCHAR | QIB_E_P_RIBFLOW | QIB_E_P_REBP)
1079 
1080 /*
1081  * Error bits that are Send-related (per port)
1082  * (ARMLAUNCH excluded from E_SPKTERRS because it gets special handling).
1083  * All of these potentially need to have a buffer disarmed
1084  */
1085 #define QIB_E_P_SPKTERRS (\
1086 	QIB_E_P_SUNEXP_PKTNUM |\
1087 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1088 	QIB_E_P_SMAXPKTLEN |\
1089 	QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SHDR | \
1090 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN | \
1091 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNSUPVL)
1092 
1093 #define QIB_E_SPKTERRS ( \
1094 		QIB_E_SBUF_VL15_MISUSE | QIB_E_VLMISMATCH | \
1095 		ERR_MASK_N(SendUnsupportedVLErr) |			\
1096 		QIB_E_SPCLTRIG | QIB_E_SDMA_VL15 | QIB_E_SDMA_WRONG_PORT)
1097 
1098 #define QIB_E_P_SDMAERRS ( \
1099 	QIB_E_P_SDMAHALT | \
1100 	QIB_E_P_SDMADESCADDRMISALIGN | \
1101 	QIB_E_P_SDMAUNEXPDATA | \
1102 	QIB_E_P_SDMAMISSINGDW | \
1103 	QIB_E_P_SDMADWEN | \
1104 	QIB_E_P_SDMARPYTAG | \
1105 	QIB_E_P_SDMA1STDESC | \
1106 	QIB_E_P_SDMABASE | \
1107 	QIB_E_P_SDMATAILOUTOFBOUND | \
1108 	QIB_E_P_SDMAOUTOFBOUND | \
1109 	QIB_E_P_SDMAGENMISMATCH)
1110 
1111 /*
1112  * This sets some bits more than once, but makes it more obvious which
1113  * bits are not handled under other categories, and the repeat definition
1114  * is not a problem.
1115  */
1116 #define QIB_E_P_BITSEXTANT ( \
1117 	QIB_E_P_SPKTERRS | QIB_E_P_PKTERRS | QIB_E_P_RPKTERRS | \
1118 	QIB_E_P_RIBLOSTLINK | QIB_E_P_IBSTATUSCHANGED | \
1119 	QIB_E_P_SND_BUF_MISUSE | QIB_E_P_SUNDERRUN | \
1120 	QIB_E_P_SHDR | QIB_E_P_VL15_BUF_MISUSE | QIB_E_P_SDMAERRS \
1121 	)
1122 
1123 /*
1124  * These are errors that can occur when the link
1125  * changes state while a packet is being sent or received.  This doesn't
1126  * cover things like EBP or VCRC, which can result from the sender's link
1127  * changing state while a packet is in flight, so we receive a "known bad" packet.
1128  * All of these are "per port", so renamed:
1129  */
1130 #define QIB_E_P_LINK_PKTERRS (\
1131 	QIB_E_P_SDROP_DATA | QIB_E_P_SDROP_SMP |\
1132 	QIB_E_P_SMINPKTLEN | QIB_E_P_SPKTLEN |\
1133 	QIB_E_P_RSHORTPKTLEN | QIB_E_P_RMINPKTLEN |\
1134 	QIB_E_P_RUNEXPCHAR)
1135 
1136 /*
1137  * This sets some bits more than once, but makes it more obvious which
1138  * bits are not handled under other categories (such as QIB_E_SPKTERRS),
1139  * and the repeat definition is not a problem.
1140  */
1141 #define QIB_E_C_BITSEXTANT (\
1142 	QIB_E_HARDWARE | QIB_E_INVALIDADDR | QIB_E_BADEEP |\
1143 	QIB_E_ARMLAUNCH | QIB_E_VLMISMATCH | QIB_E_RRCVHDRFULL |\
1144 	QIB_E_RRCVEGRFULL | QIB_E_RESET | QIB_E_SBUF_VL15_MISUSE)
1145 
1146 /* Likewise Neuter E_SPKT_ERRS_IGNORE */
1147 #define E_SPKT_ERRS_IGNORE 0
1148 
1149 #define QIB_EXTS_MEMBIST_DISABLED \
1150 	SYM_MASK(EXTStatus, MemBISTDisabled)
1151 #define QIB_EXTS_MEMBIST_ENDTEST \
1152 	SYM_MASK(EXTStatus, MemBISTEndTest)
1153 
1154 #define QIB_E_SPIOARMLAUNCH \
1155 	ERR_MASK(SendArmLaunchErr)
1156 
1157 #define IBA7322_IBCC_LINKINITCMD_MASK SYM_RMASK(IBCCtrlA_0, LinkInitCmd)
1158 #define IBA7322_IBCC_LINKCMD_SHIFT SYM_LSB(IBCCtrlA_0, LinkCmd)
1159 
1160 /*
1161  * IBTA_1_2 is set when multiple speeds are enabled (normal),
1162  * and also if forced QDR (only QDR enabled).  It's enabled for the
1163  * forced QDR case so that scrambling will be enabled by the TS3
1164  * exchange, when supported by both sides of the link.
1165  */
1166 #define IBA7322_IBC_IBTA_1_2_MASK SYM_MASK(IBCCtrlB_0, IB_ENHANCED_MODE)
1167 #define IBA7322_IBC_MAX_SPEED_MASK SYM_MASK(IBCCtrlB_0, SD_SPEED)
1168 #define IBA7322_IBC_SPEED_QDR SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR)
1169 #define IBA7322_IBC_SPEED_DDR SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR)
1170 #define IBA7322_IBC_SPEED_SDR SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR)
1171 #define IBA7322_IBC_SPEED_MASK (SYM_MASK(IBCCtrlB_0, SD_SPEED_SDR) | \
1172 	SYM_MASK(IBCCtrlB_0, SD_SPEED_DDR) | SYM_MASK(IBCCtrlB_0, SD_SPEED_QDR))
1173 #define IBA7322_IBC_SPEED_LSB SYM_LSB(IBCCtrlB_0, SD_SPEED_SDR)
1174 
1175 #define IBA7322_LEDBLINK_OFF_SHIFT SYM_LSB(RcvPktLEDCnt_0, OFFperiod)
1176 #define IBA7322_LEDBLINK_ON_SHIFT SYM_LSB(RcvPktLEDCnt_0, ONperiod)
1177 
1178 #define IBA7322_IBC_WIDTH_AUTONEG SYM_MASK(IBCCtrlB_0, IB_NUM_CHANNELS)
1179 #define IBA7322_IBC_WIDTH_4X_ONLY (1<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1180 #define IBA7322_IBC_WIDTH_1X_ONLY (0<<SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS))
1181 
1182 #define IBA7322_IBC_RXPOL_MASK SYM_MASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1183 #define IBA7322_IBC_RXPOL_LSB SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP)
1184 #define IBA7322_IBC_HRTBT_MASK (SYM_MASK(IBCCtrlB_0, HRTBT_AUTO) | \
1185 	SYM_MASK(IBCCtrlB_0, HRTBT_ENB))
1186 #define IBA7322_IBC_HRTBT_RMASK (IBA7322_IBC_HRTBT_MASK >> \
1187 	SYM_LSB(IBCCtrlB_0, HRTBT_ENB))
1188 #define IBA7322_IBC_HRTBT_LSB SYM_LSB(IBCCtrlB_0, HRTBT_ENB)
1189 
1190 #define IBA7322_REDIRECT_VEC_PER_REG 12
1191 
1192 #define IBA7322_SENDCHK_PKEY SYM_MASK(SendCheckControl_0, PKey_En)
1193 #define IBA7322_SENDCHK_BTHQP SYM_MASK(SendCheckControl_0, BTHQP_En)
1194 #define IBA7322_SENDCHK_SLID SYM_MASK(SendCheckControl_0, SLID_En)
1195 #define IBA7322_SENDCHK_RAW_IPV6 SYM_MASK(SendCheckControl_0, RawIPV6_En)
1196 #define IBA7322_SENDCHK_MINSZ SYM_MASK(SendCheckControl_0, PacketTooSmall_En)
1197 
1198 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
1199 
1200 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
1201 	.msg = #fldname , .sz = sizeof(#fldname) }
1202 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
1203 	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
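
/*
 * Example expansion (follows directly from the macro): HWE_AUTO(MemoryErr)
 * produces
 *	{ .mask = SYM_MASK(HwErrMask, MemoryErrMask),
 *	  .msg = "MemoryErr", .sz = sizeof("MemoryErr") }
 * so each table entry pairs the mask to test with the string to print.
 */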
1204 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
1205 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
1206 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
1207 	HWE_AUTO(PCIESerdesPClkNotDetect),
1208 	HWE_AUTO(PowerOnBISTFailed),
1209 	HWE_AUTO(TempsenseTholdReached),
1210 	HWE_AUTO(MemoryErr),
1211 	HWE_AUTO(PCIeBusParityErr),
1212 	HWE_AUTO(PcieCplTimeout),
1213 	HWE_AUTO(PciePoisonedTLP),
1214 	HWE_AUTO_P(SDmaMemReadErr, 1),
1215 	HWE_AUTO_P(SDmaMemReadErr, 0),
1216 	HWE_AUTO_P(IBCBusFromSPCParityErr, 1),
1217 	HWE_AUTO_P(IBCBusToSPCParityErr, 1),
1218 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
1219 	HWE_AUTO(statusValidNoEop),
1220 	HWE_AUTO(LATriggered),
1221 	{ .mask = 0, .sz = 0 }
1222 };
1223 
1224 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
1225 	.msg = #fldname, .sz = sizeof(#fldname) }
1226 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
1227 	.msg = #fldname, .sz = sizeof(#fldname) }
1228 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
1229 	E_AUTO(RcvEgrFullErr),
1230 	E_AUTO(RcvHdrFullErr),
1231 	E_AUTO(ResetNegated),
1232 	E_AUTO(HardwareErr),
1233 	E_AUTO(InvalidAddrErr),
1234 	E_AUTO(SDmaVL15Err),
1235 	E_AUTO(SBufVL15MisUseErr),
1236 	E_AUTO(InvalidEEPCmd),
1237 	E_AUTO(RcvContextShareErr),
1238 	E_AUTO(SendVLMismatchErr),
1239 	E_AUTO(SendArmLaunchErr),
1240 	E_AUTO(SendSpecialTriggerErr),
1241 	E_AUTO(SDmaWrongPortErr),
1242 	E_AUTO(SDmaBufMaskDuplicateErr),
1243 	{ .mask = 0, .sz = 0 }
1244 };
1245 
1246 static const struct  qib_hwerror_msgs qib_7322p_error_msgs[] = {
1247 	E_P_AUTO(IBStatusChanged),
1248 	E_P_AUTO(SHeadersErr),
1249 	E_P_AUTO(VL15BufMisuseErr),
1250 	/*
1251 	 * SDmaHaltErr is not really an error, so give it a clearer message:
1252 	 */
1253 	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
1254 		.sz = 11},
1255 	E_P_AUTO(SDmaDescAddrMisalignErr),
1256 	E_P_AUTO(SDmaUnexpDataErr),
1257 	E_P_AUTO(SDmaMissingDwErr),
1258 	E_P_AUTO(SDmaDwEnErr),
1259 	E_P_AUTO(SDmaRpyTagErr),
1260 	E_P_AUTO(SDma1stDescErr),
1261 	E_P_AUTO(SDmaBaseErr),
1262 	E_P_AUTO(SDmaTailOutOfBoundErr),
1263 	E_P_AUTO(SDmaOutOfBoundErr),
1264 	E_P_AUTO(SDmaGenMismatchErr),
1265 	E_P_AUTO(SendBufMisuseErr),
1266 	E_P_AUTO(SendUnsupportedVLErr),
1267 	E_P_AUTO(SendUnexpectedPktNumErr),
1268 	E_P_AUTO(SendDroppedDataPktErr),
1269 	E_P_AUTO(SendDroppedSmpPktErr),
1270 	E_P_AUTO(SendPktLenErr),
1271 	E_P_AUTO(SendUnderRunErr),
1272 	E_P_AUTO(SendMaxPktLenErr),
1273 	E_P_AUTO(SendMinPktLenErr),
1274 	E_P_AUTO(RcvIBLostLinkErr),
1275 	E_P_AUTO(RcvHdrErr),
1276 	E_P_AUTO(RcvHdrLenErr),
1277 	E_P_AUTO(RcvBadTidErr),
1278 	E_P_AUTO(RcvBadVersionErr),
1279 	E_P_AUTO(RcvIBFlowErr),
1280 	E_P_AUTO(RcvEBPErr),
1281 	E_P_AUTO(RcvUnsupportedVLErr),
1282 	E_P_AUTO(RcvUnexpectedCharErr),
1283 	E_P_AUTO(RcvShortPktLenErr),
1284 	E_P_AUTO(RcvLongPktLenErr),
1285 	E_P_AUTO(RcvMaxPktLenErr),
1286 	E_P_AUTO(RcvMinPktLenErr),
1287 	E_P_AUTO(RcvICRCErr),
1288 	E_P_AUTO(RcvVCRCErr),
1289 	E_P_AUTO(RcvFormatErr),
1290 	{ .mask = 0, .sz = 0 }
1291 };
1292 
1293 /*
1294  * Below generates "auto-message" for interrupts not specific to any port or
1295  * context
1296  */
1297 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
1298 	.msg = #fldname, .sz = sizeof(#fldname) }
1299 /* Below generates "auto-message" for interrupts specific to a port */
1300 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
1301 	SYM_LSB(IntMask, fldname##Mask##_0), \
1302 	SYM_LSB(IntMask, fldname##Mask##_1)), \
1303 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1304 /* For some reason, the SerDesTrimDone bits are reversed */
1305 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
1306 	SYM_LSB(IntMask, fldname##Mask##_1), \
1307 	SYM_LSB(IntMask, fldname##Mask##_0)), \
1308 	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
1309 /*
1310  * Below generates "auto-message" for interrupts specific to a context,
1311  * with ctxt-number appended
1312  */
1313 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
1314 	SYM_LSB(IntMask, fldname##0IntMask), \
1315 	SYM_LSB(IntMask, fldname##17IntMask)), \
1316 	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
1317 
1318 #define TXSYMPTOM_AUTO_P(fldname) \
1319 	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
1320 	.msg = #fldname, .sz = sizeof(#fldname) }
1321 static const struct  qib_hwerror_msgs hdrchk_msgs[] = {
1322 	TXSYMPTOM_AUTO_P(NonKeyPacket),
1323 	TXSYMPTOM_AUTO_P(GRHFail),
1324 	TXSYMPTOM_AUTO_P(PkeyFail),
1325 	TXSYMPTOM_AUTO_P(QPFail),
1326 	TXSYMPTOM_AUTO_P(SLIDFail),
1327 	TXSYMPTOM_AUTO_P(RawIPV6),
1328 	TXSYMPTOM_AUTO_P(PacketTooSmall),
1329 	{ .mask = 0, .sz = 0 }
1330 };
1331 
1332 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
1333 
1334 /*
1335  * Called when we might have an error that is specific to a particular
1336  * PIO buffer, and may need to disarm that buffer so it can be re-used,
1337  * without forcing an update of pioavail.
1338  */
1339 static void qib_disarm_7322_senderrbufs(struct qib_pportdata *ppd)
1340 {
1341 	struct qib_devdata *dd = ppd->dd;
1342 	u32 i;
1343 	int any;
1344 	u32 piobcnt = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
1345 	u32 regcnt = (piobcnt + BITS_PER_LONG - 1) / BITS_PER_LONG;
1346 	unsigned long sbuf[4];
1347 
1348 	/*
1349 	 * It's possible that sendbuffererror could have bits set; might
1350 	 * have already done this as a result of hardware error handling.
1351 	 */
1352 	any = 0;
1353 	for (i = 0; i < regcnt; ++i) {
1354 		sbuf[i] = qib_read_kreg64(dd, kr_sendbuffererror + i);
1355 		if (sbuf[i]) {
1356 			any = 1;
1357 			qib_write_kreg(dd, kr_sendbuffererror + i, sbuf[i]);
1358 		}
1359 	}
1360 
1361 	if (any)
1362 		qib_disarm_piobufs_set(dd, sbuf, piobcnt);
1363 }
1364 
1365 /* No txe_recover yet, if ever */
1366 
1367 /* No decode__errors yet */
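/*
 * Format the bits set in "errs" as a comma-separated string into "msg",
 * using the mask/name pairs in "msp".  For multi-bit masks the offending
 * bit's offset within the mask (e.g. port or context number) is appended,
 * and any bits not covered by the table are shown in hex at the end.
 */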
1368 static void err_decode(char *msg, size_t len, u64 errs,
1369 		       const struct qib_hwerror_msgs *msp)
1370 {
1371 	u64 these, lmask;
1372 	int took, multi, n = 0;
1373 
1374 	while (errs && msp && msp->mask) {
1375 		multi = (msp->mask & (msp->mask - 1));
1376 		while (errs & msp->mask) {
1377 			these = (errs & msp->mask);
1378 			lmask = (these & (these - 1)) ^ these;
1379 			if (len) {
1380 				if (n++) {
1381 					/* separate the strings */
1382 					*msg++ = ',';
1383 					len--;
1384 				}
1385 				BUG_ON(!msp->sz);
1386 				/* msp->sz counts the nul */
1387 				took = min_t(size_t, msp->sz - (size_t)1, len);
1388 				memcpy(msg,  msp->msg, took);
1389 				len -= took;
1390 				msg += took;
1391 				if (len)
1392 					*msg = '\0';
1393 			}
1394 			errs &= ~lmask;
1395 			if (len && multi) {
1396 				/* More than one bit in this mask */
1397 				int idx = -1;
1398 
1399 				while (lmask & msp->mask) {
1400 					++idx;
1401 					lmask >>= 1;
1402 				}
1403 				took = scnprintf(msg, len, "_%d", idx);
1404 				len -= took;
1405 				msg += took;
1406 			}
1407 		}
1408 		++msp;
1409 	}
1410 	/* If some bits are left, show in hex. */
1411 	if (len && errs)
1412 		snprintf(msg, len, "%sMORE:%llX", n ? "," : "",
1413 			(unsigned long long) errs);
1414 }
1415 
1416 /* only called if r1 set */
1417 static void flush_fifo(struct qib_pportdata *ppd)
1418 {
1419 	struct qib_devdata *dd = ppd->dd;
1420 	u32 __iomem *piobuf;
1421 	u32 bufn;
1422 	u32 *hdr;
1423 	u64 pbc;
1424 	const unsigned hdrwords = 7;
1425 	static struct ib_header ibhdr = {
1426 		.lrh[0] = cpu_to_be16(0xF000 | QIB_LRH_BTH),
1427 		.lrh[1] = IB_LID_PERMISSIVE,
1428 		.lrh[2] = cpu_to_be16(hdrwords + SIZE_OF_CRC),
1429 		.lrh[3] = IB_LID_PERMISSIVE,
1430 		.u.oth.bth[0] = cpu_to_be32(
1431 			(IB_OPCODE_UD_SEND_ONLY << 24) | QIB_DEFAULT_P_KEY),
1432 		.u.oth.bth[1] = cpu_to_be32(0),
1433 		.u.oth.bth[2] = cpu_to_be32(0),
1434 		.u.oth.u.ud.deth[0] = cpu_to_be32(0),
1435 		.u.oth.u.ud.deth[1] = cpu_to_be32(0),
1436 	};
1437 
1438 	/*
1439 	 * Send a dummy VL15 packet to flush the launch FIFO.
1440 	 * This will not actually be sent since the TxeBypassIbc bit is set.
1441 	 */
1442 	pbc = PBC_7322_VL15_SEND |
1443 		(((u64)ppd->hw_pidx) << (PBC_PORT_SEL_LSB + 32)) |
1444 		(hdrwords + SIZE_OF_CRC);
1445 	piobuf = qib_7322_getsendbuf(ppd, pbc, &bufn);
1446 	if (!piobuf)
1447 		return;
1448 	writeq(pbc, piobuf);
1449 	hdr = (u32 *) &ibhdr;
1450 	if (dd->flags & QIB_PIO_FLUSH_WC) {
1451 		qib_flush_wc();
1452 		qib_pio_copy(piobuf + 2, hdr, hdrwords - 1);
1453 		qib_flush_wc();
1454 		__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords + 1);
1455 		qib_flush_wc();
1456 	} else
1457 		qib_pio_copy(piobuf + 2, hdr, hdrwords);
1458 	qib_sendbuf_done(dd, bufn);
1459 }
1460 
1461 /*
1462  * This is called with interrupts disabled and sdma_lock held.
1463  */
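/*
 * For QIB_SDMA_SENDCTRL_OP_DRAIN, sends are blocked before the Txe
 * bypass/abort/drain bits are applied and re-enabled afterwards; the
 * kr_scratch writes between steps make sure each change reaches the
 * chip before the next one.
 */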
1464 static void qib_7322_sdma_sendctrl(struct qib_pportdata *ppd, unsigned op)
1465 {
1466 	struct qib_devdata *dd = ppd->dd;
1467 	u64 set_sendctrl = 0;
1468 	u64 clr_sendctrl = 0;
1469 
1470 	if (op & QIB_SDMA_SENDCTRL_OP_ENABLE)
1471 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1472 	else
1473 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaEnable);
1474 
1475 	if (op & QIB_SDMA_SENDCTRL_OP_INTENABLE)
1476 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1477 	else
1478 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaIntEnable);
1479 
1480 	if (op & QIB_SDMA_SENDCTRL_OP_HALT)
1481 		set_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1482 	else
1483 		clr_sendctrl |= SYM_MASK(SendCtrl_0, SDmaHalt);
1484 
1485 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN)
1486 		set_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1487 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1488 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1489 	else
1490 		clr_sendctrl |= SYM_MASK(SendCtrl_0, TxeBypassIbc) |
1491 				SYM_MASK(SendCtrl_0, TxeAbortIbc) |
1492 				SYM_MASK(SendCtrl_0, TxeDrainRmFifo);
1493 
1494 	spin_lock(&dd->sendctrl_lock);
1495 
1496 	/* If we are draining everything, block sends first */
1497 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1498 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
1499 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1500 		qib_write_kreg(dd, kr_scratch, 0);
1501 	}
1502 
1503 	ppd->p_sendctrl |= set_sendctrl;
1504 	ppd->p_sendctrl &= ~clr_sendctrl;
1505 
1506 	if (op & QIB_SDMA_SENDCTRL_OP_CLEANUP)
1507 		qib_write_kreg_port(ppd, krp_sendctrl,
1508 				    ppd->p_sendctrl |
1509 				    SYM_MASK(SendCtrl_0, SDmaCleanup));
1510 	else
1511 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1512 	qib_write_kreg(dd, kr_scratch, 0);
1513 
1514 	if (op & QIB_SDMA_SENDCTRL_OP_DRAIN) {
1515 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
1516 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
1517 		qib_write_kreg(dd, kr_scratch, 0);
1518 	}
1519 
1520 	spin_unlock(&dd->sendctrl_lock);
1521 
1522 	if ((op & QIB_SDMA_SENDCTRL_OP_DRAIN) && ppd->dd->cspec->r1)
1523 		flush_fifo(ppd);
1524 }
1525 
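/*
 * Chip-specific SDMA hardware clean-up hook; on the 7322 no extra chip
 * work is needed here, just report the engine as cleaned to the SDMA
 * state machine.
 */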
1526 static void qib_7322_sdma_hw_clean_up(struct qib_pportdata *ppd)
1527 {
1528 	__qib_sdma_process_event(ppd, qib_sdma_event_e50_hw_cleaned);
1529 }
1530 
1531 static void qib_sdma_7322_setlengen(struct qib_pportdata *ppd)
1532 {
1533 	/*
1534 	 * Set SendDmaLenGen, then clear and set the MSB of the
1535 	 * generation count, to enable generation checking and load
1536 	 * the internal generation counter.
1537 	 */
1538 	qib_write_kreg_port(ppd, krp_senddmalengen, ppd->sdma_descq_cnt);
1539 	qib_write_kreg_port(ppd, krp_senddmalengen,
1540 			    ppd->sdma_descq_cnt |
1541 			    (1ULL << QIB_7322_SendDmaLenGen_0_Generation_MSB));
1542 }
1543 
1544 /*
1545  * Must be called with sdma_lock held, or before init finished.
1546  */
1547 static void qib_sdma_update_7322_tail(struct qib_pportdata *ppd, u16 tail)
1548 {
1549 	/* Commit writes to memory and advance the tail on the chip */
1550 	wmb();
1551 	ppd->sdma_descq_tail = tail;
1552 	qib_write_kreg_port(ppd, krp_senddmatail, tail);
1553 }
1554 
1555 /*
1556  * This is called with interrupts disabled and sdma_lock held.
1557  */
1558 static void qib_7322_sdma_hw_start_up(struct qib_pportdata *ppd)
1559 {
1560 	/*
1561 	 * Drain all FIFOs.
1562 	 * The hardware doesn't require this but we do it so that verbs
1563 	 * and user applications don't wait for link active to send stale
1564 	 * data.
1565 	 */
1566 	sendctrl_7322_mod(ppd, QIB_SENDCTRL_FLUSH);
1567 
1568 	qib_sdma_7322_setlengen(ppd);
1569 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
1570 	ppd->sdma_head_dma[0] = 0;
1571 	qib_7322_sdma_sendctrl(ppd,
1572 		ppd->sdma_state.current_op | QIB_SDMA_SENDCTRL_OP_CLEANUP);
1573 }
1574 
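/*
 * Per-port SDMA error bits which halt/disable the SDMA engine.
 */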
1575 #define DISABLES_SDMA ( \
1576 	QIB_E_P_SDMAHALT | \
1577 	QIB_E_P_SDMADESCADDRMISALIGN | \
1578 	QIB_E_P_SDMAMISSINGDW | \
1579 	QIB_E_P_SDMADWEN | \
1580 	QIB_E_P_SDMARPYTAG | \
1581 	QIB_E_P_SDMA1STDESC | \
1582 	QIB_E_P_SDMABASE | \
1583 	QIB_E_P_SDMATAILOUTOFBOUND | \
1584 	QIB_E_P_SDMAOUTOFBOUND | \
1585 	QIB_E_P_SDMAGENMISMATCH)
1586 
1587 static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
1588 {
1589 	unsigned long flags;
1590 	struct qib_devdata *dd = ppd->dd;
1591 
1592 	errs &= QIB_E_P_SDMAERRS;
1593 	err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
1594 		   errs, qib_7322p_error_msgs);
1595 
1596 	if (errs & QIB_E_P_SDMAUNEXPDATA)
1597 		qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
1598 			    ppd->port);
1599 
1600 	spin_lock_irqsave(&ppd->sdma_lock, flags);
1601 
1602 	if (errs != QIB_E_P_SDMAHALT) {
1603 		/* SDMA errors have QIB_E_P_SDMAHALT and another bit set */
1604 		qib_dev_porterr(dd, ppd->port,
1605 			"SDMA %s 0x%016llx %s\n",
1606 			qib_sdma_state_names[ppd->sdma_state.current_state],
1607 			errs, ppd->cpspec->sdmamsgbuf);
1608 		dump_sdma_7322_state(ppd);
1609 	}
1610 
1611 	switch (ppd->sdma_state.current_state) {
1612 	case qib_sdma_state_s00_hw_down:
1613 		break;
1614 
1615 	case qib_sdma_state_s10_hw_start_up_wait:
1616 		if (errs & QIB_E_P_SDMAHALT)
1617 			__qib_sdma_process_event(ppd,
1618 				qib_sdma_event_e20_hw_started);
1619 		break;
1620 
1621 	case qib_sdma_state_s20_idle:
1622 		break;
1623 
1624 	case qib_sdma_state_s30_sw_clean_up_wait:
1625 		break;
1626 
1627 	case qib_sdma_state_s40_hw_clean_up_wait:
1628 		if (errs & QIB_E_P_SDMAHALT)
1629 			__qib_sdma_process_event(ppd,
1630 				qib_sdma_event_e50_hw_cleaned);
1631 		break;
1632 
1633 	case qib_sdma_state_s50_hw_halt_wait:
1634 		if (errs & QIB_E_P_SDMAHALT)
1635 			__qib_sdma_process_event(ppd,
1636 				qib_sdma_event_e60_hw_halted);
1637 		break;
1638 
1639 	case qib_sdma_state_s99_running:
1640 		__qib_sdma_process_event(ppd, qib_sdma_event_e7322_err_halted);
1641 		__qib_sdma_process_event(ppd, qib_sdma_event_e60_hw_halted);
1642 		break;
1643 	}
1644 
1645 	spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1646 }
1647 
1648 /*
1649  * handle per-device errors (not per-port errors)
1650  */
1651 static noinline void handle_7322_errors(struct qib_devdata *dd)
1652 {
1653 	char *msg;
1654 	u64 iserr = 0;
1655 	u64 errs;
1656 	u64 mask;
1657 
1658 	qib_stats.sps_errints++;
1659 	errs = qib_read_kreg64(dd, kr_errstatus);
1660 	if (!errs) {
1661 		qib_devinfo(dd->pcidev,
1662 			"device error interrupt, but no error bits set!\n");
1663 		goto done;
1664 	}
1665 
1666 	/* don't report errors that are masked */
1667 	errs &= dd->cspec->errormask;
1668 	msg = dd->cspec->emsgbuf;
1669 
1670 	/* do these first, they are most important */
1671 	if (errs & QIB_E_HARDWARE) {
1672 		*msg = '\0';
1673 		qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
1674 	}
1675 
1676 	if (errs & QIB_E_SPKTERRS) {
1677 		qib_disarm_7322_senderrbufs(dd->pport);
1678 		qib_stats.sps_txerrs++;
1679 	} else if (errs & QIB_E_INVALIDADDR)
1680 		qib_stats.sps_txerrs++;
1681 	else if (errs & QIB_E_ARMLAUNCH) {
1682 		qib_stats.sps_txerrs++;
1683 		qib_disarm_7322_senderrbufs(dd->pport);
1684 	}
1685 	qib_write_kreg(dd, kr_errclear, errs);
1686 
1687 	/*
1688 	 * The ones we mask off are handled specially below
1689 	 * or above.  Also mask SDMADISABLED by default as it
1690 	 * is too chatty.
1691 	 */
1692 	mask = QIB_E_HARDWARE;
1693 	*msg = '\0';
1694 
1695 	err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
1696 		   qib_7322error_msgs);
1697 
1698 	/*
1699 	 * Getting reset is a tragedy for all ports. Mark the device
1700 	 * _and_ the ports as "offline" in way meaningful to each.
1701 	 */
1702 	if (errs & QIB_E_RESET) {
1703 		int pidx;
1704 
1705 		qib_dev_err(dd,
1706 			"Got reset, requires re-init (unload and reload driver)\n");
1707 		dd->flags &= ~QIB_INITTED;  /* needs re-init */
1708 		/* mark as having had error */
1709 		*dd->devstatusp |= QIB_STATUS_HWERROR;
1710 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
1711 			if (dd->pport[pidx].link_speed_supported)
1712 				*dd->pport[pidx].statusp &= ~QIB_STATUS_IB_CONF;
1713 	}
1714 
1715 	if (*msg && iserr)
1716 		qib_dev_err(dd, "%s error\n", msg);
1717 
1718 	/*
1719 	 * If there were hdrq or egrfull errors, wake up any processes
1720 	 * waiting in poll.  We used to try to check which contexts had
1721 	 * the overflow, but given the cost of that and the chip reads
1722 	 * to support it, it's better to just wake everybody up if we
1723 	 * get an overflow; waiters can poll again if it's not them.
1724 	 */
1725 	if (errs & (ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr))) {
1726 		qib_handle_urcv(dd, ~0U);
1727 		if (errs & ERR_MASK(RcvEgrFullErr))
1728 			qib_stats.sps_buffull++;
1729 		else
1730 			qib_stats.sps_hdrfull++;
1731 	}
1732 
1733 done:
1734 	return;
1735 }
1736 
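/*
 * Tasklet bottom half for device error interrupts: decode and handle the
 * errors, then restore the error interrupt mask that the hard-IRQ path
 * cleared before scheduling us (see unlikely_7322_intr()).
 */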
1737 static void qib_error_tasklet(unsigned long data)
1738 {
1739 	struct qib_devdata *dd = (struct qib_devdata *)data;
1740 
1741 	handle_7322_errors(dd);
1742 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
1743 }
1744 
1745 static void reenable_chase(struct timer_list *t)
1746 {
1747 	struct qib_chippport_specific *cp = from_timer(cp, t, chase_timer);
1748 	struct qib_pportdata *ppd = cp->ppd;
1749 
1750 	ppd->cpspec->chase_timer.expires = 0;
1751 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1752 		QLOGIC_IB_IBCC_LINKINITCMD_POLL);
1753 }
1754 
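/*
 * Stop "chasing" a link training state we appear stuck in: when chase
 * handling is enabled (qib_chase), take the link down with
 * LINKINITCMD_DISABLE and arm chase_timer so reenable_chase() brings the
 * link back to polling after QIB_CHASE_DIS_TIME.
 */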
1755 static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
1756 		u8 ibclt)
1757 {
1758 	ppd->cpspec->chase_end = 0;
1759 
1760 	if (!qib_chase)
1761 		return;
1762 
1763 	qib_set_ib_7322_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
1764 		QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1765 	ppd->cpspec->chase_timer.expires = jiffies + QIB_CHASE_DIS_TIME;
1766 	add_timer(&ppd->cpspec->chase_timer);
1767 }
1768 
1769 static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
1770 {
1771 	u8 ibclt;
1772 	unsigned long tnow;
1773 
1774 	ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
1775 
1776 	/*
1777 	 * Detect and handle the state chase issue, where we can
1778 	 * get stuck if we are unlucky on timing on both sides of
1779 	 * the link.   If we are, we disable, set a timer, and
1780 	 * then re-enable.
1781 	 */
1782 	switch (ibclt) {
1783 	case IB_7322_LT_STATE_CFGRCVFCFG:
1784 	case IB_7322_LT_STATE_CFGWAITRMT:
1785 	case IB_7322_LT_STATE_TXREVLANES:
1786 	case IB_7322_LT_STATE_CFGENH:
1787 		tnow = jiffies;
1788 		if (ppd->cpspec->chase_end &&
1789 		     time_after(tnow, ppd->cpspec->chase_end))
1790 			disable_chase(ppd, tnow, ibclt);
1791 		else if (!ppd->cpspec->chase_end)
1792 			ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
1793 		break;
1794 	default:
1795 		ppd->cpspec->chase_end = 0;
1796 		break;
1797 	}
1798 
1799 	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
1800 	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
1801 	     ibclt == IB_7322_LT_STATE_LINKUP) &&
1802 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
1803 		force_h1(ppd);
1804 		ppd->cpspec->qdr_reforce = 1;
1805 		if (!ppd->dd->cspec->r1)
1806 			serdes_7322_los_enable(ppd, 0);
1807 	} else if (ppd->cpspec->qdr_reforce &&
1808 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
1809 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
1810 		ibclt == IB_7322_LT_STATE_CFGIDLE ||
1811 		ibclt == IB_7322_LT_STATE_LINKUP))
1812 		force_h1(ppd);
1813 
1814 	if ((IS_QMH(ppd->dd) || IS_QME(ppd->dd)) &&
1815 	    ppd->link_speed_enabled == QIB_IB_QDR &&
1816 	    (ibclt == IB_7322_LT_STATE_CFGTEST ||
1817 	     ibclt == IB_7322_LT_STATE_CFGENH ||
1818 	     (ibclt >= IB_7322_LT_STATE_POLLACTIVE &&
1819 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
1820 		adj_tx_serdes(ppd);
1821 
1822 	if (ibclt != IB_7322_LT_STATE_LINKUP) {
1823 		u8 ltstate = qib_7322_phys_portstate(ibcst);
1824 		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
1825 					  LinkTrainingState);
1826 		if (!ppd->dd->cspec->r1 &&
1827 		    pibclt == IB_7322_LT_STATE_LINKUP &&
1828 		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1829 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
1830 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
1831 		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
1832 			/* If the link went down (but not into recovery),
1833 			 * turn LOS back on */
1834 			serdes_7322_los_enable(ppd, 1);
1835 		if (!ppd->cpspec->qdr_dfe_on &&
1836 		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
1837 			ppd->cpspec->qdr_dfe_on = 1;
1838 			ppd->cpspec->qdr_dfe_time = 0;
1839 			/* On link down, reenable QDR adaptation */
1840 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
1841 					    ppd->dd->cspec->r1 ?
1842 					    QDR_STATIC_ADAPT_DOWN_R1 :
1843 					    QDR_STATIC_ADAPT_DOWN);
1844 			pr_info(
1845 				"IB%u:%u re-enabled QDR adaptation ibclt %x\n",
1846 				ppd->dd->unit, ppd->port, ibclt);
1847 		}
1848 	}
1849 }
1850 
1851 static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
1852 
1853 /*
1854  * This is per-pport error handling.
1855  * It will likely get its own MSIx interrupt (one for each port,
1856  * although just a single handler).
1857  */
1858 static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
1859 {
1860 	char *msg;
1861 	u64 ignore_this_time = 0, iserr = 0, errs, fmask;
1862 	struct qib_devdata *dd = ppd->dd;
1863 
1864 	/* do this as soon as possible */
1865 	fmask = qib_read_kreg64(dd, kr_act_fmask);
1866 	if (!fmask)
1867 		check_7322_rxe_status(ppd);
1868 
1869 	errs = qib_read_kreg_port(ppd, krp_errstatus);
1870 	if (!errs)
1871 		qib_devinfo(dd->pcidev,
1872 			 "Port%d error interrupt, but no error bits set!\n",
1873 			 ppd->port);
1874 	if (!fmask)
1875 		errs &= ~QIB_E_P_IBSTATUSCHANGED;
1876 	if (!errs)
1877 		goto done;
1878 
1879 	msg = ppd->cpspec->epmsgbuf;
1880 	*msg = '\0';
1881 
1882 	if (errs & ~QIB_E_P_BITSEXTANT) {
1883 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1884 			   errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
1885 		if (!*msg)
1886 			snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
1887 				 "no others");
1888 		qib_dev_porterr(dd, ppd->port,
1889 			"error interrupt with unknown errors 0x%016Lx set (and %s)\n",
1890 			(errs & ~QIB_E_P_BITSEXTANT), msg);
1891 		*msg = '\0';
1892 	}
1893 
1894 	if (errs & QIB_E_P_SHDR) {
1895 		u64 symptom;
1896 
1897 		/* determine cause, then write to clear */
1898 		symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
1899 		qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
1900 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
1901 			   hdrchk_msgs);
1902 		*msg = '\0';
1903 		/* senderrbuf cleared in SPKTERRS below */
1904 	}
1905 
1906 	if (errs & QIB_E_P_SPKTERRS) {
1907 		if ((errs & QIB_E_P_LINK_PKTERRS) &&
1908 		    !(ppd->lflags & QIBL_LINKACTIVE)) {
1909 			/*
1910 			 * This can happen when trying to bring the link
1911 			 * up, but the IB link changes state at the "wrong"
1912 			 * time. The IB logic then complains that the packet
1913 			 * isn't valid.  We don't want to confuse people, so
1914 			 * we just don't print them, except at debug
1915 			 */
1916 			err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
1917 				   (errs & QIB_E_P_LINK_PKTERRS),
1918 				   qib_7322p_error_msgs);
1919 			*msg = '\0';
1920 			ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1921 		}
1922 		qib_disarm_7322_senderrbufs(ppd);
1923 	} else if ((errs & QIB_E_P_LINK_PKTERRS) &&
1924 		   !(ppd->lflags & QIBL_LINKACTIVE)) {
1925 		/*
1926 		 * This can happen when SMA is trying to bring the link
1927 		 * up, but the IB link changes state at the "wrong" time.
1928 		 * The IB logic then complains that the packet isn't
1929 		 * valid.  We don't want to confuse people, so we just
1930 		 * don't print them, except at debug
1931 		 */
1932 		err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
1933 			   qib_7322p_error_msgs);
1934 		ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
1935 		*msg = '\0';
1936 	}
1937 
1938 	qib_write_kreg_port(ppd, krp_errclear, errs);
1939 
1940 	errs &= ~ignore_this_time;
1941 	if (!errs)
1942 		goto done;
1943 
1944 	if (errs & QIB_E_P_RPKTERRS)
1945 		qib_stats.sps_rcverrs++;
1946 	if (errs & QIB_E_P_SPKTERRS)
1947 		qib_stats.sps_txerrs++;
1948 
1949 	iserr = errs & ~(QIB_E_P_RPKTERRS | QIB_E_P_PKTERRS);
1950 
1951 	if (errs & QIB_E_P_SDMAERRS)
1952 		sdma_7322_p_errors(ppd, errs);
1953 
1954 	if (errs & QIB_E_P_IBSTATUSCHANGED) {
1955 		u64 ibcs;
1956 		u8 ltstate;
1957 
1958 		ibcs = qib_read_kreg_port(ppd, krp_ibcstatus_a);
1959 		ltstate = qib_7322_phys_portstate(ibcs);
1960 
1961 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
1962 			handle_serdes_issues(ppd, ibcs);
1963 		if (!(ppd->cpspec->ibcctrl_a &
1964 		      SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn))) {
1965 			/*
1966 			 * We got our interrupt, so init code should be
1967 			 * happy and not try alternatives. Now squelch
1968 			 * other "chatter" from link-negotiation (pre Init)
1969 			 */
1970 			ppd->cpspec->ibcctrl_a |=
1971 				SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
1972 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
1973 					    ppd->cpspec->ibcctrl_a);
1974 		}
1975 
1976 		/* Update our picture of width and speed from chip */
1977 		ppd->link_width_active =
1978 			(ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) ?
1979 			    IB_WIDTH_4X : IB_WIDTH_1X;
1980 		ppd->link_speed_active = (ibcs & SYM_MASK(IBCStatusA_0,
1981 			LinkSpeedQDR)) ? QIB_IB_QDR : (ibcs &
1982 			  SYM_MASK(IBCStatusA_0, LinkSpeedActive)) ?
1983 				   QIB_IB_DDR : QIB_IB_SDR;
1984 
1985 		if ((ppd->lflags & QIBL_IB_LINK_DISABLED) && ltstate !=
1986 		    IB_PHYSPORTSTATE_DISABLED)
1987 			qib_set_ib_7322_lstate(ppd, 0,
1988 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
1989 		else
1990 			/*
1991 			 * Since going into a recovery state causes the link
1992 			 * state to go down and since recovery is transitory,
1993 			 * it is better if we "miss" ever seeing the link
1994 			 * training state go into recovery (i.e., ignore this
1995 			 * transition for link state special handling purposes)
1996 			 * without updating lastibcstat.
1997 			 */
1998 			if (ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
1999 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
2000 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
2001 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
2002 				qib_handle_e_ibstatuschanged(ppd, ibcs);
2003 	}
2004 	if (*msg && iserr)
2005 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
2006 
2007 	if (ppd->state_wanted & ppd->lflags)
2008 		wake_up_interruptible(&ppd->state_wait);
2009 done:
2010 	return;
2011 }
2012 
2013 /* enable/disable chip from delivering interrupts */
2014 static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
2015 {
2016 	if (enable) {
2017 		if (dd->flags & QIB_BADINTR)
2018 			return;
2019 		qib_write_kreg(dd, kr_intmask, dd->cspec->int_enable_mask);
2020 		/* cause any pending enabled interrupts to be re-delivered */
2021 		qib_write_kreg(dd, kr_intclear, 0ULL);
2022 		if (dd->cspec->num_msix_entries) {
2023 			/* and same for MSIx */
2024 			u64 val = qib_read_kreg64(dd, kr_intgranted);
2025 
2026 			if (val)
2027 				qib_write_kreg(dd, kr_intgranted, val);
2028 		}
2029 	} else
2030 		qib_write_kreg(dd, kr_intmask, 0ULL);
2031 }
2032 
2033 /*
2034  * Try to cleanup as much as possible for anything that might have gone
2035  * wrong while in freeze mode, such as pio buffers being written by user
2036  * processes (causing armlaunch), send errors due to going into freeze mode,
2037  * etc., and try to avoid causing extra interrupts while doing so.
2038  * Forcibly update the in-memory pioavail register copies after cleanup
2039  * because the chip won't do it while in freeze mode (the register values
2040  * themselves are kept correct).
2041  * Make sure that we don't lose any important interrupts by using the chip
2042  * feature that says that writing 0 to a bit in *clear that is set in
2043  * *status will cause an interrupt to be generated again (if allowed by
2044  * the *mask value).
2045  * This is in chip-specific code because of all of the register accesses,
2046  * even though the details are similar on most chips.
2047  */
2048 static void qib_7322_clear_freeze(struct qib_devdata *dd)
2049 {
2050 	int pidx;
2051 
2052 	/* disable error interrupts, to avoid confusion */
2053 	qib_write_kreg(dd, kr_errmask, 0ULL);
2054 
2055 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2056 		if (dd->pport[pidx].link_speed_supported)
2057 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2058 					    0ULL);
2059 
2060 	/* also disable interrupts; errormask is sometimes overwritten */
2061 	qib_7322_set_intr_state(dd, 0);
2062 
2063 	/* clear the freeze, and be sure chip saw it */
2064 	qib_write_kreg(dd, kr_control, dd->control);
2065 	qib_read_kreg32(dd, kr_scratch);
2066 
2067 	/*
2068 	 * Force new interrupt if any hwerr, error or interrupt bits are
2069 	 * still set, and clear "safe" send packet errors related to freeze
2070 	 * and cancelling sends.  Re-enable error interrupts before possible
2071 	 * force of re-interrupt on pending interrupts.
2072 	 */
2073 	qib_write_kreg(dd, kr_hwerrclear, 0ULL);
2074 	qib_write_kreg(dd, kr_errclear, E_SPKT_ERRS_IGNORE);
2075 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2076 	/* We need to purge per-port errs and reset mask, too */
2077 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
2078 		if (!dd->pport[pidx].link_speed_supported)
2079 			continue;
2080 		qib_write_kreg_port(dd->pport + pidx, krp_errclear, ~0ULL);
2081 		qib_write_kreg_port(dd->pport + pidx, krp_errmask, ~0ULL);
2082 	}
2083 	qib_7322_set_intr_state(dd, 1);
2084 }
2085 
2086 /* no error handling to speak of */
2087 /**
2088  * qib_7322_handle_hwerrors - display hardware errors.
2089  * @dd: the qlogic_ib device
2090  * @msg: the output buffer
2091  * @msgl: the size of the output buffer
2092  *
2093  * Use same msg buffer as regular errors to avoid excessive stack
2094  * use.  Most hardware errors are catastrophic, but for right now,
2095  * we'll print them and continue.  We reuse the same message buffer as
2096  * qib_handle_errors() to avoid excessive stack usage.
2097  */
2098 static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
2099 				     size_t msgl)
2100 {
2101 	u64 hwerrs;
2102 	u32 ctrl;
2103 	int isfatal = 0;
2104 
2105 	hwerrs = qib_read_kreg64(dd, kr_hwerrstatus);
2106 	if (!hwerrs)
2107 		goto bail;
2108 	if (hwerrs == ~0ULL) {
2109 		qib_dev_err(dd,
2110 			"Read of hardware error status failed (all bits set); ignoring\n");
2111 		goto bail;
2112 	}
2113 	qib_stats.sps_hwerrs++;
2114 
2115 	/* Always clear the error status register, except BIST fail */
2116 	qib_write_kreg(dd, kr_hwerrclear, hwerrs &
2117 		       ~HWE_MASK(PowerOnBISTFailed));
2118 
2119 	hwerrs &= dd->cspec->hwerrmask;
2120 
2121 	/* no EEPROM logging, yet */
2122 
2123 	if (hwerrs)
2124 		qib_devinfo(dd->pcidev,
2125 			"Hardware error: hwerr=0x%llx (cleared)\n",
2126 			(unsigned long long) hwerrs);
2127 
2128 	ctrl = qib_read_kreg32(dd, kr_control);
2129 	if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
2130 		/*
2131 		 * No recovery yet...
2132 		 */
2133 		if ((hwerrs & ~HWE_MASK(LATriggered)) ||
2134 		    dd->cspec->stay_in_freeze) {
2135 			/*
2136 			 * If any set that we aren't ignoring only make the
2137 			 * complaint once, in case it's stuck or recurring,
2138 			 * and we get here multiple times
2139 			 * Force link down, so switch knows, and
2140 			 * LEDs are turned off.
2141 			 */
2142 			if (dd->flags & QIB_INITTED)
2143 				isfatal = 1;
2144 		} else
2145 			qib_7322_clear_freeze(dd);
2146 	}
2147 
2148 	if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
2149 		isfatal = 1;
2150 		strlcpy(msg,
2151 			"[Memory BIST test failed, InfiniPath hardware unusable]",
2152 			msgl);
2153 		/* ignore from now on, so disable until driver reloaded */
2154 		dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
2155 		qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2156 	}
2157 
2158 	err_decode(msg, msgl, hwerrs, qib_7322_hwerror_msgs);
2159 
2160 	/* Ignore esoteric PLL failures et al. */
2161 
2162 	qib_dev_err(dd, "%s hardware error\n", msg);
2163 
2164 	if (hwerrs &
2165 		   (SYM_MASK(HwErrMask, SDmaMemReadErrMask_0) |
2166 		    SYM_MASK(HwErrMask, SDmaMemReadErrMask_1))) {
2167 		int pidx = 0;
2168 		int err;
2169 		unsigned long flags;
2170 		struct qib_pportdata *ppd = dd->pport;
2171 
2172 		for (; pidx < dd->num_pports; ++pidx, ppd++) {
2173 			err = 0;
2174 			if (pidx == 0 && (hwerrs &
2175 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_0)))
2176 				err++;
2177 			if (pidx == 1 && (hwerrs &
2178 				SYM_MASK(HwErrMask, SDmaMemReadErrMask_1)))
2179 				err++;
2180 			if (err) {
2181 				spin_lock_irqsave(&ppd->sdma_lock, flags);
2182 				dump_sdma_7322_state(ppd);
2183 				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
2184 			}
2185 		}
2186 	}
2187 
2188 	if (isfatal && !dd->diag_client) {
2189 		qib_dev_err(dd,
2190 			"Fatal Hardware Error, no longer usable, SN %.16s\n",
2191 			dd->serial);
2192 		/*
2193 		 * for /sys status file and user programs to print; if no
2194 		 * trailing brace is copied, we'll know it was truncated.
2195 		 */
2196 		if (dd->freezemsg)
2197 			snprintf(dd->freezemsg, dd->freezelen,
2198 				 "{%s}", msg);
2199 		qib_disable_after_error(dd);
2200 	}
2201 bail:;
2202 }
2203 
2204 /**
2205  * qib_7322_init_hwerrors - enable hardware errors
2206  * @dd: the qlogic_ib device
2207  *
2208  * now that we have finished initializing everything that might reasonably
2209  * cause a hardware error, and cleared those error bits as they occur,
2210  * we can enable hardware errors in the mask (potentially enabling
2211  * freeze mode), and enable hardware errors as errors (along with
2212  * everything else) in errormask
2213  */
2214 static void qib_7322_init_hwerrors(struct qib_devdata *dd)
2215 {
2216 	int pidx;
2217 	u64 extsval;
2218 
2219 	extsval = qib_read_kreg64(dd, kr_extstatus);
2220 	if (!(extsval & (QIB_EXTS_MEMBIST_DISABLED |
2221 			 QIB_EXTS_MEMBIST_ENDTEST)))
2222 		qib_dev_err(dd, "MemBIST did not complete!\n");
2223 
2224 	/* never clear BIST failure, so reported on each driver load */
2225 	qib_write_kreg(dd, kr_hwerrclear, ~HWE_MASK(PowerOnBISTFailed));
2226 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
2227 
2228 	/* clear all */
2229 	qib_write_kreg(dd, kr_errclear, ~0ULL);
2230 	/* enable errors that are masked, at least this first time. */
2231 	qib_write_kreg(dd, kr_errmask, ~0ULL);
2232 	dd->cspec->errormask = qib_read_kreg64(dd, kr_errmask);
2233 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
2234 		if (dd->pport[pidx].link_speed_supported)
2235 			qib_write_kreg_port(dd->pport + pidx, krp_errmask,
2236 					    ~0ULL);
2237 }
2238 
2239 /*
2240  * Disable and enable the armlaunch error.  Used for PIO bandwidth testing
2241  * on chips that are count-based, rather than trigger-based.  There is no
2242  * reference counting, but that's also fine, given the intended use.
2243  * Only chip-specific because it's all register accesses
2244  */
2245 static void qib_set_7322_armlaunch(struct qib_devdata *dd, u32 enable)
2246 {
2247 	if (enable) {
2248 		qib_write_kreg(dd, kr_errclear, QIB_E_SPIOARMLAUNCH);
2249 		dd->cspec->errormask |= QIB_E_SPIOARMLAUNCH;
2250 	} else
2251 		dd->cspec->errormask &= ~QIB_E_SPIOARMLAUNCH;
2252 	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
2253 }
2254 
2255 /*
2256  * Formerly took parameter <which> in pre-shifted,
2257  * pre-merged form with LinkCmd and LinkInitCmd
2258  * together, and assuming the zero was NOP.
2259  */
2260 static void qib_set_ib_7322_lstate(struct qib_pportdata *ppd, u16 linkcmd,
2261 				   u16 linitcmd)
2262 {
2263 	u64 mod_wd;
2264 	struct qib_devdata *dd = ppd->dd;
2265 	unsigned long flags;
2266 
2267 	if (linitcmd == QLOGIC_IB_IBCC_LINKINITCMD_DISABLE) {
2268 		/*
2269 		 * If we are told to disable, note that so link-recovery
2270 		 * code does not attempt to bring us back up.
2271 		 * Also reset everything that we can, so we start
2272 		 * completely clean when re-enabled (before we
2273 		 * actually issue the disable to the IBC)
2274 		 */
2275 		qib_7322_mini_pcs_reset(ppd);
2276 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2277 		ppd->lflags |= QIBL_IB_LINK_DISABLED;
2278 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2279 	} else if (linitcmd || linkcmd == QLOGIC_IB_IBCC_LINKCMD_DOWN) {
2280 		/*
2281 		 * Any other linkinitcmd will lead to LINKDOWN and then
2282 		 * to INIT (if all is well), so clear flag to let
2283 		 * link-recovery code attempt to bring us back up.
2284 		 */
2285 		spin_lock_irqsave(&ppd->lflags_lock, flags);
2286 		ppd->lflags &= ~QIBL_IB_LINK_DISABLED;
2287 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2288 		/*
2289 		 * Clear status change interrupt reduction so the
2290 		 * new state is seen.
2291 		 */
2292 		ppd->cpspec->ibcctrl_a &=
2293 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
2294 	}
2295 
2296 	mod_wd = (linkcmd << IBA7322_IBCC_LINKCMD_SHIFT) |
2297 		(linitcmd << QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2298 
2299 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a |
2300 			    mod_wd);
2301 	/* write to chip to prevent back-to-back writes of ibc reg */
2302 	qib_write_kreg(dd, kr_scratch, 0);
2303 
2304 }
2305 
2306 /*
2307  * The total RCV buffer memory is 64KB, used for both ports, and is
2308  * in units of 64 bytes (same as IB flow control credit unit).
2309  * The consumedVL unit in the same registers are in 32 byte units!
2310  * So, a VL15 packet needs 4.50 IB credits, and 9 rx buffer chunks,
2311  * and we can therefore allocate just 9 IB credits for 2 VL15 packets
2312  * in krp_rxcreditvl15, rather than 10.
2313  */
2314 #define RCV_BUF_UNITSZ 64
2315 #define NUM_RCV_BUF_UNITS(dd) ((64 * 1024) / (RCV_BUF_UNITSZ * dd->num_pports))
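/* e.g. a dual-port board gets (64 * 1024) / (64 * 2) = 512 units per port */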
2316 
2317 static void set_vls(struct qib_pportdata *ppd)
2318 {
2319 	int i, numvls, totcred, cred_vl, vl0extra;
2320 	struct qib_devdata *dd = ppd->dd;
2321 	u64 val;
2322 
2323 	numvls = qib_num_vls(ppd->vls_operational);
2324 
2325 	/*
2326 	 * Set up per-VL credits. Below is kluge based on these assumptions:
2327 	 * 1) port is disabled at the time early_init is called.
2328 	 * 2) give VL15 enough credits for two max-plausible packets.
2329 	 * 3) Give VL0-N the rest, with any rounding excess used for VL0
2330 	 */
2331 	/* 2 VL15 packets @ 288 bytes each (including IB headers) */
2332 	totcred = NUM_RCV_BUF_UNITS(dd);
2333 	cred_vl = (2 * 288 + RCV_BUF_UNITSZ - 1) / RCV_BUF_UNITSZ;
2334 	totcred -= cred_vl;
2335 	qib_write_kreg_port(ppd, krp_rxcreditvl15, (u64) cred_vl);
2336 	cred_vl = totcred / numvls;
2337 	vl0extra = totcred - cred_vl * numvls;
2338 	qib_write_kreg_port(ppd, krp_rxcreditvl0, cred_vl + vl0extra);
2339 	for (i = 1; i < numvls; i++)
2340 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, cred_vl);
2341 	for (; i < 8; i++) /* no buffer space for other VLs */
2342 		qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
2343 
2344 	/* Notify IBC that credits need to be recalculated */
2345 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
2346 	val |= SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2347 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2348 	qib_write_kreg(dd, kr_scratch, 0ULL);
2349 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, CREDIT_CHANGE);
2350 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
2351 
2352 	for (i = 0; i < numvls; i++)
2353 		val = qib_read_kreg_port(ppd, krp_rxcreditvl0 + i);
2354 	val = qib_read_kreg_port(ppd, krp_rxcreditvl15);
2355 
2356 	/* Change the number of operational VLs */
2357 	ppd->cpspec->ibcctrl_a = (ppd->cpspec->ibcctrl_a &
2358 				~SYM_MASK(IBCCtrlA_0, NumVLane)) |
2359 		((u64)(numvls - 1) << SYM_LSB(IBCCtrlA_0, NumVLane));
2360 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2361 	qib_write_kreg(dd, kr_scratch, 0ULL);
2362 }
2363 
2364 /*
2365  * The code that deals with actual SerDes is in serdes_7322_init().
2366  * Compared to the code for iba7220, it is minimal.
2367  */
2368 static int serdes_7322_init(struct qib_pportdata *ppd);
2369 
2370 /**
2371  * qib_7322_bringup_serdes - bring up the serdes
2372  * @ppd: physical port on the qlogic_ib device
2373  */
2374 static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
2375 {
2376 	struct qib_devdata *dd = ppd->dd;
2377 	u64 val, guid, ibc;
2378 	unsigned long flags;
2379 	int ret = 0;
2380 
2381 	/*
2382 	 * SerDes model not in Pd, but still need to
2383 	 * set up much of IBCCtrl and IBCDDRCtrl; move elsewhere
2384 	 * eventually.
2385 	 */
2386 	/* Put IBC in reset, sends disabled (should be in reset already) */
2387 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2388 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
2389 	qib_write_kreg(dd, kr_scratch, 0ULL);
2390 
2391 	/* ensure previous Tx parameters are not still forced */
2392 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
2393 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
2394 		reset_tx_deemphasis_override));
2395 
2396 	if (qib_compat_ddr_negotiate) {
2397 		ppd->cpspec->ibdeltainprog = 1;
2398 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
2399 						crp_ibsymbolerr);
2400 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
2401 						crp_iblinkerrrecov);
2402 	}
2403 
2404 	/* flowcontrolwatermark is in units of KBytes */
2405 	ibc = 0x5ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlWaterMark);
2406 	/*
2407 	 * Flow control is sent this often, even if no changes in
2408 	 * buffer space occur.  Units are 128ns for this chip.
2409 	 * Set to 3usec.
2410 	 */
2411 	ibc |= 24ULL << SYM_LSB(IBCCtrlA_0, FlowCtrlPeriod);
2412 	/* max error tolerance */
2413 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
2414 	/* IB credit flow control. */
2415 	ibc |= 0xfULL << SYM_LSB(IBCCtrlA_0, OverrunThreshold);
2416 	/*
2417 	 * set initial max size pkt IBC will send, including ICRC; it's the
2418 	 * PIO buffer size in dwords, less 1; also see qib_set_mtu()
2419 	 */
2420 	ibc |= ((u64)(ppd->ibmaxlen >> 2) + 1) <<
2421 		SYM_LSB(IBCCtrlA_0, MaxPktLen);
2422 	ppd->cpspec->ibcctrl_a = ibc; /* without linkcmd or linkinitcmd! */
2423 
2424 	/*
2425 	 * Reset the PCS interface to the serdes (and also ibc, which is still
2426 	 * in reset from above).  Writes new value of ibcctrl_a as last step.
2427 	 */
2428 	qib_7322_mini_pcs_reset(ppd);
2429 
2430 	if (!ppd->cpspec->ibcctrl_b) {
2431 		unsigned lse = ppd->link_speed_enabled;
2432 
2433 		/*
2434 		 * Not on re-init after reset, establish shadow
2435 		 * and force initial config.
2436 		 */
2437 		ppd->cpspec->ibcctrl_b = qib_read_kreg_port(ppd,
2438 							     krp_ibcctrl_b);
2439 		ppd->cpspec->ibcctrl_b &= ~(IBA7322_IBC_SPEED_QDR |
2440 				IBA7322_IBC_SPEED_DDR |
2441 				IBA7322_IBC_SPEED_SDR |
2442 				IBA7322_IBC_WIDTH_AUTONEG |
2443 				SYM_MASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED));
2444 		if (lse & (lse - 1)) /* Multiple speeds enabled */
2445 			ppd->cpspec->ibcctrl_b |=
2446 				(lse << IBA7322_IBC_SPEED_LSB) |
2447 				IBA7322_IBC_IBTA_1_2_MASK |
2448 				IBA7322_IBC_MAX_SPEED_MASK;
2449 		else
2450 			ppd->cpspec->ibcctrl_b |= (lse == QIB_IB_QDR) ?
2451 				IBA7322_IBC_SPEED_QDR |
2452 				 IBA7322_IBC_IBTA_1_2_MASK :
2453 				(lse == QIB_IB_DDR) ?
2454 					IBA7322_IBC_SPEED_DDR :
2455 					IBA7322_IBC_SPEED_SDR;
2456 		if ((ppd->link_width_enabled & (IB_WIDTH_1X | IB_WIDTH_4X)) ==
2457 		    (IB_WIDTH_1X | IB_WIDTH_4X))
2458 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_WIDTH_AUTONEG;
2459 		else
2460 			ppd->cpspec->ibcctrl_b |=
2461 				ppd->link_width_enabled == IB_WIDTH_4X ?
2462 				IBA7322_IBC_WIDTH_4X_ONLY :
2463 				IBA7322_IBC_WIDTH_1X_ONLY;
2464 
2465 		/* always enable these on driver reload, not sticky */
2466 		ppd->cpspec->ibcctrl_b |= (IBA7322_IBC_RXPOL_MASK |
2467 			IBA7322_IBC_HRTBT_MASK);
2468 	}
2469 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
2470 
2471 	/* setup so we have more time at CFGTEST to change H1 */
2472 	val = qib_read_kreg_port(ppd, krp_ibcctrl_c);
2473 	val &= ~SYM_MASK(IBCCtrlC_0, IB_FRONT_PORCH);
2474 	val |= 0xfULL << SYM_LSB(IBCCtrlC_0, IB_FRONT_PORCH);
2475 	qib_write_kreg_port(ppd, krp_ibcctrl_c, val);
2476 
2477 	serdes_7322_init(ppd);
2478 
2479 	guid = be64_to_cpu(ppd->guid);
2480 	if (!guid) {
2481 		if (dd->base_guid)
2482 			guid = be64_to_cpu(dd->base_guid) + ppd->port - 1;
2483 		ppd->guid = cpu_to_be64(guid);
2484 	}
2485 
2486 	qib_write_kreg_port(ppd, krp_hrtbt_guid, guid);
2487 	/* write to chip to prevent back-to-back writes of ibc reg */
2488 	qib_write_kreg(dd, kr_scratch, 0);
2489 
2490 	/* Enable port */
2491 	ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0, IBLinkEn);
2492 	set_vls(ppd);
2493 
2494 	/* initially come up DISABLED, without sending anything. */
2495 	val = ppd->cpspec->ibcctrl_a | (QLOGIC_IB_IBCC_LINKINITCMD_DISABLE <<
2496 					QLOGIC_IB_IBCC_LINKINITCMD_SHIFT);
2497 	qib_write_kreg_port(ppd, krp_ibcctrl_a, val);
2498 	qib_write_kreg(dd, kr_scratch, 0ULL);
2499 	/* clear the linkinit cmds */
2500 	ppd->cpspec->ibcctrl_a = val & ~SYM_MASK(IBCCtrlA_0, LinkInitCmd);
2501 
2502 	/* be paranoid against later code motion, etc. */
2503 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
2504 	ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvIBPortEnable);
2505 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
2506 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
2507 
2508 	/* Also enable IBSTATUSCHG interrupt.  */
2509 	val = qib_read_kreg_port(ppd, krp_errmask);
2510 	qib_write_kreg_port(ppd, krp_errmask,
2511 		val | ERR_MASK_N(IBStatusChanged));
2512 
2513 	/* Always zero until we start messing with SerDes for real */
2514 	return ret;
2515 }
2516 
2517 /**
2518  * qib_7322_mini_quiet_serdes - set serdes to txidle
2519  * @ppd: physical port on the qlogic_ib device
2520  * Called when driver is being unloaded
2521  */
2522 static void qib_7322_mini_quiet_serdes(struct qib_pportdata *ppd)
2523 {
2524 	u64 val;
2525 	unsigned long flags;
2526 
2527 	qib_set_ib_7322_lstate(ppd, 0, QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
2528 
2529 	spin_lock_irqsave(&ppd->lflags_lock, flags);
2530 	ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
2531 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2532 	wake_up(&ppd->cpspec->autoneg_wait);
2533 	cancel_delayed_work_sync(&ppd->cpspec->autoneg_work);
2534 	if (ppd->dd->cspec->r1)
2535 		cancel_delayed_work_sync(&ppd->cpspec->ipg_work);
2536 
2537 	ppd->cpspec->chase_end = 0;
2538 	if (ppd->cpspec->chase_timer.function) /* if initted */
2539 		del_timer_sync(&ppd->cpspec->chase_timer);
2540 
2541 	/*
2542 	 * Despite the name, actually disables IBC as well. Do it when
2543 	 * we are as sure as possible that no more packets can be
2544 	 * received, following the down and the PCS reset.
2545 	 * The actual disabling happens in qib_7322_mini_pcs_reset(),
2546 	 * along with the PCS being reset.
2547 	 */
2548 	ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, IBLinkEn);
2549 	qib_7322_mini_pcs_reset(ppd);
2550 
2551 	/*
2552 	 * Update the adjusted counters so the adjustment persists
2553 	 * across driver reload.
2554 	 */
2555 	if (ppd->cpspec->ibsymdelta || ppd->cpspec->iblnkerrdelta ||
2556 	    ppd->cpspec->ibdeltainprog || ppd->cpspec->iblnkdowndelta) {
2557 		struct qib_devdata *dd = ppd->dd;
2558 		u64 diagc;
2559 
2560 		/* enable counter writes */
2561 		diagc = qib_read_kreg64(dd, kr_hwdiagctrl);
2562 		qib_write_kreg(dd, kr_hwdiagctrl,
2563 			       diagc | SYM_MASK(HwDiagCtrl, CounterWrEnable));
2564 
2565 		if (ppd->cpspec->ibsymdelta || ppd->cpspec->ibdeltainprog) {
2566 			val = read_7322_creg32_port(ppd, crp_ibsymbolerr);
2567 			if (ppd->cpspec->ibdeltainprog)
2568 				val -= val - ppd->cpspec->ibsymsnap;
2569 			val -= ppd->cpspec->ibsymdelta;
2570 			write_7322_creg_port(ppd, crp_ibsymbolerr, val);
2571 		}
2572 		if (ppd->cpspec->iblnkerrdelta || ppd->cpspec->ibdeltainprog) {
2573 			val = read_7322_creg32_port(ppd, crp_iblinkerrrecov);
2574 			if (ppd->cpspec->ibdeltainprog)
2575 				val -= val - ppd->cpspec->iblnkerrsnap;
2576 			val -= ppd->cpspec->iblnkerrdelta;
2577 			write_7322_creg_port(ppd, crp_iblinkerrrecov, val);
2578 		}
2579 		if (ppd->cpspec->iblnkdowndelta) {
2580 			val = read_7322_creg32_port(ppd, crp_iblinkdown);
2581 			val += ppd->cpspec->iblnkdowndelta;
2582 			write_7322_creg_port(ppd, crp_iblinkdown, val);
2583 		}
2584 		/*
2585 		 * No need to save ibmalfdelta since IB perfcounters
2586 		 * are cleared on driver reload.
2587 		 */
2588 
2589 		/* and disable counter writes */
2590 		qib_write_kreg(dd, kr_hwdiagctrl, diagc);
2591 	}
2592 }
2593 
2594 /**
2595  * qib_setup_7322_setextled - set the state of the two external LEDs
2596  * @ppd: physical port on the qlogic_ib device
2597  * @on: whether the link is up or not
2598  *
2599  * The exact combo of LEDs when @on is true is determined by looking
2600  * at the ibcstatus.
2601  *
2602  * These LEDs indicate the physical and logical state of IB link.
2603  * For this chip (at least with recommended board pinouts), LED1
2604  * is Yellow (logical state) and LED2 is Green (physical state),
2605  *
2606  * Note:  We try to match the Mellanox HCA LED behavior as best
2607  * we can.  Green indicates physical link state is OK (something is
2608  * plugged in, and we can train).
2609  * Amber indicates the link is logically up (ACTIVE).
2610  * Mellanox further blinks the amber LED to indicate data packet
2611  * activity, but we have no hardware support for that, so it would
2612  * require waking up every 10-20 msecs and checking the counters
2613  * on the chip, and then turning the LED off if appropriate.  That's
2614  * visible overhead, so not something we will do.
2615  */
2616 static void qib_setup_7322_setextled(struct qib_pportdata *ppd, u32 on)
2617 {
2618 	struct qib_devdata *dd = ppd->dd;
2619 	u64 extctl, ledblink = 0, val;
2620 	unsigned long flags;
2621 	int yel, grn;
2622 
2623 	/*
2624 	 * The diags use the LED to indicate diag info, so we leave
2625 	 * the external LED alone when the diags are running.
2626 	 */
2627 	if (dd->diag_client)
2628 		return;
2629 
2630 	/* Allow override of LED display for, e.g. Locating system in rack */
2631 	if (ppd->led_override) {
2632 		grn = (ppd->led_override & QIB_LED_PHYS);
2633 		yel = (ppd->led_override & QIB_LED_LOG);
2634 	} else if (on) {
2635 		val = qib_read_kreg_port(ppd, krp_ibcstatus_a);
2636 		grn = qib_7322_phys_portstate(val) ==
2637 			IB_PHYSPORTSTATE_LINKUP;
2638 		yel = qib_7322_iblink_state(val) == IB_PORT_ACTIVE;
2639 	} else {
2640 		grn = 0;
2641 		yel = 0;
2642 	}
2643 
2644 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2645 	extctl = dd->cspec->extctrl & (ppd->port == 1 ?
2646 		~ExtLED_IB1_MASK : ~ExtLED_IB2_MASK);
2647 	if (grn) {
2648 		extctl |= ppd->port == 1 ? ExtLED_IB1_GRN : ExtLED_IB2_GRN;
2649 		/*
2650 		 * Counts are in chip clock (4ns) periods.
2651 		 * This is 1/16 sec (66.6ms) on,
2652 		 * 3/16 sec (187.5 ms) off, with packets rcvd.
2653 		 */
2654 		ledblink = ((66600 * 1000UL / 4) << IBA7322_LEDBLINK_ON_SHIFT) |
2655 			((187500 * 1000UL / 4) << IBA7322_LEDBLINK_OFF_SHIFT);
2656 	}
2657 	if (yel)
2658 		extctl |= ppd->port == 1 ? ExtLED_IB1_YEL : ExtLED_IB2_YEL;
2659 	dd->cspec->extctrl = extctl;
2660 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
2661 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2662 
2663 	if (ledblink) /* blink the LED on packet receive */
2664 		qib_write_kreg_port(ppd, krp_rcvpktledcnt, ledblink);
2665 }
2666 
2667 #ifdef CONFIG_INFINIBAND_QIB_DCA
2668 
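/*
 * DCA provider add/remove notification: register as a DCA requester and
 * program the DCA control state when a provider appears, and tear that
 * down again when the provider is removed.
 */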
2669 static int qib_7322_notify_dca(struct qib_devdata *dd, unsigned long event)
2670 {
2671 	switch (event) {
2672 	case DCA_PROVIDER_ADD:
2673 		if (dd->flags & QIB_DCA_ENABLED)
2674 			break;
2675 		if (!dca_add_requester(&dd->pcidev->dev)) {
2676 			qib_devinfo(dd->pcidev, "DCA enabled\n");
2677 			dd->flags |= QIB_DCA_ENABLED;
2678 			qib_setup_dca(dd);
2679 		}
2680 		break;
2681 	case DCA_PROVIDER_REMOVE:
2682 		if (dd->flags & QIB_DCA_ENABLED) {
2683 			dca_remove_requester(&dd->pcidev->dev);
2684 			dd->flags &= ~QIB_DCA_ENABLED;
2685 			dd->cspec->dca_ctrl = 0;
2686 			qib_write_kreg(dd, KREG_IDX(DCACtrlA),
2687 				dd->cspec->dca_ctrl);
2688 		}
2689 		break;
2690 	}
2691 	return 0;
2692 }
2693 
2694 static void qib_update_rhdrq_dca(struct qib_ctxtdata *rcd, int cpu)
2695 {
2696 	struct qib_devdata *dd = rcd->dd;
2697 	struct qib_chip_specific *cspec = dd->cspec;
2698 
2699 	if (!(dd->flags & QIB_DCA_ENABLED))
2700 		return;
2701 	if (cspec->rhdr_cpu[rcd->ctxt] != cpu) {
2702 		const struct dca_reg_map *rmp;
2703 
2704 		cspec->rhdr_cpu[rcd->ctxt] = cpu;
2705 		rmp = &dca_rcvhdr_reg_map[rcd->ctxt];
2706 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] &= rmp->mask;
2707 		cspec->dca_rcvhdr_ctrl[rmp->shadow_inx] |=
2708 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) << rmp->lsb;
2709 		qib_devinfo(dd->pcidev,
2710 			"Ctxt %d cpu %d dca %llx\n", rcd->ctxt, cpu,
2711 			(long long) cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2712 		qib_write_kreg(dd, rmp->regno,
2713 			       cspec->dca_rcvhdr_ctrl[rmp->shadow_inx]);
2714 		cspec->dca_ctrl |= SYM_MASK(DCACtrlA, RcvHdrqDCAEnable);
2715 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2716 	}
2717 }
2718 
2719 static void qib_update_sdma_dca(struct qib_pportdata *ppd, int cpu)
2720 {
2721 	struct qib_devdata *dd = ppd->dd;
2722 	struct qib_chip_specific *cspec = dd->cspec;
2723 	unsigned pidx = ppd->port - 1;
2724 
2725 	if (!(dd->flags & QIB_DCA_ENABLED))
2726 		return;
2727 	if (cspec->sdma_cpu[pidx] != cpu) {
2728 		cspec->sdma_cpu[pidx] = cpu;
2729 		cspec->dca_rcvhdr_ctrl[4] &= ~(ppd->hw_pidx ?
2730 			SYM_MASK(DCACtrlF, SendDma1DCAOPH) :
2731 			SYM_MASK(DCACtrlF, SendDma0DCAOPH));
2732 		cspec->dca_rcvhdr_ctrl[4] |=
2733 			(u64) dca3_get_tag(&dd->pcidev->dev, cpu) <<
2734 				(ppd->hw_pidx ?
2735 					SYM_LSB(DCACtrlF, SendDma1DCAOPH) :
2736 					SYM_LSB(DCACtrlF, SendDma0DCAOPH));
2737 		qib_devinfo(dd->pcidev,
2738 			"sdma %d cpu %d dca %llx\n", ppd->hw_pidx, cpu,
2739 			(long long) cspec->dca_rcvhdr_ctrl[4]);
2740 		qib_write_kreg(dd, KREG_IDX(DCACtrlF),
2741 			       cspec->dca_rcvhdr_ctrl[4]);
2742 		cspec->dca_ctrl |= ppd->hw_pidx ?
2743 			SYM_MASK(DCACtrlA, SendDMAHead1DCAEnable) :
2744 			SYM_MASK(DCACtrlA, SendDMAHead0DCAEnable);
2745 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), cspec->dca_ctrl);
2746 	}
2747 }
2748 
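/*
 * Initialize the DCA shadow state: no CPU assigned yet for any rcvhdrq
 * or SDMA engine, default transfer counts programmed into the
 * DCACtrlB..F registers, and per-vector IRQ affinity notifiers hooked up.
 */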
2749 static void qib_setup_dca(struct qib_devdata *dd)
2750 {
2751 	struct qib_chip_specific *cspec = dd->cspec;
2752 	int i;
2753 
2754 	for (i = 0; i < ARRAY_SIZE(cspec->rhdr_cpu); i++)
2755 		cspec->rhdr_cpu[i] = -1;
2756 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2757 		cspec->sdma_cpu[i] = -1;
2758 	cspec->dca_rcvhdr_ctrl[0] =
2759 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq0DCAXfrCnt)) |
2760 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq1DCAXfrCnt)) |
2761 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq2DCAXfrCnt)) |
2762 		(1ULL << SYM_LSB(DCACtrlB, RcvHdrq3DCAXfrCnt));
2763 	cspec->dca_rcvhdr_ctrl[1] =
2764 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq4DCAXfrCnt)) |
2765 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq5DCAXfrCnt)) |
2766 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq6DCAXfrCnt)) |
2767 		(1ULL << SYM_LSB(DCACtrlC, RcvHdrq7DCAXfrCnt));
2768 	cspec->dca_rcvhdr_ctrl[2] =
2769 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq8DCAXfrCnt)) |
2770 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq9DCAXfrCnt)) |
2771 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq10DCAXfrCnt)) |
2772 		(1ULL << SYM_LSB(DCACtrlD, RcvHdrq11DCAXfrCnt));
2773 	cspec->dca_rcvhdr_ctrl[3] =
2774 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq12DCAXfrCnt)) |
2775 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq13DCAXfrCnt)) |
2776 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq14DCAXfrCnt)) |
2777 		(1ULL << SYM_LSB(DCACtrlE, RcvHdrq15DCAXfrCnt));
2778 	cspec->dca_rcvhdr_ctrl[4] =
2779 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq16DCAXfrCnt)) |
2780 		(1ULL << SYM_LSB(DCACtrlF, RcvHdrq17DCAXfrCnt));
2781 	for (i = 0; i < ARRAY_SIZE(cspec->sdma_cpu); i++)
2782 		qib_write_kreg(dd, KREG_IDX(DCACtrlB) + i,
2783 			       cspec->dca_rcvhdr_ctrl[i]);
2784 	for (i = 0; i < cspec->num_msix_entries; i++)
2785 		setup_dca_notifier(dd, i);
2786 }
2787 
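/*
 * IRQ affinity notifier: when an MSIx vector's affinity changes,
 * retarget the corresponding receive-header or SDMA DCA tag to the
 * first CPU in the new mask.
 */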
2788 static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
2789 			     const cpumask_t *mask)
2790 {
2791 	struct qib_irq_notify *n =
2792 		container_of(notify, struct qib_irq_notify, notify);
2793 	int cpu = cpumask_first(mask);
2794 
2795 	if (n->rcv) {
2796 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2797 
2798 		qib_update_rhdrq_dca(rcd, cpu);
2799 	} else {
2800 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2801 
2802 		qib_update_sdma_dca(ppd, cpu);
2803 	}
2804 }
2805 
2806 static void qib_irq_notifier_release(struct kref *ref)
2807 {
2808 	struct qib_irq_notify *n =
2809 		container_of(ref, struct qib_irq_notify, notify.kref);
2810 	struct qib_devdata *dd;
2811 
2812 	if (n->rcv) {
2813 		struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
2814 
2815 		dd = rcd->dd;
2816 	} else {
2817 		struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
2818 
2819 		dd = ppd->dd;
2820 	}
2821 	qib_devinfo(dd->pcidev,
2822 		"release on HCA notify 0x%p n 0x%p\n", ref, n);
2823 	kfree(n);
2824 }
2825 #endif
2826 
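/*
 * Free the MSIx (or INTx) interrupts along with their affinity and DCA
 * notifier state, and make sure nothing is left pending in IntGranted.
 */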
2827 static void qib_7322_free_irq(struct qib_devdata *dd)
2828 {
2829 	u64 intgranted;
2830 	int i;
2831 
2832 	dd->cspec->main_int_mask = ~0ULL;
2833 
2834 	for (i = 0; i < dd->cspec->num_msix_entries; i++) {
2835 		/* only free IRQs that were allocated */
2836 		if (dd->cspec->msix_entries[i].arg) {
2837 #ifdef CONFIG_INFINIBAND_QIB_DCA
2838 			reset_dca_notifier(dd, i);
2839 #endif
2840 			irq_set_affinity_hint(pci_irq_vector(dd->pcidev, i),
2841 					      NULL);
2842 			free_cpumask_var(dd->cspec->msix_entries[i].mask);
2843 			pci_free_irq(dd->pcidev, i,
2844 				     dd->cspec->msix_entries[i].arg);
2845 		}
2846 	}
2847 
2848 	/* If num_msix_entries was 0, disable the INTx IRQ */
2849 	if (!dd->cspec->num_msix_entries)
2850 		pci_free_irq(dd->pcidev, 0, dd);
2851 	else
2852 		dd->cspec->num_msix_entries = 0;
2853 
2854 	pci_free_irq_vectors(dd->pcidev);
2855 
2856 	/* make sure no MSIx interrupts are left pending */
2857 	intgranted = qib_read_kreg64(dd, kr_intgranted);
2858 	if (intgranted)
2859 		qib_write_kreg(dd, kr_intgranted, intgranted);
2860 }
2861 
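/*
 * Chip-specific teardown: disable DCA, free IRQs and the various
 * per-device and per-port buffers, and mask off QSFP GPIO interrupts.
 */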
2862 static void qib_setup_7322_cleanup(struct qib_devdata *dd)
2863 {
2864 	int i;
2865 
2866 #ifdef CONFIG_INFINIBAND_QIB_DCA
2867 	if (dd->flags & QIB_DCA_ENABLED) {
2868 		dca_remove_requester(&dd->pcidev->dev);
2869 		dd->flags &= ~QIB_DCA_ENABLED;
2870 		dd->cspec->dca_ctrl = 0;
2871 		qib_write_kreg(dd, KREG_IDX(DCACtrlA), dd->cspec->dca_ctrl);
2872 	}
2873 #endif
2874 
2875 	qib_7322_free_irq(dd);
2876 	kfree(dd->cspec->cntrs);
2877 	kfree(dd->cspec->sendchkenable);
2878 	kfree(dd->cspec->sendgrhchk);
2879 	kfree(dd->cspec->sendibchk);
2880 	kfree(dd->cspec->msix_entries);
2881 	for (i = 0; i < dd->num_pports; i++) {
2882 		unsigned long flags;
2883 		u32 mask = QSFP_GPIO_MOD_PRS_N |
2884 			(QSFP_GPIO_MOD_PRS_N << QSFP_GPIO_PORT2_SHIFT);
2885 
2886 		kfree(dd->pport[i].cpspec->portcntrs);
2887 		if (dd->flags & QIB_HAS_QSFP) {
2888 			spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
2889 			dd->cspec->gpio_mask &= ~mask;
2890 			qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
2891 			spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
2892 		}
2893 	}
2894 }
2895 
2896 /* handle SDMA interrupts */
2897 static void sdma_7322_intr(struct qib_devdata *dd, u64 istat)
2898 {
2899 	struct qib_pportdata *ppd0 = &dd->pport[0];
2900 	struct qib_pportdata *ppd1 = &dd->pport[1];
2901 	u64 intr0 = istat & (INT_MASK_P(SDma, 0) |
2902 		INT_MASK_P(SDmaIdle, 0) | INT_MASK_P(SDmaProgress, 0));
2903 	u64 intr1 = istat & (INT_MASK_P(SDma, 1) |
2904 		INT_MASK_P(SDmaIdle, 1) | INT_MASK_P(SDmaProgress, 1));
2905 
2906 	if (intr0)
2907 		qib_sdma_intr(ppd0);
2908 	if (intr1)
2909 		qib_sdma_intr(ppd1);
2910 
2911 	if (istat & INT_MASK_PM(SDmaCleanupDone, 0))
2912 		qib_sdma_process_event(ppd0, qib_sdma_event_e20_hw_started);
2913 	if (istat & INT_MASK_PM(SDmaCleanupDone, 1))
2914 		qib_sdma_process_event(ppd1, qib_sdma_event_e20_hw_started);
2915 }
2916 
2917 /*
2918  * Set or clear the Send buffer available interrupt enable bit.
2919  */
2920 static void qib_wantpiobuf_7322_intr(struct qib_devdata *dd, u32 needint)
2921 {
2922 	unsigned long flags;
2923 
2924 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
2925 	if (needint)
2926 		dd->sendctrl |= SYM_MASK(SendCtrl, SendIntBufAvail);
2927 	else
2928 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendIntBufAvail);
2929 	qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
2930 	qib_write_kreg(dd, kr_scratch, 0ULL);
2931 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
2932 }
2933 
2934 /*
2935  * Somehow got an interrupt with reserved bits set in interrupt status.
2936  * Print a message so we know it happened, then clear them.
2937  * keep mainline interrupt handler cache-friendly
2938  */
2939 static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
2940 {
2941 	u64 kills;
2943 
2944 	kills = istat & ~QIB_I_BITSEXTANT;
2945 	qib_dev_err(dd,
2946 		"Clearing reserved interrupt(s) 0x%016llx\n",
2947 		(unsigned long long) kills);
2948 	qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
2949 }
2950 
2951 /* keep mainline interrupt handler cache-friendly */
2952 static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
2953 {
2954 	u32 gpiostatus;
2955 	int handled = 0;
2956 	int pidx;
2957 
2958 	/*
2959 	 * Boards for this chip currently don't use GPIO interrupts,
2960 	 * so clear by writing GPIOstatus to GPIOclear, and complain
2961 	 * to developer.  To avoid endless repeats, clear
2962 	 * the bits in the mask, since there is some kind of
2963 	 * programming error or chip problem.
2964 	 */
2965 	gpiostatus = qib_read_kreg32(dd, kr_gpio_status);
2966 	/*
2967 	 * In theory, writing GPIOstatus to GPIOclear could
2968 	 * have a bad side-effect on some diagnostic that wanted
2969 	 * to poll for a status-change, but the various shadows
2970 	 * make that problematic at best. Diags will just suppress
2971 	 * all GPIO interrupts during such tests.
2972 	 */
2973 	qib_write_kreg(dd, kr_gpio_clear, gpiostatus);
2974 	/*
2975 	 * Check for QSFP MOD_PRS changes
2976 	 * only works for single port if IB1 != pidx1
2977 	 */
2978 	for (pidx = 0; pidx < dd->num_pports && (dd->flags & QIB_HAS_QSFP);
2979 	     ++pidx) {
2980 		struct qib_pportdata *ppd;
2981 		struct qib_qsfp_data *qd;
2982 		u32 mask;
2983 
2984 		if (!dd->pport[pidx].link_speed_supported)
2985 			continue;
2986 		mask = QSFP_GPIO_MOD_PRS_N;
2987 		ppd = dd->pport + pidx;
2988 		mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
2989 		if (gpiostatus & dd->cspec->gpio_mask & mask) {
2990 			u64 pins;
2991 
2992 			qd = &ppd->cpspec->qsfp_data;
2993 			gpiostatus &= ~mask;
2994 			pins = qib_read_kreg64(dd, kr_extstatus);
2995 			pins >>= SYM_LSB(EXTStatus, GPIOIn);
2996 			if (!(pins & mask)) {
2997 				++handled;
2998 				qd->t_insert = jiffies;
2999 				queue_work(ib_wq, &qd->work);
3000 			}
3001 		}
3002 	}
3003 
3004 	if (gpiostatus && !handled) {
3005 		const u32 mask = qib_read_kreg32(dd, kr_gpio_mask);
3006 		u32 gpio_irq = mask & gpiostatus;
3007 
3008 		/*
3009 		 * Clear any troublemakers, and update chip from shadow
3010 		 */
3011 		dd->cspec->gpio_mask &= ~gpio_irq;
3012 		qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
3013 	}
3014 }
3015 
3016 /*
3017  * Handle errors and unusual events first, separate function
3018  * to improve cache hits for fast path interrupt handling.
3019  */
3020 static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
3021 {
3022 	if (istat & ~QIB_I_BITSEXTANT)
3023 		unknown_7322_ibits(dd, istat);
3024 	if (istat & QIB_I_GPIO)
3025 		unknown_7322_gpio_intr(dd);
3026 	if (istat & QIB_I_C_ERROR) {
3027 		qib_write_kreg(dd, kr_errmask, 0ULL);
3028 		tasklet_schedule(&dd->error_tasklet);
3029 	}
3030 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
3031 		handle_7322_p_errors(dd->rcd[0]->ppd);
3032 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
3033 		handle_7322_p_errors(dd->rcd[1]->ppd);
3034 }
3035 
3036 /*
3037  * Dynamically adjust the rcv int timeout for a context based on incoming
3038  * packet rate.
3039  */
3040 static void adjust_rcv_timeout(struct qib_ctxtdata *rcd, int npkts)
3041 {
3042 	struct qib_devdata *dd = rcd->dd;
3043 	u32 timeout = dd->cspec->rcvavail_timeout[rcd->ctxt];
3044 
3045 	/*
3046 	 * Dynamically adjust idle timeout on chip
3047 	 * based on number of packets processed.
3048 	 */
3049 	if (npkts < rcv_int_count && timeout > 2)
3050 		timeout >>= 1;
3051 	else if (npkts >= rcv_int_count && timeout < rcv_int_timeout)
3052 		timeout = min(timeout << 1, rcv_int_timeout);
3053 	else
3054 		return;
3055 
3056 	dd->cspec->rcvavail_timeout[rcd->ctxt] = timeout;
3057 	qib_write_kreg(dd, kr_rcvavailtimeout + rcd->ctxt, timeout);
3058 }
3059 
3060 /*
3061  * This is the main interrupt handler.
3062  * It will normally only be used for low frequency interrupts but may
3063  * have to handle all interrupts if INTx is enabled or fewer than normal
3064  * MSIx interrupts were allocated.
3065  * This routine should ignore the interrupt bits for any of the
3066  * dedicated MSIx handlers.
3067  */
3068 static irqreturn_t qib_7322intr(int irq, void *data)
3069 {
3070 	struct qib_devdata *dd = data;
3071 	irqreturn_t ret;
3072 	u64 istat;
3073 	u64 ctxtrbits;
3074 	u64 rmask;
3075 	unsigned i;
3076 	u32 npkts;
3077 
3078 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT) {
3079 		/*
3080 		 * This return value is not great, but we do not want the
3081 		 * interrupt core code to remove our interrupt handler
3082 		 * because we don't appear to be handling an interrupt
3083 		 * during a chip reset.
3084 		 */
3085 		ret = IRQ_HANDLED;
3086 		goto bail;
3087 	}
3088 
3089 	istat = qib_read_kreg64(dd, kr_intstatus);
3090 
3091 	if (unlikely(istat == ~0ULL)) {
3092 		qib_bad_intrstatus(dd);
3093 		qib_dev_err(dd, "Interrupt status all f's, skipping\n");
3094 		/* don't know if it was our interrupt or not */
3095 		ret = IRQ_NONE;
3096 		goto bail;
3097 	}
3098 
3099 	istat &= dd->cspec->main_int_mask;
3100 	if (unlikely(!istat)) {
3101 		/* already handled, or shared and not us */
3102 		ret = IRQ_NONE;
3103 		goto bail;
3104 	}
3105 
3106 	this_cpu_inc(*dd->int_counter);
3107 
3108 	/* handle "errors" of various kinds first, device ahead of port */
3109 	if (unlikely(istat & (~QIB_I_BITSEXTANT | QIB_I_GPIO |
3110 			      QIB_I_C_ERROR | INT_MASK_P(Err, 0) |
3111 			      INT_MASK_P(Err, 1))))
3112 		unlikely_7322_intr(dd, istat);
3113 
3114 	/*
3115 	 * Clear the interrupt bits we found set, relatively early, so we
3116 	 * "know" the chip will have seen this by the time we process
3117 	 * the queue, and will re-interrupt if necessary.  The processor
3118 	 * itself won't take the interrupt again until we return.
3119 	 */
3120 	qib_write_kreg(dd, kr_intclear, istat);
3121 
3122 	/*
3123 	 * Handle kernel receive queues before checking for pio buffers
3124 	 * available since receives can overflow; piobuf waiters can afford
3125 	 * a few extra cycles, since they were waiting anyway.
3126 	 */
3127 	ctxtrbits = istat & (QIB_I_RCVAVAIL_MASK | QIB_I_RCVURG_MASK);
3128 	if (ctxtrbits) {
3129 		rmask = (1ULL << QIB_I_RCVAVAIL_LSB) |
3130 			(1ULL << QIB_I_RCVURG_LSB);
3131 		for (i = 0; i < dd->first_user_ctxt; i++) {
3132 			if (ctxtrbits & rmask) {
3133 				ctxtrbits &= ~rmask;
3134 				if (dd->rcd[i])
3135 					qib_kreceive(dd->rcd[i], NULL, &npkts);
3136 			}
3137 			rmask <<= 1;
3138 		}
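		/*
		 * Bits still set belong to user contexts; fold the avail
		 * and urgent halves into one per-context bitmask and let
		 * qib_handle_urcv() wake the waiting user contexts.
		 */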
3139 		if (ctxtrbits) {
3140 			ctxtrbits = (ctxtrbits >> QIB_I_RCVAVAIL_LSB) |
3141 				(ctxtrbits >> QIB_I_RCVURG_LSB);
3142 			qib_handle_urcv(dd, ctxtrbits);
3143 		}
3144 	}
3145 
3146 	if (istat & (QIB_I_P_SDMAINT(0) | QIB_I_P_SDMAINT(1)))
3147 		sdma_7322_intr(dd, istat);
3148 
3149 	if ((istat & QIB_I_SPIOBUFAVAIL) && (dd->flags & QIB_INITTED))
3150 		qib_ib_piobufavail(dd);
3151 
3152 	ret = IRQ_HANDLED;
3153 bail:
3154 	return ret;
3155 }
3156 
3157 /*
3158  * Dedicated receive packet available interrupt handler.
3159  */
3160 static irqreturn_t qib_7322pintr(int irq, void *data)
3161 {
3162 	struct qib_ctxtdata *rcd = data;
3163 	struct qib_devdata *dd = rcd->dd;
3164 	u32 npkts;
3165 
3166 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3167 		/*
3168 		 * This return value is not great, but we do not want the
3169 		 * interrupt core code to remove our interrupt handler
3170 		 * because we don't appear to be handling an interrupt
3171 		 * during a chip reset.
3172 		 */
3173 		return IRQ_HANDLED;
3174 
3175 	this_cpu_inc(*dd->int_counter);
3176 
3177 	/* Clear the interrupt bit we expect to be set. */
3178 	qib_write_kreg(dd, kr_intclear, ((1ULL << QIB_I_RCVAVAIL_LSB) |
3179 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
3180 
3181 	qib_kreceive(rcd, NULL, &npkts);
3182 
3183 	return IRQ_HANDLED;
3184 }
3185 
3186 /*
3187  * Dedicated Send buffer available interrupt handler.
3188  */
3189 static irqreturn_t qib_7322bufavail(int irq, void *data)
3190 {
3191 	struct qib_devdata *dd = data;
3192 
3193 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3194 		/*
3195 		 * This return value is not great, but we do not want the
3196 		 * interrupt core code to remove our interrupt handler
3197 		 * because we don't appear to be handling an interrupt
3198 		 * during a chip reset.
3199 		 */
3200 		return IRQ_HANDLED;
3201 
3202 	this_cpu_inc(*dd->int_counter);
3203 
3204 	/* Clear the interrupt bit we expect to be set. */
3205 	qib_write_kreg(dd, kr_intclear, QIB_I_SPIOBUFAVAIL);
3206 
3207 	/* qib_ib_piobufavail() will clear the want PIO interrupt if needed */
3208 	if (dd->flags & QIB_INITTED)
3209 		qib_ib_piobufavail(dd);
3210 	else
3211 		qib_wantpiobuf_7322_intr(dd, 0);
3212 
3213 	return IRQ_HANDLED;
3214 }
3215 
3216 /*
3217  * Dedicated Send DMA interrupt handler.
3218  */
3219 static irqreturn_t sdma_intr(int irq, void *data)
3220 {
3221 	struct qib_pportdata *ppd = data;
3222 	struct qib_devdata *dd = ppd->dd;
3223 
3224 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3225 		/*
3226 		 * This return value is not great, but we do not want the
3227 		 * interrupt core code to remove our interrupt handler
3228 		 * because we don't appear to be handling an interrupt
3229 		 * during a chip reset.
3230 		 */
3231 		return IRQ_HANDLED;
3232 
3233 	this_cpu_inc(*dd->int_counter);
3234 
3235 	/* Clear the interrupt bit we expect to be set. */
3236 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3237 		       INT_MASK_P(SDma, 1) : INT_MASK_P(SDma, 0));
3238 	qib_sdma_intr(ppd);
3239 
3240 	return IRQ_HANDLED;
3241 }
3242 
3243 /*
3244  * Dedicated Send DMA idle interrupt handler.
3245  */
3246 static irqreturn_t sdma_idle_intr(int irq, void *data)
3247 {
3248 	struct qib_pportdata *ppd = data;
3249 	struct qib_devdata *dd = ppd->dd;
3250 
3251 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3252 		/*
3253 		 * This return value is not great, but we do not want the
3254 		 * interrupt core code to remove our interrupt handler
3255 		 * because we don't appear to be handling an interrupt
3256 		 * during a chip reset.
3257 		 */
3258 		return IRQ_HANDLED;
3259 
3260 	this_cpu_inc(*dd->int_counter);
3261 
3262 	/* Clear the interrupt bit we expect to be set. */
3263 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3264 		       INT_MASK_P(SDmaIdle, 1) : INT_MASK_P(SDmaIdle, 0));
3265 	qib_sdma_intr(ppd);
3266 
3267 	return IRQ_HANDLED;
3268 }
3269 
3270 /*
3271  * Dedicated Send DMA progress interrupt handler.
3272  */
3273 static irqreturn_t sdma_progress_intr(int irq, void *data)
3274 {
3275 	struct qib_pportdata *ppd = data;
3276 	struct qib_devdata *dd = ppd->dd;
3277 
3278 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3279 		/*
3280 		 * This return value is not great, but we do not want the
3281 		 * interrupt core code to remove our interrupt handler
3282 		 * because we don't appear to be handling an interrupt
3283 		 * during a chip reset.
3284 		 */
3285 		return IRQ_HANDLED;
3286 
3287 	this_cpu_inc(*dd->int_counter);
3288 
3289 	/* Clear the interrupt bit we expect to be set. */
3290 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3291 		       INT_MASK_P(SDmaProgress, 1) :
3292 		       INT_MASK_P(SDmaProgress, 0));
3293 	qib_sdma_intr(ppd);
3294 
3295 	return IRQ_HANDLED;
3296 }
3297 
3298 /*
3299  * Dedicated Send DMA cleanup interrupt handler.
3300  */
3301 static irqreturn_t sdma_cleanup_intr(int irq, void *data)
3302 {
3303 	struct qib_pportdata *ppd = data;
3304 	struct qib_devdata *dd = ppd->dd;
3305 
3306 	if ((dd->flags & (QIB_PRESENT | QIB_BADINTR)) != QIB_PRESENT)
3307 		/*
3308 		 * This return value is not great, but we do not want the
3309 		 * interrupt core code to remove our interrupt handler
3310 		 * because we don't appear to be handling an interrupt
3311 		 * during a chip reset.
3312 		 */
3313 		return IRQ_HANDLED;
3314 
3315 	this_cpu_inc(*dd->int_counter);
3316 
3317 	/* Clear the interrupt bit we expect to be set. */
3318 	qib_write_kreg(dd, kr_intclear, ppd->hw_pidx ?
3319 		       INT_MASK_PM(SDmaCleanupDone, 1) :
3320 		       INT_MASK_PM(SDmaCleanupDone, 0));
3321 	qib_sdma_process_event(ppd, qib_sdma_event_e20_hw_started);
3322 
3323 	return IRQ_HANDLED;
3324 }
3325 
3326 #ifdef CONFIG_INFINIBAND_QIB_DCA
3327 
3328 static void reset_dca_notifier(struct qib_devdata *dd, int msixnum)
3329 {
3330 	if (!dd->cspec->msix_entries[msixnum].dca)
3331 		return;
3332 
3333 	qib_devinfo(dd->pcidev, "Disabling notifier on HCA %d irq %d\n",
3334 		    dd->unit, pci_irq_vector(dd->pcidev, msixnum));
3335 	irq_set_affinity_notifier(pci_irq_vector(dd->pcidev, msixnum), NULL);
3336 	dd->cspec->msix_entries[msixnum].notifier = NULL;
3337 }
3338 
3339 static void setup_dca_notifier(struct qib_devdata *dd, int msixnum)
3340 {
3341 	struct qib_msix_entry *m = &dd->cspec->msix_entries[msixnum];
3342 	struct qib_irq_notify *n;
3343 
3344 	if (!m->dca)
3345 		return;
3346 	n = kzalloc(sizeof(*n), GFP_KERNEL);
3347 	if (n) {
3348 		int ret;
3349 
3350 		m->notifier = n;
3351 		n->notify.irq = pci_irq_vector(dd->pcidev, msixnum);
3352 		n->notify.notify = qib_irq_notifier_notify;
3353 		n->notify.release = qib_irq_notifier_release;
3354 		n->arg = m->arg;
3355 		n->rcv = m->rcv;
3356 		qib_devinfo(dd->pcidev,
3357 			"set notifier irq %d rcv %d notify %p\n",
3358 			n->notify.irq, n->rcv, &n->notify);
3359 		ret = irq_set_affinity_notifier(
3360 				n->notify.irq,
3361 				&n->notify);
3362 		if (ret) {
3363 			m->notifier = NULL;
3364 			kfree(n);
3365 		}
3366 	}
3367 }
3368 
3369 #endif
3370 
3371 /*
3372  * Set up our chip-specific interrupt handler.
3373  * The interrupt type has already been setup, so
3374  * we just need to do the registration and error checking.
3375  * If we are using MSIx interrupts, we may fall back to
3376  * INTx later, if the interrupt handler doesn't get called
3377  * within 1/2 second (see verify_interrupt()).
3378  */
3379 static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
3380 {
3381 	int ret, i, msixnum;
3382 	u64 redirect[6];
3383 	u64 mask;
3384 	const struct cpumask *local_mask;
3385 	int firstcpu, secondcpu = 0, currrcvcpu = 0;
3386 
3387 	if (!dd->num_pports)
3388 		return;
3389 
3390 	if (clearpend) {
3391 		/*
3392 		 * if not switching interrupt types, be sure interrupts are
3393 		 * disabled, and then clear anything pending at this point,
3394 		 * because we are starting clean.
3395 		 */
3396 		qib_7322_set_intr_state(dd, 0);
3397 
3398 		/* clear the reset error, init error/hwerror mask */
3399 		qib_7322_init_hwerrors(dd);
3400 
3401 		/* clear any interrupt bits that might be set */
3402 		qib_write_kreg(dd, kr_intclear, ~0ULL);
3403 
3404 		/* make sure no pending MSIx intr, and clear diag reg */
3405 		qib_write_kreg(dd, kr_intgranted, ~0ULL);
3406 		qib_write_kreg(dd, kr_vecclr_wo_int, ~0ULL);
3407 	}
3408 
3409 	if (!dd->cspec->num_msix_entries) {
3410 		/* Try to get INTx interrupt */
3411 try_intx:
3412 		ret = pci_request_irq(dd->pcidev, 0, qib_7322intr, NULL, dd,
3413 				      QIB_DRV_NAME);
3414 		if (ret) {
3415 			qib_dev_err(
3416 				dd,
3417 				"Couldn't setup INTx interrupt (irq=%d): %d\n",
3418 				pci_irq_vector(dd->pcidev, 0), ret);
3419 			return;
3420 		}
3421 		dd->cspec->main_int_mask = ~0ULL;
3422 		return;
3423 	}
3424 
3425 	/* Try to get MSIx interrupts */
3426 	memset(redirect, 0, sizeof(redirect));
3427 	mask = ~0ULL;
3428 	msixnum = 0;
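	/*
	 * Choose CPUs near the device for interrupt affinity: prefer CPUs
	 * local to the PCI bus, falling back to core 0's siblings when the
	 * bus mask is empty or covers every online CPU.  Receive-context
	 * vectors are spread round-robin starting at the second such CPU;
	 * everything else is pinned to the first.
	 */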
3429 	local_mask = cpumask_of_pcibus(dd->pcidev->bus);
3430 	firstcpu = cpumask_first(local_mask);
3431 	if (firstcpu >= nr_cpu_ids ||
3432 			cpumask_weight(local_mask) == num_online_cpus()) {
3433 		local_mask = topology_core_cpumask(0);
3434 		firstcpu = cpumask_first(local_mask);
3435 	}
3436 	if (firstcpu < nr_cpu_ids) {
3437 		secondcpu = cpumask_next(firstcpu, local_mask);
3438 		if (secondcpu >= nr_cpu_ids)
3439 			secondcpu = firstcpu;
3440 		currrcvcpu = secondcpu;
3441 	}
3442 	for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
3443 		irq_handler_t handler;
3444 		void *arg;
3445 		int lsb, reg, sh;
3446 #ifdef CONFIG_INFINIBAND_QIB_DCA
3447 		int dca = 0;
3448 #endif
3449 		if (i < ARRAY_SIZE(irq_table)) {
3450 			if (irq_table[i].port) {
3451 				/* skip if for a non-configured port */
3452 				if (irq_table[i].port > dd->num_pports)
3453 					continue;
3454 				arg = dd->pport + irq_table[i].port - 1;
3455 			} else
3456 				arg = dd;
3457 #ifdef CONFIG_INFINIBAND_QIB_DCA
3458 			dca = irq_table[i].dca;
3459 #endif
3460 			lsb = irq_table[i].lsb;
3461 			handler = irq_table[i].handler;
3462 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3463 					      NULL, arg, QIB_DRV_NAME "%d%s",
3464 					      dd->unit,
3465 					      irq_table[i].name);
3466 		} else {
3467 			unsigned ctxt;
3468 
3469 			ctxt = i - ARRAY_SIZE(irq_table);
3470 			/* per krcvq context receive interrupt */
3471 			arg = dd->rcd[ctxt];
3472 			if (!arg)
3473 				continue;
3474 			if (qib_krcvq01_no_msi && ctxt < 2)
3475 				continue;
3476 #ifdef CONFIG_INFINIBAND_QIB_DCA
3477 			dca = 1;
3478 #endif
3479 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
3480 			handler = qib_7322pintr;
3481 			ret = pci_request_irq(dd->pcidev, msixnum, handler,
3482 					      NULL, arg,
3483 					      QIB_DRV_NAME "%d (kctx)",
3484 					      dd->unit);
3485 		}
3486 
3487 		if (ret) {
3488 			/*
3489 			 * Shouldn't happen since the enable said we could
3490 			 * have as many as we are trying to setup here.
3491 			 */
3492 			qib_dev_err(dd,
3493 				    "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
3494 				    msixnum,
3495 				    pci_irq_vector(dd->pcidev, msixnum),
3496 				    ret);
3497 			qib_7322_free_irq(dd);
3498 			pci_alloc_irq_vectors(dd->pcidev, 1, 1,
3499 					      PCI_IRQ_LEGACY);
3500 			goto try_intx;
3501 		}
3502 		dd->cspec->msix_entries[msixnum].arg = arg;
3503 #ifdef CONFIG_INFINIBAND_QIB_DCA
3504 		dd->cspec->msix_entries[msixnum].dca = dca;
3505 		dd->cspec->msix_entries[msixnum].rcv =
3506 			handler == qib_7322pintr;
3507 #endif
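		/*
		 * Steer sources with a known status bit to this MSIx vector
		 * via the IntRedirect registers, and drop that bit from the
		 * mask the general handler (qib_7322intr) must still cover.
		 */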
3508 		if (lsb >= 0) {
3509 			reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
3510 			sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
3511 				SYM_LSB(IntRedirect0, vec1);
3512 			mask &= ~(1ULL << lsb);
3513 			redirect[reg] |= ((u64) msixnum) << sh;
3514 		}
3515 		qib_read_kreg64(dd, 2 * msixnum + 1 +
3516 				(QIB_7322_MsixTable_OFFS / sizeof(u64)));
3517 		if (firstcpu < nr_cpu_ids &&
3518 			zalloc_cpumask_var(
3519 				&dd->cspec->msix_entries[msixnum].mask,
3520 				GFP_KERNEL)) {
3521 			if (handler == qib_7322pintr) {
3522 				cpumask_set_cpu(currrcvcpu,
3523 					dd->cspec->msix_entries[msixnum].mask);
3524 				currrcvcpu = cpumask_next(currrcvcpu,
3525 					local_mask);
3526 				if (currrcvcpu >= nr_cpu_ids)
3527 					currrcvcpu = secondcpu;
3528 			} else {
3529 				cpumask_set_cpu(firstcpu,
3530 					dd->cspec->msix_entries[msixnum].mask);
3531 			}
3532 			irq_set_affinity_hint(
3533 				pci_irq_vector(dd->pcidev, msixnum),
3534 				dd->cspec->msix_entries[msixnum].mask);
3535 		}
3536 		msixnum++;
3537 	}
3538 	/* Initialize the vector mapping */
3539 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
3540 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
3541 	dd->cspec->main_int_mask = mask;
3542 	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
3543 		(unsigned long)dd);
3544 }
3545 
3546 /**
3547  * qib_7322_boardname - fill in the board name and note features
3548  * @dd: the qlogic_ib device
3549  *
3550  * info will be based on the board revision register
3551  */
3552 static unsigned qib_7322_boardname(struct qib_devdata *dd)
3553 {
3554 	/* Will need enumeration of board-types here */
3555 	u32 boardid;
3556 	unsigned int features = DUAL_PORT_CAP;
3557 
3558 	boardid = SYM_FIELD(dd->revision, Revision, BoardID);
3559 
3560 	switch (boardid) {
3561 	case 0:
3562 		dd->boardname = "InfiniPath_QLE7342_Emulation";
3563 		break;
3564 	case 1:
3565 		dd->boardname = "InfiniPath_QLE7340";
3566 		dd->flags |= QIB_HAS_QSFP;
3567 		features = PORT_SPD_CAP;
3568 		break;
3569 	case 2:
3570 		dd->boardname = "InfiniPath_QLE7342";
3571 		dd->flags |= QIB_HAS_QSFP;
3572 		break;
3573 	case 3:
3574 		dd->boardname = "InfiniPath_QMI7342";
3575 		break;
3576 	case 4:
3577 		dd->boardname = "InfiniPath_Unsupported7342";
3578 		qib_dev_err(dd, "Unsupported version of QMH7342\n");
3579 		features = 0;
3580 		break;
3581 	case BOARD_QMH7342:
3582 		dd->boardname = "InfiniPath_QMH7342";
3584 		break;
3585 	case BOARD_QME7342:
3586 		dd->boardname = "InfiniPath_QME7342";
3587 		break;
3588 	case 8:
3589 		dd->boardname = "InfiniPath_QME7362";
3590 		dd->flags |= QIB_HAS_QSFP;
3591 		break;
3592 	case BOARD_QMH7360:
3593 		dd->boardname = "Intel IB QDR 1P FLR-QSFP Adptr";
3594 		dd->flags |= QIB_HAS_QSFP;
3595 		break;
3596 	case 15:
3597 		dd->boardname = "InfiniPath_QLE7342_TEST";
3598 		dd->flags |= QIB_HAS_QSFP;
3599 		break;
3600 	default:
3601 		dd->boardname = "InfiniPath_QLE73xy_UNKNOWN";
3602 		qib_dev_err(dd, "Unknown 7322 board type %u\n", boardid);
3603 		break;
3604 	}
3605 	dd->board_atten = 1; /* index into txdds_Xdr */
3606 
3607 	snprintf(dd->boardversion, sizeof(dd->boardversion),
3608 		 "ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
3609 		 QIB_CHIP_VERS_MAJ, QIB_CHIP_VERS_MIN, dd->boardname,
3610 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, Arch),
3611 		 dd->majrev, dd->minrev,
3612 		 (unsigned int)SYM_FIELD(dd->revision, Revision_R, SW));
3613 
3614 	if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
3615 		qib_devinfo(dd->pcidev,
3616 			    "IB%u: Forced to single port mode by module parameter\n",
3617 			    dd->unit);
3618 		features &= PORT_SPD_CAP;
3619 	}
3620 
3621 	return features;
3622 }
3623 
3624 /*
3625  * This routine sleeps, so it can only be called from user context, not
3626  * from interrupt context.
3627  */
3628 static int qib_do_7322_reset(struct qib_devdata *dd)
3629 {
3630 	u64 val;
3631 	u64 *msix_vecsave = NULL;
3632 	int i, msix_entries, ret = 1;
3633 	u16 cmdval;
3634 	u8 int_line, clinesz;
3635 	unsigned long flags;
3636 
3637 	/* Use dev_err so it shows up in logs, etc. */
3638 	qib_dev_err(dd, "Resetting InfiniPath unit %u\n", dd->unit);
3639 
3640 	qib_pcie_getcmd(dd, &cmdval, &int_line, &clinesz);
3641 
3642 	msix_entries = dd->cspec->num_msix_entries;
3643 
3644 	/* no interrupts till re-initted */
3645 	qib_7322_set_intr_state(dd, 0);
3646 
3647 	qib_7322_free_irq(dd);
3648 
3649 	if (msix_entries) {
3650 		/* can be up to 512 bytes, too big for stack */
3651 		msix_vecsave = kmalloc(2 * dd->cspec->num_msix_entries *
3652 			sizeof(u64), GFP_KERNEL);
3653 	}
3654 
3655 	/*
3656 	 * Core PCI (as of 2.6.18) doesn't save or rewrite the full vector
3657 	 * info that is set up by the BIOS, so we have to save and restore
3658 	 * it ourselves.   There is some risk something could change it,
3659 	 * after we save it, but since we have disabled the MSIx, it
3660 	 * shouldn't be touched...
3661 	 */
3662 	for (i = 0; i < msix_entries; i++) {
3663 		u64 vecaddr, vecdata;
3664 
3665 		vecaddr = qib_read_kreg64(dd, 2 * i +
3666 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3667 		vecdata = qib_read_kreg64(dd, 1 + 2 * i +
3668 				  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
3669 		if (msix_vecsave) {
3670 			msix_vecsave[2 * i] = vecaddr;
3671 			/* save it without the masked bit set */
3672 			msix_vecsave[1 + 2 * i] = vecdata & ~0x100000000ULL;
3673 		}
3674 	}
3675 
3676 	dd->pport->cpspec->ibdeltainprog = 0;
3677 	dd->pport->cpspec->ibsymdelta = 0;
3678 	dd->pport->cpspec->iblnkerrdelta = 0;
3679 	dd->pport->cpspec->ibmalfdelta = 0;
3680 	/* so we check interrupts work again */
3681 	dd->z_int_counter = qib_int_counter(dd);
3682 
3683 	/*
3684 	 * Keep chip from being accessed until we are ready.  Use
3685 	 * writeq() directly, to allow the write even though QIB_PRESENT
3686 	 * isn't set.
3687 	 */
3688 	dd->flags &= ~(QIB_INITTED | QIB_PRESENT | QIB_BADINTR);
3689 	dd->flags |= QIB_DOING_RESET;
3690 	val = dd->control | QLOGIC_IB_C_RESET;
3691 	writeq(val, &dd->kregbase[kr_control]);
3692 
3693 	for (i = 1; i <= 5; i++) {
3694 		/*
3695 		 * Allow MBIST, etc. to complete; longer on each retry.
3696 		 * We sometimes get machine checks from bus timeout if no
3697 		 * response, so for now, make it *really* long.
3698 		 */
3699 		msleep(1000 + (1 + i) * 3000);
3700 
3701 		qib_pcie_reenable(dd, cmdval, int_line, clinesz);
3702 
3703 		/*
3704 		 * Use readq directly, so we don't need to mark it as PRESENT
3705 		 * until we get a successful indication that all is well.
3706 		 */
3707 		val = readq(&dd->kregbase[kr_revision]);
3708 		if (val == dd->revision)
3709 			break;
3710 		if (i == 5) {
3711 			qib_dev_err(dd,
3712 				"Failed to initialize after reset, unusable\n");
3713 			ret = 0;
3714 			goto  bail;
3715 		}
3716 	}
3717 
3718 	dd->flags |= QIB_PRESENT; /* it's back */
3719 
3720 	if (msix_entries) {
3721 		/* restore the MSIx vector address and data if saved above */
3722 		for (i = 0; i < msix_entries; i++) {
3723 			if (!msix_vecsave || !msix_vecsave[2 * i])
3724 				continue;
3725 			qib_write_kreg(dd, 2 * i +
3726 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3727 				msix_vecsave[2 * i]);
3728 			qib_write_kreg(dd, 1 + 2 * i +
3729 				(QIB_7322_MsixTable_OFFS / sizeof(u64)),
3730 				msix_vecsave[1 + 2 * i]);
3731 		}
3732 	}
3733 
3734 	/* initialize the remaining registers.  */
3735 	for (i = 0; i < dd->num_pports; ++i)
3736 		write_7322_init_portregs(&dd->pport[i]);
3737 	write_7322_initregs(dd);
3738 
3739 	if (qib_pcie_params(dd, dd->lbus_width, &msix_entries))
3740 		qib_dev_err(dd,
3741 			"Reset failed to setup PCIe or interrupts; continuing anyway\n");
3742 
3743 	dd->cspec->num_msix_entries = msix_entries;
3744 	qib_setup_7322_interrupt(dd, 1);
3745 
3746 	for (i = 0; i < dd->num_pports; ++i) {
3747 		struct qib_pportdata *ppd = &dd->pport[i];
3748 
3749 		spin_lock_irqsave(&ppd->lflags_lock, flags);
3750 		ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
3751 		ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
3752 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
3753 	}
3754 
3755 bail:
3756 	dd->flags &= ~QIB_DOING_RESET; /* OK or not, no longer resetting */
3757 	kfree(msix_vecsave);
3758 	return ret;
3759 }
3760 
3761 /**
3762  * qib_7322_put_tid - write a TID to the chip
3763  * @dd: the qlogic_ib device
3764  * @tidptr: pointer to the expected TID (in chip) to update
3765  * @type: 0 for eager, 1 for expected
3766  * @pa: physical address of in memory buffer; tidinvalid if freeing
3767  */
3768 static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
3769 			     u32 type, unsigned long pa)
3770 {
3771 	if (!(dd->flags & QIB_PRESENT))
3772 		return;
3773 	if (pa != dd->tidinvalid) {
3774 		u64 chippa = pa >> IBA7322_TID_PA_SHIFT;
3775 
3776 		/* paranoia checks */
3777 		if (pa != (chippa << IBA7322_TID_PA_SHIFT)) {
3778 			qib_dev_err(dd, "Physaddr %lx not 2KB aligned!\n",
3779 				    pa);
3780 			return;
3781 		}
3782 		if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
3783 			qib_dev_err(dd,
3784 				"Physical page address 0x%lx larger than supported\n",
3785 				pa);
3786 			return;
3787 		}
3788 
3789 		if (type == RCVHQ_RCV_TYPE_EAGER)
3790 			chippa |= dd->tidtemplate;
3791 		else /* for now, always full 4KB page */
3792 			chippa |= IBA7322_TID_SZ_4K;
3793 		pa = chippa;
3794 	}
3795 	writeq(pa, tidptr);
3796 	mmiowb();
3797 }
3798 
3799 /**
3800  * qib_7322_clear_tids - clear all TID entries for a ctxt, expected and eager
3801  * @dd: the qlogic_ib device
3802  * @rcd: the context data to clear
3803  *
3804  * clear all TID entries for a ctxt, expected and eager.
3805  * Used from qib_close().
3806  */
3807 static void qib_7322_clear_tids(struct qib_devdata *dd,
3808 				struct qib_ctxtdata *rcd)
3809 {
3810 	u64 __iomem *tidbase;
3811 	unsigned long tidinv;
3812 	u32 ctxt;
3813 	int i;
3814 
3815 	if (!dd->kregbase || !rcd)
3816 		return;
3817 
3818 	ctxt = rcd->ctxt;
3819 
3820 	tidinv = dd->tidinvalid;
3821 	tidbase = (u64 __iomem *)
3822 		((char __iomem *) dd->kregbase +
3823 		 dd->rcvtidbase +
3824 		 ctxt * dd->rcvtidcnt * sizeof(*tidbase));
3825 
3826 	for (i = 0; i < dd->rcvtidcnt; i++)
3827 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
3828 				 tidinv);
3829 
3830 	tidbase = (u64 __iomem *)
3831 		((char __iomem *) dd->kregbase +
3832 		 dd->rcvegrbase +
3833 		 rcd->rcvegr_tid_base * sizeof(*tidbase));
3834 
3835 	for (i = 0; i < rcd->rcvegrcnt; i++)
3836 		qib_7322_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
3837 				 tidinv);
3838 }
3839 
3840 /**
3841  * qib_7322_tidtemplate - setup constants for TID updates
3842  * @dd: the qlogic_ib device
3843  *
3844  * We setup stuff that we use a lot, to avoid calculating each time
3845  */
3846 static void qib_7322_tidtemplate(struct qib_devdata *dd)
3847 {
3848 	/*
3849 	 * For now, we always allocate 4KB buffers (at init) so we can
3850 	 * receive max size packets.  We may want a module parameter to
3851 	 * specify 2KB or 4KB and/or make it per port instead of per device
3852 	 * for those who want to reduce memory footprint.  Note that the
3853 	 * rcvhdrentsize size must be large enough to hold the largest
3854 	 * IB header (currently 96 bytes) that we expect to handle (plus of
3855 	 * course the 2 dwords of RHF).
3856 	 */
3857 	if (dd->rcvegrbufsize == 2048)
3858 		dd->tidtemplate = IBA7322_TID_SZ_2K;
3859 	else if (dd->rcvegrbufsize == 4096)
3860 		dd->tidtemplate = IBA7322_TID_SZ_4K;
3861 	dd->tidinvalid = 0;
3862 }
3863 
3864 /**
3865  * qib_7322_get_base_info - set chip-specific flags for user code
3866  * @rcd: the qlogic_ib ctxt
3867  * @kinfo: qib_base_info pointer
3868  *
3869  * We set the PCIE flag because the lower bandwidth on PCIe vs
3870  * HyperTransport can affect some user packet algorithms.
3871  */
3872 
3873 static int qib_7322_get_base_info(struct qib_ctxtdata *rcd,
3874 				  struct qib_base_info *kinfo)
3875 {
3876 	kinfo->spi_runtime_flags |= QIB_RUNTIME_CTXT_MSB_IN_QP |
3877 		QIB_RUNTIME_PCIE | QIB_RUNTIME_NODMA_RTAIL |
3878 		QIB_RUNTIME_HDRSUPP | QIB_RUNTIME_SDMA;
3879 	if (rcd->dd->cspec->r1)
3880 		kinfo->spi_runtime_flags |= QIB_RUNTIME_RCHK;
3881 	if (rcd->dd->flags & QIB_USE_SPCL_TRIG)
3882 		kinfo->spi_runtime_flags |= QIB_RUNTIME_SPECIAL_TRIGGER;
3883 
3884 	return 0;
3885 }
3886 
3887 static struct qib_message_header *
3888 qib_7322_get_msgheader(struct qib_devdata *dd, __le32 *rhf_addr)
3889 {
3890 	u32 offset = qib_hdrget_offset(rhf_addr);
3891 
3892 	return (struct qib_message_header *)
3893 		(rhf_addr - dd->rhf_offset + offset);
3894 }
3895 
3896 /*
3897  * Configure number of contexts.
3898  */
3899 static void qib_7322_config_ctxts(struct qib_devdata *dd)
3900 {
3901 	unsigned long flags;
3902 	u32 nchipctxts;
3903 
3904 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
3905 	dd->cspec->numctxts = nchipctxts;
3906 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
3907 		dd->first_user_ctxt = NUM_IB_PORTS +
3908 			(qib_n_krcv_queues - 1) * dd->num_pports;
3909 		if (dd->first_user_ctxt > nchipctxts)
3910 			dd->first_user_ctxt = nchipctxts;
3911 		dd->n_krcv_queues = dd->first_user_ctxt / dd->num_pports;
3912 	} else {
3913 		dd->first_user_ctxt = NUM_IB_PORTS;
3914 		dd->n_krcv_queues = 1;
3915 	}
3916 
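	/*
	 * With no qib_cfgctxts module parameter, size the context count to
	 * the kernel contexts plus one per online CPU, rounded up to the
	 * chip's 6/10/18 context configurations.
	 */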
3917 	if (!qib_cfgctxts) {
3918 		int nctxts = dd->first_user_ctxt + num_online_cpus();
3919 
3920 		if (nctxts <= 6)
3921 			dd->ctxtcnt = 6;
3922 		else if (nctxts <= 10)
3923 			dd->ctxtcnt = 10;
3924 		else if (nctxts <= nchipctxts)
3925 			dd->ctxtcnt = nchipctxts;
3926 	} else if (qib_cfgctxts < dd->num_pports)
3927 		dd->ctxtcnt = dd->num_pports;
3928 	else if (qib_cfgctxts <= nchipctxts)
3929 		dd->ctxtcnt = qib_cfgctxts;
3930 	if (!dd->ctxtcnt) /* none of the above, set to max */
3931 		dd->ctxtcnt = nchipctxts;
3932 
3933 	/*
3934 	 * Chip can be configured for 6, 10, or 18 ctxts, and choice
3935 	 * affects number of eager TIDs per ctxt (1K, 2K, 4K).
3936 	 * Lock to be paranoid about later motion, etc.
3937 	 */
3938 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
3939 	if (dd->ctxtcnt > 10)
3940 		dd->rcvctrl |= 2ULL << SYM_LSB(RcvCtrl, ContextCfg);
3941 	else if (dd->ctxtcnt > 6)
3942 		dd->rcvctrl |= 1ULL << SYM_LSB(RcvCtrl, ContextCfg);
3943 	/* else configure for default 6 receive ctxts */
3944 
3945 	/* The XRC opcode is 5. */
3946 	dd->rcvctrl |= 5ULL << SYM_LSB(RcvCtrl, XrcTypeCode);
3947 
3948 	/*
3949 	 * RcvCtrl *must* be written here so that the
3950 	 * chip understands how to change rcvegrcnt below.
3951 	 */
3952 	qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
3953 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
3954 
3955 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
3956 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
3957 	if (qib_rcvhdrcnt)
3958 		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
3959 	else
3960 		dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
3961 				    dd->num_pports > 1 ? 1024U : 2048U);
3962 }
3963 
3964 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
3965 {
3966 
3967 	int lsb, ret = 0;
3968 	u64 maskr; /* right-justified mask */
3969 
3970 	switch (which) {
3971 
3972 	case QIB_IB_CFG_LWID_ENB: /* Get allowed Link-width */
3973 		ret = ppd->link_width_enabled;
3974 		goto done;
3975 
3976 	case QIB_IB_CFG_LWID: /* Get currently active Link-width */
3977 		ret = ppd->link_width_active;
3978 		goto done;
3979 
3980 	case QIB_IB_CFG_SPD_ENB: /* Get allowed Link speeds */
3981 		ret = ppd->link_speed_enabled;
3982 		goto done;
3983 
3984 	case QIB_IB_CFG_SPD: /* Get current Link spd */
3985 		ret = ppd->link_speed_active;
3986 		goto done;
3987 
3988 	case QIB_IB_CFG_RXPOL_ENB: /* Get Auto-RX-polarity enable */
3989 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3990 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
3991 		break;
3992 
3993 	case QIB_IB_CFG_LREV_ENB: /* Get Auto-Lane-reversal enable */
3994 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3995 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
3996 		break;
3997 
3998 	case QIB_IB_CFG_LINKLATENCY:
3999 		ret = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
4000 			SYM_MASK(IBCStatusB_0, LinkRoundTripLatency);
4001 		goto done;
4002 
4003 	case QIB_IB_CFG_OP_VLS:
4004 		ret = ppd->vls_operational;
4005 		goto done;
4006 
4007 	case QIB_IB_CFG_VL_HIGH_CAP:
4008 		ret = 16;
4009 		goto done;
4010 
4011 	case QIB_IB_CFG_VL_LOW_CAP:
4012 		ret = 16;
4013 		goto done;
4014 
4015 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4016 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4017 				OverrunThreshold);
4018 		goto done;
4019 
4020 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4021 		ret = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4022 				PhyerrThreshold);
4023 		goto done;
4024 
4025 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4026 		/* will only take effect when the link state changes */
4027 		ret = (ppd->cpspec->ibcctrl_a &
4028 		       SYM_MASK(IBCCtrlA_0, LinkDownDefaultState)) ?
4029 			IB_LINKINITCMD_SLEEP : IB_LINKINITCMD_POLL;
4030 		goto done;
4031 
4032 	case QIB_IB_CFG_HRTBT: /* Get Heartbeat off/enable/auto */
4033 		lsb = IBA7322_IBC_HRTBT_LSB;
4034 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4035 		break;
4036 
4037 	case QIB_IB_CFG_PMA_TICKS:
4038 		/*
4039 		 * 0x00 = 10x link transfer rate or 4 nsec. for 2.5Gbs
4040 		 * Since the clock is always 250MHz, the value is 3, 1 or 0.
4041 		 */
4042 		if (ppd->link_speed_active == QIB_IB_QDR)
4043 			ret = 3;
4044 		else if (ppd->link_speed_active == QIB_IB_DDR)
4045 			ret = 1;
4046 		else
4047 			ret = 0;
4048 		goto done;
4049 
4050 	default:
4051 		ret = -EINVAL;
4052 		goto done;
4053 	}
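	/*
	 * Cases that break (rather than goto done) read a field from the
	 * shadowed IBCCtrlB using the lsb/maskr pair selected above.
	 */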
4054 	ret = (int)((ppd->cpspec->ibcctrl_b >> lsb) & maskr);
4055 done:
4056 	return ret;
4057 }
4058 
4059 /*
4060  * Below again cribbed liberally from older version. Do not lean
4061  * heavily on it.
4062  */
4063 #define IBA7322_IBC_DLIDLMC_SHIFT QIB_7322_IBCCtrlB_0_IB_DLID_LSB
4064 #define IBA7322_IBC_DLIDLMC_MASK (QIB_7322_IBCCtrlB_0_IB_DLID_RMASK \
4065 	| (QIB_7322_IBCCtrlB_0_IB_DLID_MASK_RMASK << 16))
4066 
4067 static int qib_7322_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
4068 {
4069 	struct qib_devdata *dd = ppd->dd;
4070 	u64 maskr; /* right-justified mask */
4071 	int lsb, ret = 0;
4072 	u16 lcmd, licmd;
4073 	unsigned long flags;
4074 
4075 	switch (which) {
4076 	case QIB_IB_CFG_LIDLMC:
4077 		/*
4078 		 * Set LID and LMC. Combined to avoid possible hazard;
4079 		 * caller puts LMC in the 16 MSbits, DLID in the 16 LSbits of val.
4080 		 */
4081 		lsb = IBA7322_IBC_DLIDLMC_SHIFT;
4082 		maskr = IBA7322_IBC_DLIDLMC_MASK;
4083 		/*
4084 		 * For header-checking, the SLID in the packet will
4085 		 * be masked with SendIBSLMCMask, and compared
4086 		 * with SendIBSLIDAssignMask. Make sure we do not
4087 		 * set any bits not covered by the mask, or we get
4088 		 * false-positives.
4089 		 */
4090 		qib_write_kreg_port(ppd, krp_sendslid,
4091 				    val & (val >> 16) & SendIBSLIDAssignMask);
4092 		qib_write_kreg_port(ppd, krp_sendslidmask,
4093 				    (val >> 16) & SendIBSLMCMask);
4094 		break;
4095 
4096 	case QIB_IB_CFG_LWID_ENB: /* set allowed Link-width */
4097 		ppd->link_width_enabled = val;
4098 		/* convert IB value to chip register value */
4099 		if (val == IB_WIDTH_1X)
4100 			val = 0;
4101 		else if (val == IB_WIDTH_4X)
4102 			val = 1;
4103 		else
4104 			val = 3;
4105 		maskr = SYM_RMASK(IBCCtrlB_0, IB_NUM_CHANNELS);
4106 		lsb = SYM_LSB(IBCCtrlB_0, IB_NUM_CHANNELS);
4107 		break;
4108 
4109 	case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
4110 		/*
4111 		 * As with width, only write the actual register if the
4112 		 * link is currently down, otherwise takes effect on next
4113 		 * link change.  Since setting is being explicitly requested
4114 		 * (via MAD or sysfs), clear autoneg failure status if speed
4115 		 * autoneg is enabled.
4116 		 */
4117 		ppd->link_speed_enabled = val;
4118 		val <<= IBA7322_IBC_SPEED_LSB;
4119 		maskr = IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK |
4120 			IBA7322_IBC_MAX_SPEED_MASK;
4121 		if (val & (val - 1)) {
4122 			/* Multiple speeds enabled */
4123 			val |= IBA7322_IBC_IBTA_1_2_MASK |
4124 				IBA7322_IBC_MAX_SPEED_MASK;
4125 			spin_lock_irqsave(&ppd->lflags_lock, flags);
4126 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
4127 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
4128 		} else if (val & IBA7322_IBC_SPEED_QDR)
4129 			val |= IBA7322_IBC_IBTA_1_2_MASK;
4130 		/* IBTA 1.2 mode + min/max + speed bits are contiguous */
4131 		lsb = SYM_LSB(IBCCtrlB_0, IB_ENHANCED_MODE);
4132 		break;
4133 
4134 	case QIB_IB_CFG_RXPOL_ENB: /* set Auto-RX-polarity enable */
4135 		lsb = SYM_LSB(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4136 		maskr = SYM_RMASK(IBCCtrlB_0, IB_POLARITY_REV_SUPP);
4137 		break;
4138 
4139 	case QIB_IB_CFG_LREV_ENB: /* set Auto-Lane-reversal enable */
4140 		lsb = SYM_LSB(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4141 		maskr = SYM_RMASK(IBCCtrlB_0, IB_LANE_REV_SUPPORTED);
4142 		break;
4143 
4144 	case QIB_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
4145 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4146 				  OverrunThreshold);
4147 		if (maskr != val) {
4148 			ppd->cpspec->ibcctrl_a &=
4149 				~SYM_MASK(IBCCtrlA_0, OverrunThreshold);
4150 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4151 				SYM_LSB(IBCCtrlA_0, OverrunThreshold);
4152 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4153 					    ppd->cpspec->ibcctrl_a);
4154 			qib_write_kreg(dd, kr_scratch, 0ULL);
4155 		}
4156 		goto bail;
4157 
4158 	case QIB_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
4159 		maskr = SYM_FIELD(ppd->cpspec->ibcctrl_a, IBCCtrlA_0,
4160 				  PhyerrThreshold);
4161 		if (maskr != val) {
4162 			ppd->cpspec->ibcctrl_a &=
4163 				~SYM_MASK(IBCCtrlA_0, PhyerrThreshold);
4164 			ppd->cpspec->ibcctrl_a |= (u64) val <<
4165 				SYM_LSB(IBCCtrlA_0, PhyerrThreshold);
4166 			qib_write_kreg_port(ppd, krp_ibcctrl_a,
4167 					    ppd->cpspec->ibcctrl_a);
4168 			qib_write_kreg(dd, kr_scratch, 0ULL);
4169 		}
4170 		goto bail;
4171 
4172 	case QIB_IB_CFG_PKEYS: /* update pkeys */
4173 		maskr = (u64) ppd->pkeys[0] | ((u64) ppd->pkeys[1] << 16) |
4174 			((u64) ppd->pkeys[2] << 32) |
4175 			((u64) ppd->pkeys[3] << 48);
4176 		qib_write_kreg_port(ppd, krp_partitionkey, maskr);
4177 		goto bail;
4178 
4179 	case QIB_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
4180 		/* will only take effect when the link state changes */
4181 		if (val == IB_LINKINITCMD_POLL)
4182 			ppd->cpspec->ibcctrl_a &=
4183 				~SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4184 		else /* SLEEP */
4185 			ppd->cpspec->ibcctrl_a |=
4186 				SYM_MASK(IBCCtrlA_0, LinkDownDefaultState);
4187 		qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
4188 		qib_write_kreg(dd, kr_scratch, 0ULL);
4189 		goto bail;
4190 
4191 	case QIB_IB_CFG_MTU: /* update the MTU in IBC */
4192 		/*
4193 		 * Update our housekeeping variables, and set IBC max
4194 		 * size, same as init code; max IBC is max we allow in
4195 		 * buffer, less the qword pbc, plus 1 for ICRC, in dwords
4196 		 * Set even if it's unchanged, print debug message only
4197 		 * on changes.
4198 		 */
4199 		val = (ppd->ibmaxlen >> 2) + 1;
4200 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0, MaxPktLen);
4201 		ppd->cpspec->ibcctrl_a |= (u64)val <<
4202 			SYM_LSB(IBCCtrlA_0, MaxPktLen);
4203 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4204 				    ppd->cpspec->ibcctrl_a);
4205 		qib_write_kreg(dd, kr_scratch, 0ULL);
4206 		goto bail;
4207 
4208 	case QIB_IB_CFG_LSTATE: /* set the IB link state */
4209 		switch (val & 0xffff0000) {
4210 		case IB_LINKCMD_DOWN:
4211 			lcmd = QLOGIC_IB_IBCC_LINKCMD_DOWN;
4212 			ppd->cpspec->ibmalfusesnap = 1;
4213 			ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
4214 				crp_errlink);
4215 			if (!ppd->cpspec->ibdeltainprog &&
4216 			    qib_compat_ddr_negotiate) {
4217 				ppd->cpspec->ibdeltainprog = 1;
4218 				ppd->cpspec->ibsymsnap =
4219 					read_7322_creg32_port(ppd,
4220 							      crp_ibsymbolerr);
4221 				ppd->cpspec->iblnkerrsnap =
4222 					read_7322_creg32_port(ppd,
4223 						      crp_iblinkerrrecov);
4224 			}
4225 			break;
4226 
4227 		case IB_LINKCMD_ARMED:
4228 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ARMED;
4229 			if (ppd->cpspec->ibmalfusesnap) {
4230 				ppd->cpspec->ibmalfusesnap = 0;
4231 				ppd->cpspec->ibmalfdelta +=
4232 					read_7322_creg32_port(ppd,
4233 							      crp_errlink) -
4234 					ppd->cpspec->ibmalfsnap;
4235 			}
4236 			break;
4237 
4238 		case IB_LINKCMD_ACTIVE:
4239 			lcmd = QLOGIC_IB_IBCC_LINKCMD_ACTIVE;
4240 			break;
4241 
4242 		default:
4243 			ret = -EINVAL;
4244 			qib_dev_err(dd, "bad linkcmd req 0x%x\n", val >> 16);
4245 			goto bail;
4246 		}
4247 		switch (val & 0xffff) {
4248 		case IB_LINKINITCMD_NOP:
4249 			licmd = 0;
4250 			break;
4251 
4252 		case IB_LINKINITCMD_POLL:
4253 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_POLL;
4254 			break;
4255 
4256 		case IB_LINKINITCMD_SLEEP:
4257 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_SLEEP;
4258 			break;
4259 
4260 		case IB_LINKINITCMD_DISABLE:
4261 			licmd = QLOGIC_IB_IBCC_LINKINITCMD_DISABLE;
4262 			ppd->cpspec->chase_end = 0;
4263 			/*
4264 			 * stop state chase counter and timer, if running.
4265 			 * wait for pending timer, but don't clear .data (ppd)!
4266 			 */
4267 			if (ppd->cpspec->chase_timer.expires) {
4268 				del_timer_sync(&ppd->cpspec->chase_timer);
4269 				ppd->cpspec->chase_timer.expires = 0;
4270 			}
4271 			break;
4272 
4273 		default:
4274 			ret = -EINVAL;
4275 			qib_dev_err(dd, "bad linkinitcmd req 0x%x\n",
4276 				    val & 0xffff);
4277 			goto bail;
4278 		}
4279 		qib_set_ib_7322_lstate(ppd, lcmd, licmd);
4280 		goto bail;
4281 
4282 	case QIB_IB_CFG_OP_VLS:
4283 		if (ppd->vls_operational != val) {
4284 			ppd->vls_operational = val;
4285 			set_vls(ppd);
4286 		}
4287 		goto bail;
4288 
4289 	case QIB_IB_CFG_VL_HIGH_LIMIT:
4290 		qib_write_kreg_port(ppd, krp_highprio_limit, val);
4291 		goto bail;
4292 
4293 	case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
4294 		if (val > 3) {
4295 			ret = -EINVAL;
4296 			goto bail;
4297 		}
4298 		lsb = IBA7322_IBC_HRTBT_LSB;
4299 		maskr = IBA7322_IBC_HRTBT_RMASK; /* OR of AUTO and ENB */
4300 		break;
4301 
4302 	case QIB_IB_CFG_PORT:
4303 		/* val is the port number of the switch we are connected to. */
4304 		if (ppd->dd->cspec->r1) {
4305 			cancel_delayed_work(&ppd->cpspec->ipg_work);
4306 			ppd->cpspec->ipg_tries = 0;
4307 		}
4308 		goto bail;
4309 
4310 	default:
4311 		ret = -EINVAL;
4312 		goto bail;
4313 	}
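	/*
	 * Cases that break (rather than bail) update the shadowed IBCCtrlB
	 * field selected by lsb/maskr, then flush with a scratch write.
	 */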
4314 	ppd->cpspec->ibcctrl_b &= ~(maskr << lsb);
4315 	ppd->cpspec->ibcctrl_b |= (((u64) val & maskr) << lsb);
4316 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
4317 	qib_write_kreg(dd, kr_scratch, 0);
4318 bail:
4319 	return ret;
4320 }
4321 
4322 static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
4323 {
4324 	int ret = 0;
4325 	u64 val, ctrlb;
4326 
4327 	/* only IBC loopback, may add serdes and xgxs loopbacks later */
4328 	if (!strncmp(what, "ibc", 3)) {
4329 		ppd->cpspec->ibcctrl_a |= SYM_MASK(IBCCtrlA_0,
4330 						       Loopback);
4331 		val = 0; /* disable heart beat, so link will come up */
4332 		qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
4333 			 ppd->dd->unit, ppd->port);
4334 	} else if (!strncmp(what, "off", 3)) {
4335 		ppd->cpspec->ibcctrl_a &= ~SYM_MASK(IBCCtrlA_0,
4336 							Loopback);
4337 		/* enable heart beat again */
4338 		val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
4339 		qib_devinfo(ppd->dd->pcidev,
4340 			"Disabling IB%u:%u IBC loopback (normal)\n",
4341 			ppd->dd->unit, ppd->port);
4342 	} else
4343 		ret = -EINVAL;
4344 	if (!ret) {
4345 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
4346 				    ppd->cpspec->ibcctrl_a);
4347 		ctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_HRTBT_MASK
4348 					     << IBA7322_IBC_HRTBT_LSB);
4349 		ppd->cpspec->ibcctrl_b = ctrlb | val;
4350 		qib_write_kreg_port(ppd, krp_ibcctrl_b,
4351 				    ppd->cpspec->ibcctrl_b);
4352 		qib_write_kreg(ppd->dd, kr_scratch, 0);
4353 	}
4354 	return ret;
4355 }
4356 
4357 static void get_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4358 			   struct ib_vl_weight_elem *vl)
4359 {
4360 	unsigned i;
4361 
4362 	for (i = 0; i < 16; i++, regno++, vl++) {
4363 		u32 val = qib_read_kreg_port(ppd, regno);
4364 
4365 		vl->vl = (val >> SYM_LSB(LowPriority0_0, VirtualLane)) &
4366 			SYM_RMASK(LowPriority0_0, VirtualLane);
4367 		vl->weight = (val >> SYM_LSB(LowPriority0_0, Weight)) &
4368 			SYM_RMASK(LowPriority0_0, Weight);
4369 	}
4370 }
4371 
4372 static void set_vl_weights(struct qib_pportdata *ppd, unsigned regno,
4373 			   struct ib_vl_weight_elem *vl)
4374 {
4375 	unsigned i;
4376 
4377 	for (i = 0; i < 16; i++, regno++, vl++) {
4378 		u64 val;
4379 
4380 		val = ((vl->vl & SYM_RMASK(LowPriority0_0, VirtualLane)) <<
4381 			SYM_LSB(LowPriority0_0, VirtualLane)) |
4382 		      ((vl->weight & SYM_RMASK(LowPriority0_0, Weight)) <<
4383 			SYM_LSB(LowPriority0_0, Weight));
4384 		qib_write_kreg_port(ppd, regno, val);
4385 	}
4386 	if (!(ppd->p_sendctrl & SYM_MASK(SendCtrl_0, IBVLArbiterEn))) {
4387 		struct qib_devdata *dd = ppd->dd;
4388 		unsigned long flags;
4389 
4390 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
4391 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, IBVLArbiterEn);
4392 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4393 		qib_write_kreg(dd, kr_scratch, 0);
4394 		spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4395 	}
4396 }
4397 
4398 static int qib_7322_get_ib_table(struct qib_pportdata *ppd, int which, void *t)
4399 {
4400 	switch (which) {
4401 	case QIB_IB_TBL_VL_HIGH_ARB:
4402 		get_vl_weights(ppd, krp_highprio_0, t);
4403 		break;
4404 
4405 	case QIB_IB_TBL_VL_LOW_ARB:
4406 		get_vl_weights(ppd, krp_lowprio_0, t);
4407 		break;
4408 
4409 	default:
4410 		return -EINVAL;
4411 	}
4412 	return 0;
4413 }
4414 
4415 static int qib_7322_set_ib_table(struct qib_pportdata *ppd, int which, void *t)
4416 {
4417 	switch (which) {
4418 	case QIB_IB_TBL_VL_HIGH_ARB:
4419 		set_vl_weights(ppd, krp_highprio_0, t);
4420 		break;
4421 
4422 	case QIB_IB_TBL_VL_LOW_ARB:
4423 		set_vl_weights(ppd, krp_lowprio_0, t);
4424 		break;
4425 
4426 	default:
4427 		return -EINVAL;
4428 	}
4429 	return 0;
4430 }
4431 
4432 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
4433 				    u32 updegr, u32 egrhd, u32 npkts)
4434 {
4435 	/*
4436 	 * Need to write timeout register before updating rcvhdrhead to ensure
4437 	 * that the timer is enabled on reception of a packet.
4438 	 */
4439 	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
4440 		adjust_rcv_timeout(rcd, npkts);
4441 	if (updegr)
4442 		qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
4443 	mmiowb();
4444 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4445 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
4446 	mmiowb();
4447 }
4448 
4449 static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
4450 {
4451 	u32 head, tail;
4452 
4453 	head = qib_read_ureg32(rcd->dd, ur_rcvhdrhead, rcd->ctxt);
4454 	if (rcd->rcvhdrtail_kvaddr)
4455 		tail = qib_get_rcvhdrtail(rcd);
4456 	else
4457 		tail = qib_read_ureg32(rcd->dd, ur_rcvhdrtail, rcd->ctxt);
4458 	return head == tail;
4459 }
4460 
4461 #define RCVCTRL_COMMON_MODS (QIB_RCVCTRL_CTXT_ENB | \
4462 	QIB_RCVCTRL_CTXT_DIS | \
4463 	QIB_RCVCTRL_TIDFLOW_ENB | \
4464 	QIB_RCVCTRL_TIDFLOW_DIS | \
4465 	QIB_RCVCTRL_TAILUPD_ENB | \
4466 	QIB_RCVCTRL_TAILUPD_DIS | \
4467 	QIB_RCVCTRL_INTRAVAIL_ENB | \
4468 	QIB_RCVCTRL_INTRAVAIL_DIS | \
4469 	QIB_RCVCTRL_BP_ENB | \
4470 	QIB_RCVCTRL_BP_DIS)
4471 
4472 #define RCVCTRL_PORT_MODS (QIB_RCVCTRL_CTXT_ENB | \
4473 	QIB_RCVCTRL_CTXT_DIS | \
4474 	QIB_RCVCTRL_PKEY_DIS | \
4475 	QIB_RCVCTRL_PKEY_ENB)
4476 
4477 /*
4478  * Modify the RCVCTRL register in chip-specific way. This
4479  * is a function because bit positions and (future) register
4480  * location is chip-specific, but the needed operations are
4481  * generic. <op> is a bit-mask because we often want to
4482  * do multiple modifications.
4483  */
4484 static void rcvctrl_7322_mod(struct qib_pportdata *ppd, unsigned int op,
4485 			     int ctxt)
4486 {
4487 	struct qib_devdata *dd = ppd->dd;
4488 	struct qib_ctxtdata *rcd;
4489 	u64 mask, val;
4490 	unsigned long flags;
4491 
4492 	spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
4493 
4494 	if (op & QIB_RCVCTRL_TIDFLOW_ENB)
4495 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TidFlowEnable);
4496 	if (op & QIB_RCVCTRL_TIDFLOW_DIS)
4497 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TidFlowEnable);
4498 	if (op & QIB_RCVCTRL_TAILUPD_ENB)
4499 		dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4500 	if (op & QIB_RCVCTRL_TAILUPD_DIS)
4501 		dd->rcvctrl &= ~SYM_MASK(RcvCtrl, TailUpd);
4502 	if (op & QIB_RCVCTRL_PKEY_ENB)
4503 		ppd->p_rcvctrl &= ~SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
4504 	if (op & QIB_RCVCTRL_PKEY_DIS)
4505 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvPartitionKeyDisable);
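	/* A negative ctxt means all contexts: build the mask accordingly. */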
4506 	if (ctxt < 0) {
4507 		mask = (1ULL << dd->ctxtcnt) - 1;
4508 		rcd = NULL;
4509 	} else {
4510 		mask = (1ULL << ctxt);
4511 		rcd = dd->rcd[ctxt];
4512 	}
4513 	if ((op & QIB_RCVCTRL_CTXT_ENB) && rcd) {
4514 		ppd->p_rcvctrl |=
4515 			(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4516 		if (!(dd->flags & QIB_NODMA_RTAIL)) {
4517 			op |= QIB_RCVCTRL_TAILUPD_ENB; /* need reg write */
4518 			dd->rcvctrl |= SYM_MASK(RcvCtrl, TailUpd);
4519 		}
4520 		/* Write these registers before the context is enabled. */
4521 		qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt,
4522 				    rcd->rcvhdrqtailaddr_phys);
4523 		qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt,
4524 				    rcd->rcvhdrq_phys);
4525 		rcd->seq_cnt = 1;
4526 	}
4527 	if (op & QIB_RCVCTRL_CTXT_DIS)
4528 		ppd->p_rcvctrl &=
4529 			~(mask << SYM_LSB(RcvCtrl_0, ContextEnableKernel));
4530 	if (op & QIB_RCVCTRL_BP_ENB)
4531 		dd->rcvctrl |= mask << SYM_LSB(RcvCtrl, dontDropRHQFull);
4532 	if (op & QIB_RCVCTRL_BP_DIS)
4533 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, dontDropRHQFull));
4534 	if (op & QIB_RCVCTRL_INTRAVAIL_ENB)
4535 		dd->rcvctrl |= (mask << SYM_LSB(RcvCtrl, IntrAvail));
4536 	if (op & QIB_RCVCTRL_INTRAVAIL_DIS)
4537 		dd->rcvctrl &= ~(mask << SYM_LSB(RcvCtrl, IntrAvail));
4538 	/*
4539 	 * Decide which registers to write depending on the ops enabled.
4540 	 * Special case is "flush" (no bits set at all)
4541 	 * which needs to write both.
4542 	 */
4543 	if (op == 0 || (op & RCVCTRL_COMMON_MODS))
4544 		qib_write_kreg(dd, kr_rcvctrl, dd->rcvctrl);
4545 	if (op == 0 || (op & RCVCTRL_PORT_MODS))
4546 		qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
4547 	if ((op & QIB_RCVCTRL_CTXT_ENB) && dd->rcd[ctxt]) {
4548 		/*
4549 		 * Init the context registers also; if we were
4550 		 * disabled, tail and head should both be zero
4551 		 * already from the enable, but since we don't
4552 		 * know, we have to do it explicitly.
4553 		 */
4554 		val = qib_read_ureg32(dd, ur_rcvegrindextail, ctxt);
4555 		qib_write_ureg(dd, ur_rcvegrindexhead, val, ctxt);
4556 
4557 		/* be sure enabling write seen; hd/tl should be 0 */
4558 		(void) qib_read_kreg32(dd, kr_scratch);
4559 		val = qib_read_ureg32(dd, ur_rcvhdrtail, ctxt);
4560 		dd->rcd[ctxt]->head = val;
4561 		/* If kctxt, interrupt on next receive. */
4562 		if (ctxt < dd->first_user_ctxt)
4563 			val |= dd->rhdrhead_intr_off;
4564 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4565 	} else if ((op & QIB_RCVCTRL_INTRAVAIL_ENB) &&
4566 		dd->rcd[ctxt] && dd->rhdrhead_intr_off) {
4567 		/* arm rcv interrupt */
4568 		val = dd->rcd[ctxt]->head | dd->rhdrhead_intr_off;
4569 		qib_write_ureg(dd, ur_rcvhdrhead, val, ctxt);
4570 	}
4571 	if (op & QIB_RCVCTRL_CTXT_DIS) {
4572 		unsigned f;
4573 
4574 		/* Now that the context is disabled, clear these registers. */
4575 		if (ctxt >= 0) {
4576 			qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr, ctxt, 0);
4577 			qib_write_kreg_ctxt(dd, krc_rcvhdraddr, ctxt, 0);
4578 			for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4579 				qib_write_ureg(dd, ur_rcvflowtable + f,
4580 					       TIDFLOW_ERRBITS, ctxt);
4581 		} else {
4582 			unsigned i;
4583 
4584 			for (i = 0; i < dd->cfgctxts; i++) {
4585 				qib_write_kreg_ctxt(dd, krc_rcvhdrtailaddr,
4586 						    i, 0);
4587 				qib_write_kreg_ctxt(dd, krc_rcvhdraddr, i, 0);
4588 				for (f = 0; f < NUM_TIDFLOWS_CTXT; f++)
4589 					qib_write_ureg(dd, ur_rcvflowtable + f,
4590 						       TIDFLOW_ERRBITS, i);
4591 			}
4592 		}
4593 	}
4594 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
4595 }
4596 
4597 /*
4598  * Modify the SENDCTRL register in chip-specific way. This
4599  * is a function where there are multiple such registers with
4600  * slightly different layouts.
4601  * The chip doesn't allow back-to-back sendctrl writes, so write
4602  * the scratch register after writing sendctrl.
4603  *
4604  * Which register is written depends on the operation.
4605  * Most operate on the common register, while
4606  * SEND_ENB and SEND_DIS operate on the per-port ones.
4607  * SEND_ENB is included in common because it can change SPCL_TRIG
4608  */
4609 #define SENDCTRL_COMMON_MODS (\
4610 	QIB_SENDCTRL_CLEAR | \
4611 	QIB_SENDCTRL_AVAIL_DIS | \
4612 	QIB_SENDCTRL_AVAIL_ENB | \
4613 	QIB_SENDCTRL_AVAIL_BLIP | \
4614 	QIB_SENDCTRL_DISARM | \
4615 	QIB_SENDCTRL_DISARM_ALL | \
4616 	QIB_SENDCTRL_SEND_ENB)
4617 
4618 #define SENDCTRL_PORT_MODS (\
4619 	QIB_SENDCTRL_CLEAR | \
4620 	QIB_SENDCTRL_SEND_ENB | \
4621 	QIB_SENDCTRL_SEND_DIS | \
4622 	QIB_SENDCTRL_FLUSH)
4623 
4624 static void sendctrl_7322_mod(struct qib_pportdata *ppd, u32 op)
4625 {
4626 	struct qib_devdata *dd = ppd->dd;
4627 	u64 tmp_dd_sendctrl;
4628 	unsigned long flags;
4629 
4630 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
4631 
4632 	/* First the dd ones that are "sticky", saved in shadow */
4633 	if (op & QIB_SENDCTRL_CLEAR)
4634 		dd->sendctrl = 0;
4635 	if (op & QIB_SENDCTRL_AVAIL_DIS)
4636 		dd->sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4637 	else if (op & QIB_SENDCTRL_AVAIL_ENB) {
4638 		dd->sendctrl |= SYM_MASK(SendCtrl, SendBufAvailUpd);
4639 		if (dd->flags & QIB_USE_SPCL_TRIG)
4640 			dd->sendctrl |= SYM_MASK(SendCtrl, SpecialTriggerEn);
4641 	}
4642 
4643 	/* Then the ppd ones that are "sticky", saved in shadow */
4644 	if (op & QIB_SENDCTRL_SEND_DIS)
4645 		ppd->p_sendctrl &= ~SYM_MASK(SendCtrl_0, SendEnable);
4646 	else if (op & QIB_SENDCTRL_SEND_ENB)
4647 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, SendEnable);
4648 
4649 	if (op & QIB_SENDCTRL_DISARM_ALL) {
4650 		u32 i, last;
4651 
4652 		tmp_dd_sendctrl = dd->sendctrl;
4653 		last = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
4654 		/*
4655 		 * Disarm any buffers that are not yet launched,
4656 		 * disabling updates until done.
4657 		 */
4658 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4659 		for (i = 0; i < last; i++) {
4660 			qib_write_kreg(dd, kr_sendctrl,
4661 				       tmp_dd_sendctrl |
4662 				       SYM_MASK(SendCtrl, Disarm) | i);
4663 			qib_write_kreg(dd, kr_scratch, 0);
4664 		}
4665 	}
4666 
4667 	if (op & QIB_SENDCTRL_FLUSH) {
4668 		u64 tmp_ppd_sendctrl = ppd->p_sendctrl;
4669 
4670 		/*
4671 		 * Now drain all the fifos.  The Abort bit should never be
4672 		 * needed, so for now, at least, we don't use it.
4673 		 */
4674 		tmp_ppd_sendctrl |=
4675 			SYM_MASK(SendCtrl_0, TxeDrainRmFifo) |
4676 			SYM_MASK(SendCtrl_0, TxeDrainLaFifo) |
4677 			SYM_MASK(SendCtrl_0, TxeBypassIbc);
4678 		qib_write_kreg_port(ppd, krp_sendctrl, tmp_ppd_sendctrl);
4679 		qib_write_kreg(dd, kr_scratch, 0);
4680 	}
4681 
4682 	tmp_dd_sendctrl = dd->sendctrl;
4683 
4684 	if (op & QIB_SENDCTRL_DISARM)
4685 		tmp_dd_sendctrl |= SYM_MASK(SendCtrl, Disarm) |
4686 			((op & QIB_7322_SendCtrl_DisarmSendBuf_RMASK) <<
4687 			 SYM_LSB(SendCtrl, DisarmSendBuf));
4688 	if ((op & QIB_SENDCTRL_AVAIL_BLIP) &&
4689 	    (dd->sendctrl & SYM_MASK(SendCtrl, SendBufAvailUpd)))
4690 		tmp_dd_sendctrl &= ~SYM_MASK(SendCtrl, SendBufAvailUpd);
4691 
4692 	if (op == 0 || (op & SENDCTRL_COMMON_MODS)) {
4693 		qib_write_kreg(dd, kr_sendctrl, tmp_dd_sendctrl);
4694 		qib_write_kreg(dd, kr_scratch, 0);
4695 	}
4696 
4697 	if (op == 0 || (op & SENDCTRL_PORT_MODS)) {
4698 		qib_write_kreg_port(ppd, krp_sendctrl, ppd->p_sendctrl);
4699 		qib_write_kreg(dd, kr_scratch, 0);
4700 	}
4701 
4702 	if (op & QIB_SENDCTRL_AVAIL_BLIP) {
4703 		qib_write_kreg(dd, kr_sendctrl, dd->sendctrl);
4704 		qib_write_kreg(dd, kr_scratch, 0);
4705 	}
4706 
4707 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
4708 
4709 	if (op & QIB_SENDCTRL_FLUSH) {
4710 		u32 v;
4711 		/*
4712 		 * ensure writes have hit chip, then do a few
4713 		 * more reads, to allow DMA of pioavail registers
4714 		 * to occur, so in-memory copy is in sync with
4715 		 * the chip.  Not always safe to sleep.
4716 		 */
4717 		v = qib_read_kreg32(dd, kr_scratch);
4718 		qib_write_kreg(dd, kr_scratch, v);
4719 		v = qib_read_kreg32(dd, kr_scratch);
4720 		qib_write_kreg(dd, kr_scratch, v);
4721 		qib_read_kreg32(dd, kr_scratch);
4722 	}
4723 }
4724 
4725 #define _PORT_VIRT_FLAG 0x8000U /* "virtual", need adjustments */
4726 #define _PORT_64BIT_FLAG 0x10000U /* not "virtual", but 64bit */
4727 #define _PORT_CNTR_IDXMASK 0x7fffU /* mask off flags above */
4728 
4729 /**
4730  * qib_portcntr_7322 - read a per-port chip counter
4731  * @ppd: the qlogic_ib pport
4732  * @reg: the counter to read (not a chip offset)
4733  */
4734 static u64 qib_portcntr_7322(struct qib_pportdata *ppd, u32 reg)
4735 {
4736 	struct qib_devdata *dd = ppd->dd;
4737 	u64 ret = 0ULL;
4738 	u16 creg;
4739 	/* 0xffff for unimplemented or synthesized counters */
4740 	static const u32 xlator[] = {
4741 		[QIBPORTCNTR_PKTSEND] = crp_pktsend | _PORT_64BIT_FLAG,
4742 		[QIBPORTCNTR_WORDSEND] = crp_wordsend | _PORT_64BIT_FLAG,
4743 		[QIBPORTCNTR_PSXMITDATA] = crp_psxmitdatacount,
4744 		[QIBPORTCNTR_PSXMITPKTS] = crp_psxmitpktscount,
4745 		[QIBPORTCNTR_PSXMITWAIT] = crp_psxmitwaitcount,
4746 		[QIBPORTCNTR_SENDSTALL] = crp_sendstall,
4747 		[QIBPORTCNTR_PKTRCV] = crp_pktrcv | _PORT_64BIT_FLAG,
4748 		[QIBPORTCNTR_PSRCVDATA] = crp_psrcvdatacount,
4749 		[QIBPORTCNTR_PSRCVPKTS] = crp_psrcvpktscount,
4750 		[QIBPORTCNTR_RCVEBP] = crp_rcvebp,
4751 		[QIBPORTCNTR_RCVOVFL] = crp_rcvovfl,
4752 		[QIBPORTCNTR_WORDRCV] = crp_wordrcv | _PORT_64BIT_FLAG,
4753 		[QIBPORTCNTR_RXDROPPKT] = 0xffff, /* not needed  for 7322 */
4754 		[QIBPORTCNTR_RXLOCALPHYERR] = crp_rxotherlocalphyerr,
4755 		[QIBPORTCNTR_RXVLERR] = crp_rxvlerr,
4756 		[QIBPORTCNTR_ERRICRC] = crp_erricrc,
4757 		[QIBPORTCNTR_ERRVCRC] = crp_errvcrc,
4758 		[QIBPORTCNTR_ERRLPCRC] = crp_errlpcrc,
4759 		[QIBPORTCNTR_BADFORMAT] = crp_badformat,
4760 		[QIBPORTCNTR_ERR_RLEN] = crp_err_rlen,
4761 		[QIBPORTCNTR_IBSYMBOLERR] = crp_ibsymbolerr,
4762 		[QIBPORTCNTR_INVALIDRLEN] = crp_invalidrlen,
4763 		[QIBPORTCNTR_UNSUPVL] = crp_txunsupvl,
4764 		[QIBPORTCNTR_EXCESSBUFOVFL] = crp_excessbufferovfl,
4765 		[QIBPORTCNTR_ERRLINK] = crp_errlink,
4766 		[QIBPORTCNTR_IBLINKDOWN] = crp_iblinkdown,
4767 		[QIBPORTCNTR_IBLINKERRRECOV] = crp_iblinkerrrecov,
4768 		[QIBPORTCNTR_LLI] = crp_locallinkintegrityerr,
4769 		[QIBPORTCNTR_VL15PKTDROP] = crp_vl15droppedpkt,
4770 		[QIBPORTCNTR_ERRPKEY] = crp_errpkey,
4771 		/*
4772 		 * The next 3 aren't really counters, but were implemented
4773 		 * as counters in older chips, so this code still accesses
4774 		 * them as though they were counters.
4775 		 */
4776 		[QIBPORTCNTR_PSINTERVAL] = krp_psinterval,
4777 		[QIBPORTCNTR_PSSTART] = krp_psstart,
4778 		[QIBPORTCNTR_PSSTAT] = krp_psstat,
4779 		/* pseudo-counter, summed for all ports */
4780 		[QIBPORTCNTR_KHDROVFL] = 0xffff,
4781 	};
4782 
4783 	if (reg >= ARRAY_SIZE(xlator)) {
4784 		qib_devinfo(ppd->dd->pcidev,
4785 			 "Unimplemented portcounter %u\n", reg);
4786 		goto done;
4787 	}
4788 	creg = xlator[reg] & _PORT_CNTR_IDXMASK;
4789 
4790 	/* handle non-counters and special cases first */
4791 	if (reg == QIBPORTCNTR_KHDROVFL) {
4792 		int i;
4793 
4794 		/* sum over all kernel contexts (skip if mini_init) */
4795 		for (i = 0; dd->rcd && i < dd->first_user_ctxt; i++) {
4796 			struct qib_ctxtdata *rcd = dd->rcd[i];
4797 
4798 			if (!rcd || rcd->ppd != ppd)
4799 				continue;
4800 			ret += read_7322_creg32(dd, cr_base_egrovfl + i);
4801 		}
4802 		goto done;
4803 	} else if (reg == QIBPORTCNTR_RXDROPPKT) {
4804 		/*
4805 		 * Used as part of the synthesis of port_rcv_errors
4806 		 * in the verbs code for IBTA counters.  Not needed for 7322,
4807 		 * because all the errors are already counted by other cntrs.
4808 		 */
4809 		goto done;
4810 	} else if (reg == QIBPORTCNTR_PSINTERVAL ||
4811 		   reg == QIBPORTCNTR_PSSTART || reg == QIBPORTCNTR_PSSTAT) {
4812 		/* were counters in older chips, now per-port kernel regs */
4813 		ret = qib_read_kreg_port(ppd, creg);
4814 		goto done;
4815 	}
4816 
4817 	/*
4818 	 * Only fast increment counters are 64 bits; use 32 bit reads to
4819 	 * avoid two independent reads when on Opteron.
4820 	 */
4821 	if (xlator[reg] & _PORT_64BIT_FLAG)
4822 		ret = read_7322_creg_port(ppd, creg);
4823 	else
4824 		ret = read_7322_creg32_port(ppd, creg);
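	/*
	 * Some counters need adjustment for activity during driver-initiated
	 * link transitions (e.g. DDR autonegotiation); apply the snapshots
	 * and deltas maintained by qib_7322_ib_updown().
	 */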
4825 	if (creg == crp_ibsymbolerr) {
4826 		if (ppd->cpspec->ibdeltainprog)
4827 			ret -= ret - ppd->cpspec->ibsymsnap;
4828 		ret -= ppd->cpspec->ibsymdelta;
4829 	} else if (creg == crp_iblinkerrrecov) {
4830 		if (ppd->cpspec->ibdeltainprog)
4831 			ret -= ret - ppd->cpspec->iblnkerrsnap;
4832 		ret -= ppd->cpspec->iblnkerrdelta;
4833 	} else if (creg == crp_errlink)
4834 		ret -= ppd->cpspec->ibmalfdelta;
4835 	else if (creg == crp_iblinkdown)
4836 		ret += ppd->cpspec->iblnkdowndelta;
4837 done:
4838 	return ret;
4839 }
4840 
4841 /*
4842  * Device counter names (not port-specific), one line per stat,
4843  * single string.  Used by utilities like ipathstats to print the stats
4844  * in a way which works for different versions of drivers, without changing
4845  * the utility.  Names need to be 12 chars or less (w/o newline), for proper
4846  * display by the utility.
4847  * Non-error counters are first.
4848  * Start of "error" counters is indicated by a leading "E " on the first
4849  * "error" counter; the "E " doesn't count toward the label length.
4850  * The EgrOvfl list needs to be last so we truncate them at the configured
4851  * context count for the device.
4852  * cntr7322indices contains the corresponding register indices.
4853  */
4854 static const char cntr7322names[] =
4855 	"Interrupts\n"
4856 	"HostBusStall\n"
4857 	"E RxTIDFull\n"
4858 	"RxTIDInvalid\n"
4859 	"RxTIDFloDrop\n" /* 7322 only */
4860 	"Ctxt0EgrOvfl\n"
4861 	"Ctxt1EgrOvfl\n"
4862 	"Ctxt2EgrOvfl\n"
4863 	"Ctxt3EgrOvfl\n"
4864 	"Ctxt4EgrOvfl\n"
4865 	"Ctxt5EgrOvfl\n"
4866 	"Ctxt6EgrOvfl\n"
4867 	"Ctxt7EgrOvfl\n"
4868 	"Ctxt8EgrOvfl\n"
4869 	"Ctxt9EgrOvfl\n"
4870 	"Ctx10EgrOvfl\n"
4871 	"Ctx11EgrOvfl\n"
4872 	"Ctx12EgrOvfl\n"
4873 	"Ctx13EgrOvfl\n"
4874 	"Ctx14EgrOvfl\n"
4875 	"Ctx15EgrOvfl\n"
4876 	"Ctx16EgrOvfl\n"
4877 	"Ctx17EgrOvfl\n"
4878 	;
4879 
4880 static const u32 cntr7322indices[] = {
4881 	cr_lbint | _PORT_64BIT_FLAG,
4882 	cr_lbstall | _PORT_64BIT_FLAG,
4883 	cr_tidfull,
4884 	cr_tidinvalid,
4885 	cr_rxtidflowdrop,
4886 	cr_base_egrovfl + 0,
4887 	cr_base_egrovfl + 1,
4888 	cr_base_egrovfl + 2,
4889 	cr_base_egrovfl + 3,
4890 	cr_base_egrovfl + 4,
4891 	cr_base_egrovfl + 5,
4892 	cr_base_egrovfl + 6,
4893 	cr_base_egrovfl + 7,
4894 	cr_base_egrovfl + 8,
4895 	cr_base_egrovfl + 9,
4896 	cr_base_egrovfl + 10,
4897 	cr_base_egrovfl + 11,
4898 	cr_base_egrovfl + 12,
4899 	cr_base_egrovfl + 13,
4900 	cr_base_egrovfl + 14,
4901 	cr_base_egrovfl + 15,
4902 	cr_base_egrovfl + 16,
4903 	cr_base_egrovfl + 17,
4904 };
4905 
4906 /*
4907  * same as cntr7322names and cntr7322indices, but for port-specific counters.
4908  * portcntr7322indices is somewhat complicated by some registers needing
4909  * adjustments of various kinds, and those are ORed with _PORT_VIRT_FLAG
4910  */
4911 static const char portcntr7322names[] =
4912 	"TxPkt\n"
4913 	"TxFlowPkt\n"
4914 	"TxWords\n"
4915 	"RxPkt\n"
4916 	"RxFlowPkt\n"
4917 	"RxWords\n"
4918 	"TxFlowStall\n"
4919 	"TxDmaDesc\n"  /* 7220 and 7322-only */
4920 	"E RxDlidFltr\n"  /* 7220 and 7322-only */
4921 	"IBStatusChng\n"
4922 	"IBLinkDown\n"
4923 	"IBLnkRecov\n"
4924 	"IBRxLinkErr\n"
4925 	"IBSymbolErr\n"
4926 	"RxLLIErr\n"
4927 	"RxBadFormat\n"
4928 	"RxBadLen\n"
4929 	"RxBufOvrfl\n"
4930 	"RxEBP\n"
4931 	"RxFlowCtlErr\n"
4932 	"RxICRCerr\n"
4933 	"RxLPCRCerr\n"
4934 	"RxVCRCerr\n"
4935 	"RxInvalLen\n"
4936 	"RxInvalPKey\n"
4937 	"RxPktDropped\n"
4938 	"TxBadLength\n"
4939 	"TxDropped\n"
4940 	"TxInvalLen\n"
4941 	"TxUnderrun\n"
4942 	"TxUnsupVL\n"
4943 	"RxLclPhyErr\n" /* 7220 and 7322-only from here down */
4944 	"RxVL15Drop\n"
4945 	"RxVlErr\n"
4946 	"XcessBufOvfl\n"
4947 	"RxQPBadCtxt\n" /* 7322-only from here down */
4948 	"TXBadHeader\n"
4949 	;
4950 
4951 static const u32 portcntr7322indices[] = {
4952 	QIBPORTCNTR_PKTSEND | _PORT_VIRT_FLAG,
4953 	crp_pktsendflow,
4954 	QIBPORTCNTR_WORDSEND | _PORT_VIRT_FLAG,
4955 	QIBPORTCNTR_PKTRCV | _PORT_VIRT_FLAG,
4956 	crp_pktrcvflowctrl,
4957 	QIBPORTCNTR_WORDRCV | _PORT_VIRT_FLAG,
4958 	QIBPORTCNTR_SENDSTALL | _PORT_VIRT_FLAG,
4959 	crp_txsdmadesc | _PORT_64BIT_FLAG,
4960 	crp_rxdlidfltr,
4961 	crp_ibstatuschange,
4962 	QIBPORTCNTR_IBLINKDOWN | _PORT_VIRT_FLAG,
4963 	QIBPORTCNTR_IBLINKERRRECOV | _PORT_VIRT_FLAG,
4964 	QIBPORTCNTR_ERRLINK | _PORT_VIRT_FLAG,
4965 	QIBPORTCNTR_IBSYMBOLERR | _PORT_VIRT_FLAG,
4966 	QIBPORTCNTR_LLI | _PORT_VIRT_FLAG,
4967 	QIBPORTCNTR_BADFORMAT | _PORT_VIRT_FLAG,
4968 	QIBPORTCNTR_ERR_RLEN | _PORT_VIRT_FLAG,
4969 	QIBPORTCNTR_RCVOVFL | _PORT_VIRT_FLAG,
4970 	QIBPORTCNTR_RCVEBP | _PORT_VIRT_FLAG,
4971 	crp_rcvflowctrlviol,
4972 	QIBPORTCNTR_ERRICRC | _PORT_VIRT_FLAG,
4973 	QIBPORTCNTR_ERRLPCRC | _PORT_VIRT_FLAG,
4974 	QIBPORTCNTR_ERRVCRC | _PORT_VIRT_FLAG,
4975 	QIBPORTCNTR_INVALIDRLEN | _PORT_VIRT_FLAG,
4976 	QIBPORTCNTR_ERRPKEY | _PORT_VIRT_FLAG,
4977 	QIBPORTCNTR_RXDROPPKT | _PORT_VIRT_FLAG,
4978 	crp_txminmaxlenerr,
4979 	crp_txdroppedpkt,
4980 	crp_txlenerr,
4981 	crp_txunderrun,
4982 	crp_txunsupvl,
4983 	QIBPORTCNTR_RXLOCALPHYERR | _PORT_VIRT_FLAG,
4984 	QIBPORTCNTR_VL15PKTDROP | _PORT_VIRT_FLAG,
4985 	QIBPORTCNTR_RXVLERR | _PORT_VIRT_FLAG,
4986 	QIBPORTCNTR_EXCESSBUFOVFL | _PORT_VIRT_FLAG,
4987 	crp_rxqpinvalidctxt,
4988 	crp_txhdrerr,
4989 };
4990 
4991 /* do all the setup to make the counter reads efficient later */
4992 static void init_7322_cntrnames(struct qib_devdata *dd)
4993 {
4994 	int i, j = 0;
4995 	char *s;
4996 
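	/*
	 * Count the device counter names; once the per-context EgrOvfl names
	 * start, stop after dd->cfgctxts of them so unconfigured contexts
	 * are not reported.
	 */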
4997 	for (i = 0, s = (char *)cntr7322names; s && j <= dd->cfgctxts;
4998 	     i++) {
4999 		/* we always have at least one counter before the egrovfl */
5000 		if (!j && !strncmp("Ctxt0EgrOvfl", s + 1, 12))
5001 			j = 1;
5002 		s = strchr(s + 1, '\n');
5003 		if (s && j)
5004 			j++;
5005 	}
5006 	dd->cspec->ncntrs = i;
5007 	if (!s)
5008 		/* full list; size is without terminating null */
5009 		dd->cspec->cntrnamelen = sizeof(cntr7322names) - 1;
5010 	else
5011 		dd->cspec->cntrnamelen = 1 + s - cntr7322names;
5012 	dd->cspec->cntrs = kmalloc(dd->cspec->ncntrs
5013 		* sizeof(u64), GFP_KERNEL);
5014 
5015 	for (i = 0, s = (char *)portcntr7322names; s; i++)
5016 		s = strchr(s + 1, '\n');
5017 	dd->cspec->nportcntrs = i - 1;
5018 	dd->cspec->portcntrnamelen = sizeof(portcntr7322names) - 1;
5019 	for (i = 0; i < dd->num_pports; ++i) {
5020 		dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
5021 			* sizeof(u64), GFP_KERNEL);
5022 	}
5023 }
5024 
5025 static u32 qib_read_7322cntrs(struct qib_devdata *dd, loff_t pos, char **namep,
5026 			      u64 **cntrp)
5027 {
5028 	u32 ret;
5029 
5030 	if (namep) {
5031 		ret = dd->cspec->cntrnamelen;
5032 		if (pos >= ret)
5033 			ret = 0; /* final read after getting everything */
5034 		else
5035 			*namep = (char *) cntr7322names;
5036 	} else {
5037 		u64 *cntr = dd->cspec->cntrs;
5038 		int i;
5039 
5040 		ret = dd->cspec->ncntrs * sizeof(u64);
5041 		if (!cntr || pos >= ret) {
5042 			/* everything read, or couldn't get memory */
5043 			ret = 0;
5044 			goto done;
5045 		}
5046 		*cntrp = cntr;
5047 		for (i = 0; i < dd->cspec->ncntrs; i++)
5048 			if (cntr7322indices[i] & _PORT_64BIT_FLAG)
5049 				*cntr++ = read_7322_creg(dd,
5050 							 cntr7322indices[i] &
5051 							 _PORT_CNTR_IDXMASK);
5052 			else
5053 				*cntr++ = read_7322_creg32(dd,
5054 							   cntr7322indices[i]);
5055 	}
5056 done:
5057 	return ret;
5058 }
5059 
5060 static u32 qib_read_7322portcntrs(struct qib_devdata *dd, loff_t pos, u32 port,
5061 				  char **namep, u64 **cntrp)
5062 {
5063 	u32 ret;
5064 
5065 	if (namep) {
5066 		ret = dd->cspec->portcntrnamelen;
5067 		if (pos >= ret)
5068 			ret = 0; /* final read after getting everything */
5069 		else
5070 			*namep = (char *)portcntr7322names;
5071 	} else {
5072 		struct qib_pportdata *ppd = &dd->pport[port];
5073 		u64 *cntr = ppd->cpspec->portcntrs;
5074 		int i;
5075 
5076 		ret = dd->cspec->nportcntrs * sizeof(u64);
5077 		if (!cntr || pos >= ret) {
5078 			/* everything read, or couldn't get memory */
5079 			ret = 0;
5080 			goto done;
5081 		}
5082 		*cntrp = cntr;
5083 		for (i = 0; i < dd->cspec->nportcntrs; i++) {
5084 			if (portcntr7322indices[i] & _PORT_VIRT_FLAG)
5085 				*cntr++ = qib_portcntr_7322(ppd,
5086 					portcntr7322indices[i] &
5087 					_PORT_CNTR_IDXMASK);
5088 			else if (portcntr7322indices[i] & _PORT_64BIT_FLAG)
5089 				*cntr++ = read_7322_creg_port(ppd,
5090 					   portcntr7322indices[i] &
5091 					    _PORT_CNTR_IDXMASK);
5092 			else
5093 				*cntr++ = read_7322_creg32_port(ppd,
5094 					   portcntr7322indices[i]);
5095 		}
5096 	}
5097 done:
5098 	return ret;
5099 }
5100 
5101 /**
5102  * qib_get_7322_faststats - get word counters from chip before they overflow
5103  * @t: the stats timer; the qib_devdata is recovered via from_timer()
5104  *
5105  * Largely vestigial: the IBA7322 has no "small fast counters", so the
5106  * only real purpose of this function is to maintain the notion of
5107  * "active time", which in turn is only logged into the eeprom,
5108  * which we don't have, yet, for 7322-based boards.
5109  *
5110  * Called as a timer callback; re-arms itself via mod_timer().
5111  */
5112 static void qib_get_7322_faststats(struct timer_list *t)
5113 {
5114 	struct qib_devdata *dd = from_timer(dd, t, stats_timer);
5115 	struct qib_pportdata *ppd;
5116 	unsigned long flags;
5117 	u64 traffic_wds;
5118 	int pidx;
5119 
5120 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5121 		ppd = dd->pport + pidx;
5122 
5123 		/*
5124 		 * If the port isn't enabled or isn't operational, or if
5125 		 * diags are running (which can cause memory diags to fail),
5126 		 * skip this port this time.
5127 		 */
5128 		if (!ppd->link_speed_supported || !(dd->flags & QIB_INITTED)
5129 		    || dd->diag_client)
5130 			continue;
5131 
5132 		/*
5133 		 * Maintain an activity timer, based on traffic
5134 		 * exceeding a threshold, so we need to check the word-counts
5135 		 * even if they are 64-bit.
5136 		 */
5137 		traffic_wds = qib_portcntr_7322(ppd, QIBPORTCNTR_WORDRCV) +
5138 			qib_portcntr_7322(ppd, QIBPORTCNTR_WORDSEND);
5139 		spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
5140 		traffic_wds -= ppd->dd->traffic_wds;
5141 		ppd->dd->traffic_wds += traffic_wds;
5142 		spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
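		/*
		 * If the link has been up at QDR past qdr_dfe_time, stop the
		 * QDR DFE adjustment: restore the static adaptation setting
		 * and force the H1 value again.
		 */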
5143 		if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
5144 						QIB_IB_QDR) &&
5145 		    (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
5146 				    QIBL_LINKACTIVE)) &&
5147 		    ppd->cpspec->qdr_dfe_time &&
5148 		    time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
5149 			ppd->cpspec->qdr_dfe_on = 0;
5150 
5151 			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
5152 					    ppd->dd->cspec->r1 ?
5153 					    QDR_STATIC_ADAPT_INIT_R1 :
5154 					    QDR_STATIC_ADAPT_INIT);
5155 			force_h1(ppd);
5156 		}
5157 	}
5158 	mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
5159 }
5160 
5161 /*
5162  * If we were using MSIx, try to fall back to INTx.
5163  */
5164 static int qib_7322_intr_fallback(struct qib_devdata *dd)
5165 {
5166 	if (!dd->cspec->num_msix_entries)
5167 		return 0; /* already using INTx */
5168 
5169 	qib_devinfo(dd->pcidev,
5170 		"MSIx interrupt not detected, trying INTx interrupts\n");
5171 	qib_7322_free_irq(dd);
5172 	if (pci_alloc_irq_vectors(dd->pcidev, 1, 1, PCI_IRQ_LEGACY) < 0)
5173 		qib_dev_err(dd, "Failed to enable INTx\n");
5174 	qib_setup_7322_interrupt(dd, 0);
5175 	return 1;
5176 }
5177 
5178 /*
5179  * Reset the XGXS (between serdes and IBC).  Slightly less intrusive
5180  * than resetting the IBC or external link state, and useful in some
5181  * cases to cause some retraining.  To do this right, we reset IBC
5182  * as well, then return to previous state (which may be still in reset)
5183  * as well, then return to the previous state (which may still be in reset).
5184  * NOTE: some callers of this "know" this writes the current value
5185  * of cpspec->ibcctrl_a as part of its operation, so if that changes,
5186  */
5187 static void qib_7322_mini_pcs_reset(struct qib_pportdata *ppd)
5188 {
5189 	u64 val;
5190 	struct qib_devdata *dd = ppd->dd;
5191 	const u64 reset_bits = SYM_MASK(IBPCSConfig_0, xcv_rreset) |
5192 		SYM_MASK(IBPCSConfig_0, xcv_treset) |
5193 		SYM_MASK(IBPCSConfig_0, tx_rx_reset);
5194 
5195 	val = qib_read_kreg_port(ppd, krp_ib_pcsconfig);
5196 	qib_write_kreg(dd, kr_hwerrmask,
5197 		       dd->cspec->hwerrmask & ~HWE_MASK(statusValidNoEop));
5198 	qib_write_kreg_port(ppd, krp_ibcctrl_a,
5199 			    ppd->cpspec->ibcctrl_a &
5200 			    ~SYM_MASK(IBCCtrlA_0, IBLinkEn));
5201 
5202 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val | reset_bits);
5203 	qib_read_kreg32(dd, kr_scratch);
5204 	qib_write_kreg_port(ppd, krp_ib_pcsconfig, val & ~reset_bits);
5205 	qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
5206 	qib_write_kreg(dd, kr_scratch, 0ULL);
5207 	qib_write_kreg(dd, kr_hwerrclear,
5208 		       SYM_MASK(HwErrClear, statusValidNoEopClear));
5209 	qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
5210 }
5211 
5212 /*
5213  * This code for non-IBTA-compliant IB speed negotiation is only known to
5214  * work for the SDR to DDR transition, and only between an HCA and a switch
5215  * with recent firmware.  It is based on observed heuristics, rather than
5216  * actual knowledge of the non-compliant speed negotiation.
5217  * It has a number of hard-coded fields, since the hope is to rewrite this
5218  * when a spec is available on how the negotiation is intended to work.
5219  */
5220 static void autoneg_7322_sendpkt(struct qib_pportdata *ppd, u32 *hdr,
5221 				 u32 dcnt, u32 *data)
5222 {
5223 	int i;
5224 	u64 pbc;
5225 	u32 __iomem *piobuf;
5226 	u32 pnum, control, len;
5227 	struct qib_devdata *dd = ppd->dd;
5228 
5229 	i = 0;
5230 	len = 7 + dcnt + 1; /* 7 dword header, dcnt data dwords, 1 dword icrc */
5231 	control = qib_7322_setpbc_control(ppd, len, 0, 15);
5232 	pbc = ((u64) control << 32) | len;
5233 	while (!(piobuf = qib_7322_getsendbuf(ppd, pbc, &pnum))) {
5234 		if (i++ > 15)
5235 			return;
5236 		udelay(2);
5237 	}
5238 	/* disable header check on this packet, since it can't be valid */
5239 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_DIS1, NULL);
5240 	writeq(pbc, piobuf);
5241 	qib_flush_wc();
5242 	qib_pio_copy(piobuf + 2, hdr, 7);
5243 	qib_pio_copy(piobuf + 9, data, dcnt);
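	/*
	 * On boards flagged QIB_USE_SPCL_TRIG, the send is launched by
	 * writing the magic trigger value at the buffer's trigger offset.
	 */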
5244 	if (dd->flags & QIB_USE_SPCL_TRIG) {
5245 		u32 spcl_off = (pnum >= dd->piobcnt2k) ? 2047 : 1023;
5246 
5247 		qib_flush_wc();
5248 		__raw_writel(0xaebecede, piobuf + spcl_off);
5249 	}
5250 	qib_flush_wc();
5251 	qib_sendbuf_done(dd, pnum);
5252 	/* and re-enable hdr check */
5253 	dd->f_txchk_change(dd, pnum, 1, TXCHK_CHG_TYPE_ENAB1, NULL);
5254 }
5255 
5256 /*
5257  * _start packet gets sent twice at start, _done gets sent twice at end
5258  */
5259 static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
5260 {
5261 	struct qib_devdata *dd = ppd->dd;
5262 	static u32 swapped;
5263 	u32 dw, i, hcnt, dcnt, *data;
5264 	static u32 hdr[7] = { 0xf002ffff, 0x48ffff, 0x6400abba };
5265 	static u32 madpayload_start[0x40] = {
5266 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5267 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5268 		0x1, 0x1388, 0x15e, 0x1, /* rest 0's */
5269 		};
5270 	static u32 madpayload_done[0x40] = {
5271 		0x1810103, 0x1, 0x0, 0x0, 0x2c90000, 0x2c9, 0x0, 0x0,
5272 		0xffffffff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
5273 		0x40000001, 0x1388, 0x15e, /* rest 0's */
5274 		};
5275 
5276 	dcnt = ARRAY_SIZE(madpayload_start);
5277 	hcnt = ARRAY_SIZE(hdr);
5278 	if (!swapped) {
5279 		/* for maintainability, do it at runtime */
5280 		for (i = 0; i < hcnt; i++) {
5281 			dw = (__force u32) cpu_to_be32(hdr[i]);
5282 			hdr[i] = dw;
5283 		}
5284 		for (i = 0; i < dcnt; i++) {
5285 			dw = (__force u32) cpu_to_be32(madpayload_start[i]);
5286 			madpayload_start[i] = dw;
5287 			dw = (__force u32) cpu_to_be32(madpayload_done[i]);
5288 			madpayload_done[i] = dw;
5289 		}
5290 		swapped = 1;
5291 	}
5292 
5293 	data = which ? madpayload_done : madpayload_start;
5294 
5295 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5296 	qib_read_kreg64(dd, kr_scratch);
5297 	udelay(2);
5298 	autoneg_7322_sendpkt(ppd, hdr, dcnt, data);
5299 	qib_read_kreg64(dd, kr_scratch);
5300 	udelay(2);
5301 }
5302 
5303 /*
5304  * Do the absolute minimum to cause an IB speed change, and make it
5305  * ready, but don't actually trigger the change.   The caller will
5306  * do that when ready (if link is in Polling training state, it will
5307  * happen immediately, otherwise when link next goes down)
5308  *
5309  * This routine should only be used as part of the DDR autonegotiation
5310  * code for devices that are not compliant with IB 1.2 (or code that
5311  * fixes things up for same).
5312  *
5313  * When the link has gone down and autoneg is enabled, or autoneg has
5314  * failed and we give up until next time, we set both speeds, and then
5315  * we want IBTA 1.2 negotiation enabled as well as "use max enabled speed".
5316  */
5317 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
5318 {
5319 	u64 newctrlb;
5320 
5321 	newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
5322 				    IBA7322_IBC_IBTA_1_2_MASK |
5323 				    IBA7322_IBC_MAX_SPEED_MASK);
5324 
5325 	if (speed & (speed - 1)) /* multiple speeds */
5326 		newctrlb |= (speed << IBA7322_IBC_SPEED_LSB) |
5327 				    IBA7322_IBC_IBTA_1_2_MASK |
5328 				    IBA7322_IBC_MAX_SPEED_MASK;
5329 	else
5330 		newctrlb |= speed == QIB_IB_QDR ?
5331 			IBA7322_IBC_SPEED_QDR | IBA7322_IBC_IBTA_1_2_MASK :
5332 			((speed == QIB_IB_DDR ?
5333 			  IBA7322_IBC_SPEED_DDR : IBA7322_IBC_SPEED_SDR));
5334 
5335 	if (newctrlb == ppd->cpspec->ibcctrl_b)
5336 		return;
5337 
5338 	ppd->cpspec->ibcctrl_b = newctrlb;
5339 	qib_write_kreg_port(ppd, krp_ibcctrl_b, ppd->cpspec->ibcctrl_b);
5340 	qib_write_kreg(ppd->dd, kr_scratch, 0);
5341 }
5342 
5343 /*
5344  * This routine is only used when we are not talking to another
5345  * IB 1.2-compliant device that we think can do DDR.
5346  * (This includes all existing switch chips as of Oct 2007.)
5347  * 1.2-compliant devices go directly to DDR prior to reaching INIT
5348  */
5349 static void try_7322_autoneg(struct qib_pportdata *ppd)
5350 {
5351 	unsigned long flags;
5352 
5353 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5354 	ppd->lflags |= QIBL_IB_AUTONEG_INPROG;
5355 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5356 	qib_autoneg_7322_send(ppd, 0);
5357 	set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5358 	qib_7322_mini_pcs_reset(ppd);
5359 	/* 2 msec is minimum length of a poll cycle */
5360 	queue_delayed_work(ib_wq, &ppd->cpspec->autoneg_work,
5361 			   msecs_to_jiffies(2));
5362 }
5363 
5364 /*
5365  * Handle the empirically determined mechanism for auto-negotiation
5366  * of DDR speed with switches.
5367  */
5368 static void autoneg_7322_work(struct work_struct *work)
5369 {
5370 	struct qib_pportdata *ppd;
5371 	u32 i;
5372 	unsigned long flags;
5373 
5374 	ppd = container_of(work, struct qib_chippport_specific,
5375 			    autoneg_work.work)->ppd;
5376 
5377 	/*
5378 	 * Busy wait for this first part; it should be at most a
5379 	 * few hundred usec, since we scheduled ourselves for 2 msec.
5380 	 */
5381 	for (i = 0; i < 25; i++) {
5382 		if (SYM_FIELD(ppd->lastibcstat, IBCStatusA_0, LinkState)
5383 		     == IB_7322_LT_STATE_POLLQUIET) {
5384 			qib_set_linkstate(ppd, QIB_IB_LINKDOWN_DISABLE);
5385 			break;
5386 		}
5387 		udelay(100);
5388 	}
5389 
5390 	if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG))
5391 		goto done; /* we got there early or told to stop */
5392 
5393 	/* we expect this to time out */
5394 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5395 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5396 			       msecs_to_jiffies(90)))
5397 		goto done;
5398 	qib_7322_mini_pcs_reset(ppd);
5399 
5400 	/* we expect this to time out */
5401 	if (wait_event_timeout(ppd->cpspec->autoneg_wait,
5402 			       !(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5403 			       msecs_to_jiffies(1700)))
5404 		goto done;
5405 	qib_7322_mini_pcs_reset(ppd);
5406 
5407 	set_7322_ibspeed_fast(ppd, QIB_IB_SDR);
5408 
5409 	/*
5410 	 * Wait up to 250 msec for link to train and get to INIT at DDR;
5411 	 * this should terminate early.
5412 	 */
5413 	wait_event_timeout(ppd->cpspec->autoneg_wait,
5414 		!(ppd->lflags & QIBL_IB_AUTONEG_INPROG),
5415 		msecs_to_jiffies(250));
5416 done:
5417 	if (ppd->lflags & QIBL_IB_AUTONEG_INPROG) {
5418 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5419 		ppd->lflags &= ~QIBL_IB_AUTONEG_INPROG;
5420 		if (ppd->cpspec->autoneg_tries == AUTONEG_TRIES) {
5421 			ppd->lflags |= QIBL_IB_AUTONEG_FAILED;
5422 			ppd->cpspec->autoneg_tries = 0;
5423 		}
5424 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5425 		set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5426 	}
5427 }
5428 
5429 /*
5430  * This routine is used to request that the IPG be set in the QLogic switch.
5431  * Only called if r1.
5432  */
5433 static void try_7322_ipg(struct qib_pportdata *ppd)
5434 {
5435 	struct qib_ibport *ibp = &ppd->ibport_data;
5436 	struct ib_mad_send_buf *send_buf;
5437 	struct ib_mad_agent *agent;
5438 	struct ib_smp *smp;
5439 	unsigned delay;
5440 	int ret;
5441 
5442 	agent = ibp->rvp.send_agent;
5443 	if (!agent)
5444 		goto retry;
5445 
5446 	send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR,
5447 				      IB_MGMT_MAD_DATA, GFP_ATOMIC,
5448 				      IB_MGMT_BASE_VERSION);
5449 	if (IS_ERR(send_buf))
5450 		goto retry;
5451 
5452 	if (!ibp->smi_ah) {
5453 		struct ib_ah *ah;
5454 
5455 		ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
5456 		if (IS_ERR(ah))
5457 			ret = PTR_ERR(ah);
5458 		else {
5459 			send_buf->ah = ah;
5460 			ibp->smi_ah = ibah_to_rvtah(ah);
5461 			ret = 0;
5462 		}
5463 	} else {
5464 		send_buf->ah = &ibp->smi_ah->ibah;
5465 		ret = 0;
5466 	}
5467 
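	/* build a hop-1 directed-route vendor SMP requesting the IPG setting */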
5468 	smp = send_buf->mad;
5469 	smp->base_version = IB_MGMT_BASE_VERSION;
5470 	smp->mgmt_class = IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE;
5471 	smp->class_version = 1;
5472 	smp->method = IB_MGMT_METHOD_SEND;
5473 	smp->hop_cnt = 1;
5474 	smp->attr_id = QIB_VENDOR_IPG;
5475 	smp->attr_mod = 0;
5476 
5477 	if (!ret)
5478 		ret = ib_post_send_mad(send_buf, NULL);
5479 	if (ret)
5480 		ib_free_send_mad(send_buf);
5481 retry:
5482 	delay = 2 << ppd->cpspec->ipg_tries;
5483 	queue_delayed_work(ib_wq, &ppd->cpspec->ipg_work,
5484 			   msecs_to_jiffies(delay));
5485 }
5486 
5487 /*
5488  * Timeout handler for setting IPG.
5489  * Only called if r1.
5490  */
5491 static void ipg_7322_work(struct work_struct *work)
5492 {
5493 	struct qib_pportdata *ppd;
5494 
5495 	ppd = container_of(work, struct qib_chippport_specific,
5496 			   ipg_work.work)->ppd;
5497 	if ((ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED | QIBL_LINKACTIVE))
5498 	    && ++ppd->cpspec->ipg_tries <= 10)
5499 		try_7322_ipg(ppd);
5500 }
5501 
5502 static u32 qib_7322_iblink_state(u64 ibcs)
5503 {
5504 	u32 state = (u32)SYM_FIELD(ibcs, IBCStatusA_0, LinkState);
5505 
5506 	switch (state) {
5507 	case IB_7322_L_STATE_INIT:
5508 		state = IB_PORT_INIT;
5509 		break;
5510 	case IB_7322_L_STATE_ARM:
5511 		state = IB_PORT_ARMED;
5512 		break;
5513 	case IB_7322_L_STATE_ACTIVE:
5514 		/* fall through */
5515 	case IB_7322_L_STATE_ACT_DEFER:
5516 		state = IB_PORT_ACTIVE;
5517 		break;
5518 	default: /* fall through */
5519 	case IB_7322_L_STATE_DOWN:
5520 		state = IB_PORT_DOWN;
5521 		break;
5522 	}
5523 	return state;
5524 }
5525 
5526 /* returns the IBTA port state, rather than the IBC link training state */
5527 static u8 qib_7322_phys_portstate(u64 ibcs)
5528 {
5529 	u8 state = (u8)SYM_FIELD(ibcs, IBCStatusA_0, LinkTrainingState);
5530 	return qib_7322_physportstate[state];
5531 }
5532 
5533 static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
5534 {
5535 	int ret = 0, symadj = 0;
5536 	unsigned long flags;
5537 	int mult;
5538 
5539 	spin_lock_irqsave(&ppd->lflags_lock, flags);
5540 	ppd->lflags &= ~QIBL_IB_FORCE_NOTIFY;
5541 	spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5542 
5543 	/* Update our picture of width and speed from chip */
5544 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) {
5545 		ppd->link_speed_active = QIB_IB_QDR;
5546 		mult = 4;
5547 	} else if (ibcs & SYM_MASK(IBCStatusA_0, LinkSpeedActive)) {
5548 		ppd->link_speed_active = QIB_IB_DDR;
5549 		mult = 2;
5550 	} else {
5551 		ppd->link_speed_active = QIB_IB_SDR;
5552 		mult = 1;
5553 	}
5554 	if (ibcs & SYM_MASK(IBCStatusA_0, LinkWidthActive)) {
5555 		ppd->link_width_active = IB_WIDTH_4X;
5556 		mult *= 4;
5557 	} else
5558 		ppd->link_width_active = IB_WIDTH_1X;
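	/* the combined speed and width multiplier maps to an IB rate, then a delay */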
5559 	ppd->delay_mult = ib_rate_to_delay[mult_to_ib_rate(mult)];
5560 
5561 	if (!ibup) {
5562 		u64 clr;
5563 
5564 		/* Link went down. */
5565 		/* do IPG MAD again after linkdown, even if last time failed */
5566 		ppd->cpspec->ipg_tries = 0;
5567 		clr = qib_read_kreg_port(ppd, krp_ibcstatus_b) &
5568 			(SYM_MASK(IBCStatusB_0, heartbeat_timed_out) |
5569 			 SYM_MASK(IBCStatusB_0, heartbeat_crosstalk));
5570 		if (clr)
5571 			qib_write_kreg_port(ppd, krp_ibcstatus_b, clr);
5572 		if (!(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5573 				     QIBL_IB_AUTONEG_INPROG)))
5574 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5575 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5576 			struct qib_qsfp_data *qd =
5577 				&ppd->cpspec->qsfp_data;
5578 			/* unlock the Tx settings, speed may change */
5579 			qib_write_kreg_port(ppd, krp_tx_deemph_override,
5580 				SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
5581 				reset_tx_deemphasis_override));
5582 			qib_cancel_sends(ppd);
5583 			/* on link down, ensure sane pcs state */
5584 			qib_7322_mini_pcs_reset(ppd);
5585 			/* schedule the qsfp refresh which should turn the link
5586 			   off */
5587 			if (ppd->dd->flags & QIB_HAS_QSFP) {
5588 				qd->t_insert = jiffies;
5589 				queue_work(ib_wq, &qd->work);
5590 			}
5591 			spin_lock_irqsave(&ppd->sdma_lock, flags);
5592 			if (__qib_sdma_running(ppd))
5593 				__qib_sdma_process_event(ppd,
5594 					qib_sdma_event_e70_go_idle);
5595 			spin_unlock_irqrestore(&ppd->sdma_lock, flags);
5596 		}
5597 		clr = read_7322_creg32_port(ppd, crp_iblinkdown);
5598 		if (clr == ppd->cpspec->iblnkdownsnap)
5599 			ppd->cpspec->iblnkdowndelta++;
5600 	} else {
5601 		if (qib_compat_ddr_negotiate &&
5602 		    !(ppd->lflags & (QIBL_IB_AUTONEG_FAILED |
5603 				     QIBL_IB_AUTONEG_INPROG)) &&
5604 		    ppd->link_speed_active == QIB_IB_SDR &&
5605 		    (ppd->link_speed_enabled & QIB_IB_DDR)
5606 		    && ppd->cpspec->autoneg_tries < AUTONEG_TRIES) {
5607 			/* we are SDR, and auto-negotiation enabled */
5608 			++ppd->cpspec->autoneg_tries;
5609 			if (!ppd->cpspec->ibdeltainprog) {
5610 				ppd->cpspec->ibdeltainprog = 1;
5611 				ppd->cpspec->ibsymdelta +=
5612 					read_7322_creg32_port(ppd,
5613 						crp_ibsymbolerr) -
5614 						ppd->cpspec->ibsymsnap;
5615 				ppd->cpspec->iblnkerrdelta +=
5616 					read_7322_creg32_port(ppd,
5617 						crp_iblinkerrrecov) -
5618 						ppd->cpspec->iblnkerrsnap;
5619 			}
5620 			try_7322_autoneg(ppd);
5621 			ret = 1; /* no other IB status change processing */
5622 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5623 			   ppd->link_speed_active == QIB_IB_SDR) {
5624 			qib_autoneg_7322_send(ppd, 1);
5625 			set_7322_ibspeed_fast(ppd, QIB_IB_DDR);
5626 			qib_7322_mini_pcs_reset(ppd);
5627 			udelay(2);
5628 			ret = 1; /* no other IB status change processing */
5629 		} else if ((ppd->lflags & QIBL_IB_AUTONEG_INPROG) &&
5630 			   (ppd->link_speed_active & QIB_IB_DDR)) {
5631 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5632 			ppd->lflags &= ~(QIBL_IB_AUTONEG_INPROG |
5633 					 QIBL_IB_AUTONEG_FAILED);
5634 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5635 			ppd->cpspec->autoneg_tries = 0;
5636 			/* re-enable SDR, for next link down */
5637 			set_7322_ibspeed_fast(ppd, ppd->link_speed_enabled);
5638 			wake_up(&ppd->cpspec->autoneg_wait);
5639 			symadj = 1;
5640 		} else if (ppd->lflags & QIBL_IB_AUTONEG_FAILED) {
5641 			/*
5642 			 * Clear autoneg failure flag, and do setup
5643 			 * so we'll try next time link goes down and
5644 			 * back to INIT (possibly connected to a
5645 			 * different device).
5646 			 */
5647 			spin_lock_irqsave(&ppd->lflags_lock, flags);
5648 			ppd->lflags &= ~QIBL_IB_AUTONEG_FAILED;
5649 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5650 			ppd->cpspec->ibcctrl_b |= IBA7322_IBC_IBTA_1_2_MASK;
5651 			symadj = 1;
5652 		}
5653 		if (!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5654 			symadj = 1;
5655 			if (ppd->dd->cspec->r1 && ppd->cpspec->ipg_tries <= 10)
5656 				try_7322_ipg(ppd);
5657 			if (!ppd->cpspec->recovery_init)
5658 				setup_7322_link_recovery(ppd, 0);
5659 			ppd->cpspec->qdr_dfe_time = jiffies +
5660 				msecs_to_jiffies(QDR_DFE_DISABLE_DELAY);
5661 		}
5662 		ppd->cpspec->ibmalfusesnap = 0;
5663 		ppd->cpspec->ibmalfsnap = read_7322_creg32_port(ppd,
5664 			crp_errlink);
5665 	}
5666 	if (symadj) {
5667 		ppd->cpspec->iblnkdownsnap =
5668 			read_7322_creg32_port(ppd, crp_iblinkdown);
5669 		if (ppd->cpspec->ibdeltainprog) {
5670 			ppd->cpspec->ibdeltainprog = 0;
5671 			ppd->cpspec->ibsymdelta += read_7322_creg32_port(ppd,
5672 				crp_ibsymbolerr) - ppd->cpspec->ibsymsnap;
5673 			ppd->cpspec->iblnkerrdelta += read_7322_creg32_port(ppd,
5674 				crp_iblinkerrrecov) - ppd->cpspec->iblnkerrsnap;
5675 		}
5676 	} else if (!ibup && qib_compat_ddr_negotiate &&
5677 		   !ppd->cpspec->ibdeltainprog &&
5678 			!(ppd->lflags & QIBL_IB_AUTONEG_INPROG)) {
5679 		ppd->cpspec->ibdeltainprog = 1;
5680 		ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
5681 			crp_ibsymbolerr);
5682 		ppd->cpspec->iblnkerrsnap = read_7322_creg32_port(ppd,
5683 			crp_iblinkerrrecov);
5684 	}
5685 
5686 	if (!ret)
5687 		qib_setup_7322_setextled(ppd, ibup);
5688 	return ret;
5689 }
5690 
5691 /*
5692  * Does read/modify/write to appropriate registers to
5693  * set output and direction bits selected by mask.
5694  * These are in their canonical positions (e.g. lsb of
5695  * dir will end up in D48 of extctrl on existing chips).
5696  * Returns contents of GP Inputs.
5697  */
5698 static int gpio_7322_mod(struct qib_devdata *dd, u32 out, u32 dir, u32 mask)
5699 {
5700 	u64 read_val, new_out;
5701 	unsigned long flags;
5702 
5703 	if (mask) {
5704 		/* some bits being written, lock access to GPIO */
5705 		dir &= mask;
5706 		out &= mask;
5707 		spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
5708 		dd->cspec->extctrl &= ~((u64)mask << SYM_LSB(EXTCtrl, GPIOOe));
5709 		dd->cspec->extctrl |= ((u64) dir << SYM_LSB(EXTCtrl, GPIOOe));
5710 		new_out = (dd->cspec->gpio_out & ~mask) | out;
5711 
5712 		qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
5713 		qib_write_kreg(dd, kr_gpio_out, new_out);
5714 		dd->cspec->gpio_out = new_out;
5715 		spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
5716 	}
5717 	/*
5718 	 * It is unlikely that a read at this time would get valid
5719 	 * data on a pin whose direction line was set in the same
5720 	 * call to this function. We include the read here because
5721 	 * that allows us to potentially combine a change on one pin with
5722 	 * a read on another, and because the old code did something like
5723 	 * this.
5724 	 */
5725 	read_val = qib_read_kreg64(dd, kr_extstatus);
5726 	return SYM_FIELD(read_val, EXTStatus, GPIOIn);
5727 }
5728 
5729 /* Enable writes to config EEPROM, if possible. Returns previous state */
5730 static int qib_7322_eeprom_wen(struct qib_devdata *dd, int wen)
5731 {
5732 	int prev_wen;
5733 	u32 mask;
5734 
5735 	mask = 1 << QIB_EEPROM_WEN_NUM;
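	/* the write-enable pin is active low; invert the read for the logical state */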
5736 	prev_wen = ~gpio_7322_mod(dd, 0, 0, 0) >> QIB_EEPROM_WEN_NUM;
5737 	gpio_7322_mod(dd, wen ? 0 : mask, mask, mask);
5738 
5739 	return prev_wen & 1;
5740 }
5741 
5742 /*
5743  * Read fundamental info we need to use the chip.  These are
5744  * the registers that describe chip capabilities, and are
5745  * saved in shadow registers.
5746  */
5747 static void get_7322_chip_params(struct qib_devdata *dd)
5748 {
5749 	u64 val;
5750 	u32 piobufs;
5751 	int mtu;
5752 
5753 	dd->palign = qib_read_kreg32(dd, kr_pagealign);
5754 
5755 	dd->uregbase = qib_read_kreg32(dd, kr_userregbase);
5756 
5757 	dd->rcvtidcnt = qib_read_kreg32(dd, kr_rcvtidcnt);
5758 	dd->rcvtidbase = qib_read_kreg32(dd, kr_rcvtidbase);
5759 	dd->rcvegrbase = qib_read_kreg32(dd, kr_rcvegrbase);
5760 	dd->piobufbase = qib_read_kreg64(dd, kr_sendpiobufbase);
5761 	dd->pio2k_bufbase = dd->piobufbase & 0xffffffff;
5762 
5763 	val = qib_read_kreg64(dd, kr_sendpiobufcnt);
5764 	dd->piobcnt2k = val & ~0U;
5765 	dd->piobcnt4k = val >> 32;
5766 	val = qib_read_kreg64(dd, kr_sendpiosize);
5767 	dd->piosize2k = val & ~0U;
5768 	dd->piosize4k = val >> 32;
5769 
5770 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
5771 	if (mtu == -1)
5772 		mtu = QIB_DEFAULT_MTU;
5773 	dd->pport[0].ibmtu = (u32)mtu;
5774 	dd->pport[1].ibmtu = (u32)mtu;
5775 
5776 	/* these may be adjusted in init_chip_wc_pat() */
5777 	dd->pio2kbase = (u32 __iomem *)
5778 		((char __iomem *) dd->kregbase + dd->pio2k_bufbase);
5779 	dd->pio4kbase = (u32 __iomem *)
5780 		((char __iomem *) dd->kregbase +
5781 		 (dd->piobufbase >> 32));
5782 	/*
5783 	 * 4K buffers take 2 pages; we use roundup just to be
5784 	 * paranoid; we calculate it once here, rather than on
5785 	 * every buf allocation.
5786 	 */
5787 	dd->align4k = ALIGN(dd->piosize4k, dd->palign);
5788 
5789 	piobufs = dd->piobcnt4k + dd->piobcnt2k + NUM_VL15_BUFS;
5790 
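	/* each 64-bit pioavail register covers 32 buffers (2 bits per buffer) */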
5791 	dd->pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2) /
5792 		(sizeof(u64) * BITS_PER_BYTE / 2);
5793 }
5794 
5795 /*
5796  * The chip base addresses in cspec and cpspec have to be set
5797  * after possible init_chip_wc_pat(), rather than in
5798  * get_7322_chip_params(), so split out as separate function
5799  */
5800 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
5801 {
5802 	u32 cregbase;
5803 
5804 	cregbase = qib_read_kreg32(dd, kr_counterregbase);
5805 
5806 	dd->cspec->cregbase = (u64 __iomem *)(cregbase +
5807 		(char __iomem *)dd->kregbase);
5808 
5809 	dd->egrtidbase = (u64 __iomem *)
5810 		((char __iomem *) dd->kregbase + dd->rcvegrbase);
5811 
5812 	/* port registers are defined as relative to base of chip */
5813 	dd->pport[0].cpspec->kpregbase =
5814 		(u64 __iomem *)((char __iomem *)dd->kregbase);
5815 	dd->pport[1].cpspec->kpregbase =
5816 		(u64 __iomem *)(dd->palign +
5817 		(char __iomem *)dd->kregbase);
5818 	dd->pport[0].cpspec->cpregbase =
5819 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[0],
5820 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5821 	dd->pport[1].cpspec->cpregbase =
5822 		(u64 __iomem *)(qib_read_kreg_port(&dd->pport[1],
5823 		kr_counterregbase) + (char __iomem *)dd->kregbase);
5824 }
5825 
5826 /*
5827  * This is a fairly special-purpose observer, so we only support
5828  * the port-specific parts of SendCtrl
5829  */
5830 
5831 #define SENDCTRL_SHADOWED (SYM_MASK(SendCtrl_0, SendEnable) |		\
5832 			   SYM_MASK(SendCtrl_0, SDmaEnable) |		\
5833 			   SYM_MASK(SendCtrl_0, SDmaIntEnable) |	\
5834 			   SYM_MASK(SendCtrl_0, SDmaSingleDescriptor) | \
5835 			   SYM_MASK(SendCtrl_0, SDmaHalt) |		\
5836 			   SYM_MASK(SendCtrl_0, IBVLArbiterEn) |	\
5837 			   SYM_MASK(SendCtrl_0, ForceCreditUpToDate))
5838 
5839 static int sendctrl_hook(struct qib_devdata *dd,
5840 			 const struct diag_observer *op, u32 offs,
5841 			 u64 *data, u64 mask, int only_32)
5842 {
5843 	unsigned long flags;
5844 	unsigned idx;
5845 	unsigned pidx;
5846 	struct qib_pportdata *ppd = NULL;
5847 	u64 local_data, all_bits;
5848 
5849 	/*
5850 	 * The fixed correspondence between Physical ports and pports is
5851 	 * severed. We need to hunt for the ppd that corresponds
5852 	 * to the offset we got. And we have to do that without admitting
5853 	 * we know the stride, apparently.
5854 	 */
5855 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5856 		u64 __iomem *psptr;
5857 		u32 psoffs;
5858 
5859 		ppd = dd->pport + pidx;
5860 		if (!ppd->cpspec->kpregbase)
5861 			continue;
5862 
5863 		psptr = ppd->cpspec->kpregbase + krp_sendctrl;
5864 		psoffs = (u32) (psptr - dd->kregbase) * sizeof(*psptr);
5865 		if (psoffs == offs)
5866 			break;
5867 	}
5868 
5869 	/* If pport is not being managed by driver, just avoid shadows. */
5870 	if (pidx >= dd->num_pports)
5871 		ppd = NULL;
5872 
5873 	/* In any case, "idx" is flat index in kreg space */
5874 	idx = offs / sizeof(u64);
5875 
5876 	all_bits = ~0ULL;
5877 	if (only_32)
5878 		all_bits >>= 32;
5879 
5880 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
5881 	if (!ppd || (mask & all_bits) != all_bits) {
5882 		/*
5883 		 * At least some mask bits are zero, so we need
5884 		 * to read. The judgement call is whether from
5885 		 * reg or shadow. First-cut: read reg, and complain
5886 		 * if any bits which should be shadowed are different
5887 		 * from their shadowed value.
5888 		 */
5889 		if (only_32)
5890 			local_data = (u64)qib_read_kreg32(dd, idx);
5891 		else
5892 			local_data = qib_read_kreg64(dd, idx);
5893 		*data = (local_data & ~mask) | (*data & mask);
5894 	}
5895 	if (mask) {
5896 		/*
5897 		 * At least some mask bits are one, so we need
5898 		 * to write, but only shadow some bits.
5899 		 */
5900 		u64 sval, tval; /* Shadowed, transient */
5901 
5902 		/*
5903 		 * New shadow val is bits we don't want to touch,
5904 		 * ORed with bits we do, that are intended for shadow.
5905 		 */
5906 		if (ppd) {
5907 			sval = ppd->p_sendctrl & ~mask;
5908 			sval |= *data & SENDCTRL_SHADOWED & mask;
5909 			ppd->p_sendctrl = sval;
5910 		} else
5911 			sval = *data & SENDCTRL_SHADOWED & mask;
5912 		tval = sval | (*data & ~SENDCTRL_SHADOWED & mask);
5913 		qib_write_kreg(dd, idx, tval);
5914 		qib_write_kreg(dd, kr_scratch, 0ULL);
5915 	}
5916 	spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
5917 	return only_32 ? 4 : 8;
5918 }
5919 
5920 static const struct diag_observer sendctrl_0_observer = {
5921 	sendctrl_hook, KREG_IDX(SendCtrl_0) * sizeof(u64),
5922 	KREG_IDX(SendCtrl_0) * sizeof(u64)
5923 };
5924 
5925 static const struct diag_observer sendctrl_1_observer = {
5926 	sendctrl_hook, KREG_IDX(SendCtrl_1) * sizeof(u64),
5927 	KREG_IDX(SendCtrl_1) * sizeof(u64)
5928 };
5929 
5930 static ushort sdma_fetch_prio = 8;
5931 module_param_named(sdma_fetch_prio, sdma_fetch_prio, ushort, S_IRUGO);
5932 MODULE_PARM_DESC(sdma_fetch_prio, "SDMA descriptor fetch priority");
5933 
5934 /* Besides logging QSFP events, we set appropriate TxDDS values */
5935 static void init_txdds_table(struct qib_pportdata *ppd, int override);
5936 
5937 static void qsfp_7322_event(struct work_struct *work)
5938 {
5939 	struct qib_qsfp_data *qd;
5940 	struct qib_pportdata *ppd;
5941 	unsigned long pwrup;
5942 	unsigned long flags;
5943 	int ret;
5944 	u32 le2;
5945 
5946 	qd = container_of(work, struct qib_qsfp_data, work);
5947 	ppd = qd->ppd;
5948 	pwrup = qd->t_insert +
5949 		msecs_to_jiffies(QSFP_PWR_LAG_MSEC - QSFP_MODPRS_LAG_MSEC);
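	/*
	 * pwrup is the earliest time the module should be fully powered up;
	 * on insertion we wait below until that time has passed before
	 * reading the QSFP cache.
	 */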
5950 
5951 	/* Delay for 20 msecs to allow ModPrs resistor to setup */
5952 	mdelay(QSFP_MODPRS_LAG_MSEC);
5953 
5954 	if (!qib_qsfp_mod_present(ppd)) {
5955 		ppd->cpspec->qsfp_data.modpresent = 0;
5956 		/* Set the physical link to disabled */
5957 		qib_set_ib_7322_lstate(ppd, 0,
5958 				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
5959 		spin_lock_irqsave(&ppd->lflags_lock, flags);
5960 		ppd->lflags &= ~QIBL_LINKV;
5961 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
5962 	} else {
5963 		/*
5964 		 * Some QSFP's not only do not respond until the full power-up
5965 		 * time, but may behave badly if we try. So hold off responding
5966 		 * to insertion.
5967 		 */
5968 		while (1) {
5969 			if (time_is_before_jiffies(pwrup))
5970 				break;
5971 			msleep(20);
5972 		}
5973 
5974 		ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
5975 
5976 		/*
5977 		 * Need to change LE2 back to defaults if we couldn't
5978 		 * read the cable type (to handle cable swaps), so do this
5979 		 * even on failure to read cable information.  We don't
5980 		 * get here for QME, so IS_QME check not needed here.
5981 		 */
5982 		if (!ret && !ppd->dd->cspec->r1) {
5983 			if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
5984 				le2 = LE2_QME;
5985 			else if (qd->cache.atten[1] >= qib_long_atten &&
5986 				 QSFP_IS_CU(qd->cache.tech))
5987 				le2 = LE2_5m;
5988 			else
5989 				le2 = LE2_DEFAULT;
5990 		} else
5991 			le2 = LE2_DEFAULT;
5992 		ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
5993 		/*
5994 		 * We always change parameters, since we can choose
5995 		 * values for cables without eeproms, and the cable may have
5996 		 * changed from a cable with full or partial eeprom content
5997 		 * to one with partial or no content.
5998 		 */
5999 		init_txdds_table(ppd, 0);
6000 		/* The physical link is being re-enabled only when the
6001 		 * previous state was DISABLED and the VALID bit is not
6002 		 * set. This should only happen when the cable has been
6003 		 * physically pulled. */
6004 		if (!ppd->cpspec->qsfp_data.modpresent &&
6005 		    (ppd->lflags & (QIBL_LINKV | QIBL_IB_LINK_DISABLED))) {
6006 			ppd->cpspec->qsfp_data.modpresent = 1;
6007 			qib_set_ib_7322_lstate(ppd, 0,
6008 				QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6009 			spin_lock_irqsave(&ppd->lflags_lock, flags);
6010 			ppd->lflags |= QIBL_LINKV;
6011 			spin_unlock_irqrestore(&ppd->lflags_lock, flags);
6012 		}
6013 	}
6014 }
6015 
6016 /*
6017  * There is little we can do but complain to the user if QSFP
6018  * initialization fails.
6019  */
6020 static void qib_init_7322_qsfp(struct qib_pportdata *ppd)
6021 {
6022 	unsigned long flags;
6023 	struct qib_qsfp_data *qd = &ppd->cpspec->qsfp_data;
6024 	struct qib_devdata *dd = ppd->dd;
6025 	u64 mod_prs_bit = QSFP_GPIO_MOD_PRS_N;
6026 
6027 	mod_prs_bit <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
6028 	qd->ppd = ppd;
6029 	qib_qsfp_init(qd, qsfp_7322_event);
6030 	spin_lock_irqsave(&dd->cspec->gpio_lock, flags);
6031 	dd->cspec->extctrl |= (mod_prs_bit << SYM_LSB(EXTCtrl, GPIOInvert));
6032 	dd->cspec->gpio_mask |= mod_prs_bit;
6033 	qib_write_kreg(dd, kr_extctrl, dd->cspec->extctrl);
6034 	qib_write_kreg(dd, kr_gpio_mask, dd->cspec->gpio_mask);
6035 	spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags);
6036 }
6037 
6038 /*
6039  * called at device initialization time, and also if the txselect
6040  * module parameter is changed.  This is used for cables that don't
6041  * have valid QSFP EEPROMs (not present, or attenuation is zero).
6042  * We initialize to the default, then if there is a specific
6043  * unit,port match, we use that (and set it immediately, for the
6044  * current speed, if the link is at INIT or better).
6045  * String format is "default# unit#,port#=# ... u,p=#", separators must
6046  * be a SPACE character.  A newline terminates.  The u,p=# tuples may
6047  * optionally have "u,p=#,#", where the final # is the H1 value
6048  * The last specific match is used (actually, all are used, but last
6049  * one is the one that winds up set); if none at all, fall back on default.
6050  */
6051 static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
6052 {
6053 	char *nxt, *str;
6054 	u32 pidx, unit, port, deflt, h1;
6055 	unsigned long val;
6056 	int any = 0, seth1;
6057 	int txdds_size;
6058 
6059 	str = txselect_list;
6060 
6061 	/* default number is validated in setup_txselect() */
6062 	deflt = simple_strtoul(str, &nxt, 0);
6063 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
6064 		dd->pport[pidx].cpspec->no_eep = deflt;
6065 
6066 	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
6067 	if (IS_QME(dd) || IS_QMH(dd))
6068 		txdds_size += TXDDS_MFG_SZ;
6069 
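	/* parse each "unit,port=val[,h1]" tuple; malformed entries are skipped */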
6070 	while (*nxt && nxt[1]) {
6071 		str = ++nxt;
6072 		unit = simple_strtoul(str, &nxt, 0);
6073 		if (nxt == str || !*nxt || *nxt != ',') {
6074 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6075 				;
6076 			continue;
6077 		}
6078 		str = ++nxt;
6079 		port = simple_strtoul(str, &nxt, 0);
6080 		if (nxt == str || *nxt != '=') {
6081 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6082 				;
6083 			continue;
6084 		}
6085 		str = ++nxt;
6086 		val = simple_strtoul(str, &nxt, 0);
6087 		if (nxt == str) {
6088 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
6089 				;
6090 			continue;
6091 		}
6092 		if (val >= txdds_size)
6093 			continue;
6094 		seth1 = 0;
6095 		h1 = 0; /* gcc thinks it might be used uninitted */
6096 		if (*nxt == ',' && nxt[1]) {
6097 			str = ++nxt;
6098 			h1 = (u32)simple_strtoul(str, &nxt, 0);
6099 			if (nxt == str)
6100 				while (*nxt && *nxt++ != ' ') /* skip */
6101 					;
6102 			else
6103 				seth1 = 1;
6104 		}
6105 		for (pidx = 0; dd->unit == unit && pidx < dd->num_pports;
6106 		     ++pidx) {
6107 			struct qib_pportdata *ppd = &dd->pport[pidx];
6108 
6109 			if (ppd->port != port || !ppd->link_speed_supported)
6110 				continue;
6111 			ppd->cpspec->no_eep = val;
6112 			if (seth1)
6113 				ppd->cpspec->h1_val = h1;
6114 			/* now change the IBC and serdes, overriding generic */
6115 			init_txdds_table(ppd, 1);
6116 			/* Re-enable the physical state machine on mezz boards
6117 			 * now that the correct settings have been set.
6118 			 * QSFP boards are handled by the QSFP event handler */
6119 			if (IS_QMH(dd) || IS_QME(dd))
6120 				qib_set_ib_7322_lstate(ppd, 0,
6121 					    QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
6122 			any++;
6123 		}
6124 		if (*nxt == '\n')
6125 			break; /* done */
6126 	}
6127 	if (change && !any) {
6128 		/* no specific setting, use the default.
6129 		 * Change the IBC and serdes, but since it's
6130 		 * general, don't override specific settings.
6131 		 */
6132 		for (pidx = 0; pidx < dd->num_pports; ++pidx)
6133 			if (dd->pport[pidx].link_speed_supported)
6134 				init_txdds_table(&dd->pport[pidx], 0);
6135 	}
6136 }
6137 
6138 /* handle the txselect parameter changing */
6139 static int setup_txselect(const char *str, const struct kernel_param *kp)
6140 {
6141 	struct qib_devdata *dd;
6142 	unsigned long val;
6143 	char *n;
6144 
6145 	if (strlen(str) >= ARRAY_SIZE(txselect_list)) {
6146 		pr_info("txselect_values string too long\n");
6147 		return -ENOSPC;
6148 	}
6149 	val = simple_strtoul(str, &n, 0);
6150 	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
6151 				TXDDS_MFG_SZ)) {
6152 		pr_info("txselect_values must start with a number < %d\n",
6153 			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
6154 		return -EINVAL;
6155 	}
6156 	strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1);
6157 
6158 	list_for_each_entry(dd, &qib_dev_list, list)
6159 		if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
6160 			set_no_qsfp_atten(dd, 1);
6161 	return 0;
6162 }
6163 
6164 /*
6165  * Write the final few registers that depend on some of the
6166  * init setup.  Done late in init, just before bringing up
6167  * the serdes.
6168  */
6169 static int qib_late_7322_initreg(struct qib_devdata *dd)
6170 {
6171 	int ret = 0, n;
6172 	u64 val;
6173 
6174 	qib_write_kreg(dd, kr_rcvhdrentsize, dd->rcvhdrentsize);
6175 	qib_write_kreg(dd, kr_rcvhdrsize, dd->rcvhdrsize);
6176 	qib_write_kreg(dd, kr_rcvhdrcnt, dd->rcvhdrcnt);
6177 	qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
6178 	val = qib_read_kreg64(dd, kr_sendpioavailaddr);
6179 	if (val != dd->pioavailregs_phys) {
6180 		qib_dev_err(dd,
6181 			"Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
6182 			(unsigned long) dd->pioavailregs_phys,
6183 			(unsigned long long) val);
6184 		ret = -EINVAL;
6185 	}
6186 
6187 	n = dd->piobcnt2k + dd->piobcnt4k + NUM_VL15_BUFS;
6188 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_KERN, NULL);
6189 	/* the driver's own sends also get pkey, lid, etc. checking, to catch bugs */
6190 	qib_7322_txchk_change(dd, 0, n, TXCHK_CHG_TYPE_ENAB1, NULL);
6191 
6192 	qib_register_observer(dd, &sendctrl_0_observer);
6193 	qib_register_observer(dd, &sendctrl_1_observer);
6194 
6195 	dd->control &= ~QLOGIC_IB_C_SDMAFETCHPRIOEN;
6196 	qib_write_kreg(dd, kr_control, dd->control);
6197 	/*
6198 	 * Set SendDmaFetchPriority and init Tx params, including
6199 	 * QSFP handler on boards that have QSFP.
6200 	 * First set our default attenuation entry for cables that
6201 	 * don't have valid attenuation.
6202 	 */
6203 	set_no_qsfp_atten(dd, 0);
6204 	for (n = 0; n < dd->num_pports; ++n) {
6205 		struct qib_pportdata *ppd = dd->pport + n;
6206 
6207 		qib_write_kreg_port(ppd, krp_senddmaprioritythld,
6208 				    sdma_fetch_prio & 0xf);
6209 		/* Initialize qsfp if present on board. */
6210 		if (dd->flags & QIB_HAS_QSFP)
6211 			qib_init_7322_qsfp(ppd);
6212 	}
6213 	dd->control |= QLOGIC_IB_C_SDMAFETCHPRIOEN;
6214 	qib_write_kreg(dd, kr_control, dd->control);
6215 
6216 	return ret;
6217 }
6218 
6219 /* per IB port errors.  */
6220 #define SENDCTRL_PIBP (MASK_ACROSS(0, 1) | MASK_ACROSS(3, 3) | \
6221 	MASK_ACROSS(8, 15))
6222 #define RCVCTRL_PIBP (MASK_ACROSS(0, 17) | MASK_ACROSS(39, 41))
6223 #define ERRS_PIBP (MASK_ACROSS(57, 58) | MASK_ACROSS(54, 54) | \
6224 	MASK_ACROSS(36, 49) | MASK_ACROSS(29, 34) | MASK_ACROSS(14, 17) | \
6225 	MASK_ACROSS(0, 11))
6226 
6227 /*
6228  * Write the initialization per-port registers that need to be done at
6229  * driver load and after reset completes (i.e., that aren't done as part
6230  * of other init procedures called from qib_init.c).
6231  * Some of these should be redundant on reset, but play safe.
6232  */
6233 static void write_7322_init_portregs(struct qib_pportdata *ppd)
6234 {
6235 	u64 val;
6236 	int i;
6237 
6238 	if (!ppd->link_speed_supported) {
6239 		/* no buffer credits for this port */
6240 		for (i = 1; i < 8; i++)
6241 			qib_write_kreg_port(ppd, krp_rxcreditvl0 + i, 0);
6242 		qib_write_kreg_port(ppd, krp_ibcctrl_b, 0);
6243 		qib_write_kreg(ppd->dd, kr_scratch, 0);
6244 		return;
6245 	}
6246 
6247 	/*
6248 	 * Set the number of supported virtual lanes in IBC,
6249 	 * for flow control packet handling on unsupported VLs
6250 	 */
6251 	val = qib_read_kreg_port(ppd, krp_ibsdtestiftx);
6252 	val &= ~SYM_MASK(IB_SDTEST_IF_TX_0, VL_CAP);
6253 	val |= (u64)(ppd->vls_supported - 1) <<
6254 		SYM_LSB(IB_SDTEST_IF_TX_0, VL_CAP);
6255 	qib_write_kreg_port(ppd, krp_ibsdtestiftx, val);
6256 
6257 	qib_write_kreg_port(ppd, krp_rcvbthqp, QIB_KD_QP);
6258 
6259 	/* enable tx header checking */
6260 	qib_write_kreg_port(ppd, krp_sendcheckcontrol, IBA7322_SENDCHK_PKEY |
6261 			    IBA7322_SENDCHK_BTHQP | IBA7322_SENDCHK_SLID |
6262 			    IBA7322_SENDCHK_RAW_IPV6 | IBA7322_SENDCHK_MINSZ);
6263 
6264 	qib_write_kreg_port(ppd, krp_ncmodectrl,
6265 		SYM_MASK(IBNCModeCtrl_0, ScrambleCapLocal));
6266 
6267 	/*
6268 	 * Unconditionally clear the bufmask bits.  If SDMA is
6269 	 * enabled, we'll set them appropriately later.
6270 	 */
6271 	qib_write_kreg_port(ppd, krp_senddmabufmask0, 0);
6272 	qib_write_kreg_port(ppd, krp_senddmabufmask1, 0);
6273 	qib_write_kreg_port(ppd, krp_senddmabufmask2, 0);
6274 	if (ppd->dd->cspec->r1)
6275 		ppd->p_sendctrl |= SYM_MASK(SendCtrl_0, ForceCreditUpToDate);
6276 }
6277 
6278 /*
6279  * Write the initialization per-device registers that need to be done at
6280  * driver load and after reset completes (i.e., that aren't done as part
6281  * of other init procedures called from qib_init.c).  Also write per-port
6282  * registers that are affected by overall device config, such as QP mapping
6283  * registers that are affected by overall device config, such as QP mapping.
6284  */
6285 static void write_7322_initregs(struct qib_devdata *dd)
6286 {
6287 	struct qib_pportdata *ppd;
6288 	int i, pidx;
6289 	u64 val;
6290 
6291 	/* Set Multicast QPs received by port 2 to map to context one. */
6292 	qib_write_kreg(dd, KREG_IDX(RcvQPMulticastContext_1), 1);
6293 
6294 	for (pidx = 0; pidx < dd->num_pports; ++pidx) {
6295 		unsigned n, regno;
6296 		unsigned long flags;
6297 
6298 		if (dd->n_krcv_queues < 2 ||
6299 			!dd->pport[pidx].link_speed_supported)
6300 			continue;
6301 
6302 		ppd = &dd->pport[pidx];
6303 
6304 		/* be paranoid against later code motion, etc. */
6305 		spin_lock_irqsave(&dd->cspec->rcvmod_lock, flags);
6306 		ppd->p_rcvctrl |= SYM_MASK(RcvCtrl_0, RcvQPMapEnable);
6307 		spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
6308 
6309 		/* Initialize QP to context mapping */
6310 		regno = krp_rcvqpmaptable;
6311 		val = 0;
6312 		if (dd->num_pports > 1)
6313 			n = dd->first_user_ctxt / dd->num_pports;
6314 		else
6315 			n = dd->first_user_ctxt - 1;
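		/*
		 * Each RcvQPMapTable register packs six 5-bit context
		 * numbers; flush a register every time six entries have
		 * been filled in.
		 */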
6316 		for (i = 0; i < 32; ) {
6317 			unsigned ctxt;
6318 
6319 			if (dd->num_pports > 1)
6320 				ctxt = (i % n) * dd->num_pports + pidx;
6321 			else if (i % n)
6322 				ctxt = (i % n) + 1;
6323 			else
6324 				ctxt = ppd->hw_pidx;
6325 			val |= ctxt << (5 * (i % 6));
6326 			i++;
6327 			if (i % 6 == 0) {
6328 				qib_write_kreg_port(ppd, regno, val);
6329 				val = 0;
6330 				regno++;
6331 			}
6332 		}
6333 		qib_write_kreg_port(ppd, regno, val);
6334 	}
6335 
6336 	/*
6337 	 * Set up interrupt mitigation for kernel contexts, but
6338 	 * not user contexts (user contexts use interrupts when
6339 	 * stalled waiting for any packet, so we want those interrupts
6340 	 * right away).
6341 	 */
6342 	for (i = 0; i < dd->first_user_ctxt; i++) {
6343 		dd->cspec->rcvavail_timeout[i] = rcv_int_timeout;
6344 		qib_write_kreg(dd, kr_rcvavailtimeout + i, rcv_int_timeout);
6345 	}
6346 
6347 	/*
6348 	 * Initialize the rcvflow tables as disabled.  Application code
6349 	 * will set up each flow as it uses it.
6350 	 * This doesn't clear any of the error bits that might be set.
6351 	 */
6352 	val = TIDFLOW_ERRBITS; /* these are W1C */
6353 	for (i = 0; i < dd->cfgctxts; i++) {
6354 		int flow;
6355 
6356 		for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
6357 			qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
6358 	}
6359 
6360 	/*
6361 	 * Dual-port cards init to dual-port recovery, single-port cards
6362 	 * to the one port.  Dual-port cards may later adjust to 1 port,
6363 	 * and then back to dual port if both ports are connected.
6364 	 */
6365 	if (dd->num_pports)
6366 		setup_7322_link_recovery(dd->pport, dd->num_pports > 1);
6367 }
6368 
6369 static int qib_init_7322_variables(struct qib_devdata *dd)
6370 {
6371 	struct qib_pportdata *ppd;
6372 	unsigned features, pidx, sbufcnt;
6373 	int ret, mtu;
6374 	u32 sbufs, updthresh;
6375 	resource_size_t vl15off;
6376 
6377 	/* pport structs are contiguous, allocated after devdata */
6378 	ppd = (struct qib_pportdata *)(dd + 1);
6379 	dd->pport = ppd;
6380 	ppd[0].dd = dd;
6381 	ppd[1].dd = dd;
6382 
6383 	dd->cspec = (struct qib_chip_specific *)(ppd + 2);
6384 
6385 	ppd[0].cpspec = (struct qib_chippport_specific *)(dd->cspec + 1);
6386 	ppd[1].cpspec = &ppd[0].cpspec[1];
6387 	ppd[0].cpspec->ppd = &ppd[0]; /* for autoneg_7322_work() */
6388 	ppd[1].cpspec->ppd = &ppd[1]; /* for autoneg_7322_work() */
6389 
6390 	spin_lock_init(&dd->cspec->rcvmod_lock);
6391 	spin_lock_init(&dd->cspec->gpio_lock);
6392 
6393 	/* we haven't yet set QIB_PRESENT, so use read directly */
6394 	dd->revision = readq(&dd->kregbase[kr_revision]);
6395 
6396 	if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
6397 		qib_dev_err(dd,
6398 			"Revision register read failure, giving up initialization\n");
6399 		ret = -ENODEV;
6400 		goto bail;
6401 	}
6402 	dd->flags |= QIB_PRESENT;  /* now register routines work */
6403 
6404 	dd->majrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMajor);
6405 	dd->minrev = (u8) SYM_FIELD(dd->revision, Revision_R, ChipRevMinor);
6406 	dd->cspec->r1 = dd->minrev == 1;
6407 
6408 	get_7322_chip_params(dd);
6409 	features = qib_7322_boardname(dd);
6410 
6411 	/* now that piobcnt2k and 4k set, we can allocate these */
6412 	sbufcnt = dd->piobcnt2k + dd->piobcnt4k +
6413 		NUM_VL15_BUFS + BITS_PER_LONG - 1;
6414 	sbufcnt /= BITS_PER_LONG;
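	/* number of longs needed for one bit per send buffer (2k, 4k, VL15) */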
6415 	dd->cspec->sendchkenable = kmalloc(sbufcnt *
6416 		sizeof(*dd->cspec->sendchkenable), GFP_KERNEL);
6417 	dd->cspec->sendgrhchk = kmalloc(sbufcnt *
6418 		sizeof(*dd->cspec->sendgrhchk), GFP_KERNEL);
6419 	dd->cspec->sendibchk = kmalloc(sbufcnt *
6420 		sizeof(*dd->cspec->sendibchk), GFP_KERNEL);
6421 	if (!dd->cspec->sendchkenable || !dd->cspec->sendgrhchk ||
6422 		!dd->cspec->sendibchk) {
6423 		ret = -ENOMEM;
6424 		goto bail;
6425 	}
6426 
6427 	ppd = dd->pport;
6428 
6429 	/*
6430 	 * GPIO bits for TWSI data and clock,
6431 	 * used for serial EEPROM.
6432 	 */
6433 	dd->gpio_sda_num = _QIB_GPIO_SDA_NUM;
6434 	dd->gpio_scl_num = _QIB_GPIO_SCL_NUM;
6435 	dd->twsi_eeprom_dev = QIB_TWSI_EEPROM_DEV;
6436 
6437 	dd->flags |= QIB_HAS_INTX | QIB_HAS_LINK_LATENCY |
6438 		QIB_NODMA_RTAIL | QIB_HAS_VLSUPP | QIB_HAS_HDRSUPP |
6439 		QIB_HAS_THRESH_UPDATE |
6440 		(sdma_idle_cnt ? QIB_HAS_SDMA_TIMEOUT : 0);
6441 	dd->flags |= qib_special_trigger ?
6442 		QIB_USE_SPCL_TRIG : QIB_HAS_SEND_DMA;
6443 
6444 	/*
6445 	 * Set up initial values.  These may change when PAT is enabled, but
6446 	 * we need these to do initial chip register accesses.
6447 	 */
6448 	qib_7322_set_baseaddrs(dd);
6449 
6450 	mtu = ib_mtu_enum_to_int(qib_ibmtu);
6451 	if (mtu == -1)
6452 		mtu = QIB_DEFAULT_MTU;
6453 
6454 	dd->cspec->int_enable_mask = QIB_I_BITSEXTANT;
6455 	/* all hwerrors become interrupts, unless special purposed */
6456 	dd->cspec->hwerrmask = ~0ULL;
6457 	/*  link_recovery setup causes these errors, so ignore them,
6458 	 *  other than clearing them when they occur */
6459 	dd->cspec->hwerrmask &=
6460 		~(SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_0) |
6461 		  SYM_MASK(HwErrMask, IBSerdesPClkNotDetectMask_1) |
6462 		  HWE_MASK(LATriggered));
6463 
6464 	for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
6465 		struct qib_chippport_specific *cp = ppd->cpspec;
6466 
6467 		ppd->link_speed_supported = features & PORT_SPD_CAP;
6468 		features >>=  PORT_SPD_CAP_SHIFT;
6469 		if (!ppd->link_speed_supported) {
6470 			/* single port mode (7340, or configured) */
6471 			dd->skip_kctxt_mask |= 1 << pidx;
6472 			if (pidx == 0) {
6473 				/* Make sure port is disabled. */
6474 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6475 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6476 				ppd[0] = ppd[1];
6477 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6478 						  IBSerdesPClkNotDetectMask_0)
6479 						  | SYM_MASK(HwErrMask,
6480 						  SDmaMemReadErrMask_0));
6481 				dd->cspec->int_enable_mask &= ~(
6482 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_0) |
6483 				     SYM_MASK(IntMask, SDmaIdleIntMask_0) |
6484 				     SYM_MASK(IntMask, SDmaProgressIntMask_0) |
6485 				     SYM_MASK(IntMask, SDmaIntMask_0) |
6486 				     SYM_MASK(IntMask, ErrIntMask_0) |
6487 				     SYM_MASK(IntMask, SendDoneIntMask_0));
6488 			} else {
6489 				/* Make sure port is disabled. */
6490 				qib_write_kreg_port(ppd, krp_rcvctrl, 0);
6491 				qib_write_kreg_port(ppd, krp_ibcctrl_a, 0);
6492 				dd->cspec->hwerrmask &= ~(SYM_MASK(HwErrMask,
6493 						  IBSerdesPClkNotDetectMask_1)
6494 						  | SYM_MASK(HwErrMask,
6495 						  SDmaMemReadErrMask_1));
6496 				dd->cspec->int_enable_mask &= ~(
6497 				     SYM_MASK(IntMask, SDmaCleanupDoneMask_1) |
6498 				     SYM_MASK(IntMask, SDmaIdleIntMask_1) |
6499 				     SYM_MASK(IntMask, SDmaProgressIntMask_1) |
6500 				     SYM_MASK(IntMask, SDmaIntMask_1) |
6501 				     SYM_MASK(IntMask, ErrIntMask_1) |
6502 				     SYM_MASK(IntMask, SendDoneIntMask_1));
6503 			}
6504 			continue;
6505 		}
6506 
6507 		dd->num_pports++;
6508 		ret = qib_init_pportdata(ppd, dd, pidx, dd->num_pports);
6509 		if (ret) {
6510 			dd->num_pports--;
6511 			goto bail;
6512 		}
6513 
6514 		ppd->link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
6515 		ppd->link_width_enabled = IB_WIDTH_4X;
6516 		ppd->link_speed_enabled = ppd->link_speed_supported;
6517 		/*
6518 		 * Set the initial values to reasonable default, will be set
6519 		 * for real when link is up.
6520 		 */
6521 		ppd->link_width_active = IB_WIDTH_4X;
6522 		ppd->link_speed_active = QIB_IB_SDR;
6523 		ppd->delay_mult = ib_rate_to_delay[IB_RATE_10_GBPS];
6524 		switch (qib_num_cfg_vls) {
6525 		case 1:
6526 			ppd->vls_supported = IB_VL_VL0;
6527 			break;
6528 		case 2:
6529 			ppd->vls_supported = IB_VL_VL0_1;
6530 			break;
6531 		default:
6532 			qib_devinfo(dd->pcidev,
6533 				    "Invalid num_vls %u, using 4 VLs\n",
6534 				    qib_num_cfg_vls);
6535 			qib_num_cfg_vls = 4;
6536 			/* fall through */
6537 		case 4:
6538 			ppd->vls_supported = IB_VL_VL0_3;
6539 			break;
6540 		case 8:
6541 			if (mtu <= 2048)
6542 				ppd->vls_supported = IB_VL_VL0_7;
6543 			else {
6544 				qib_devinfo(dd->pcidev,
6545 					    "Invalid num_vls %u for MTU %d , using 4 VLs\n",
6546 					    qib_num_cfg_vls, mtu);
6547 				ppd->vls_supported = IB_VL_VL0_3;
6548 				qib_num_cfg_vls = 4;
6549 			}
6550 			break;
6551 		}
6552 		ppd->vls_operational = ppd->vls_supported;
6553 
6554 		init_waitqueue_head(&cp->autoneg_wait);
6555 		INIT_DELAYED_WORK(&cp->autoneg_work,
6556 				  autoneg_7322_work);
6557 		if (ppd->dd->cspec->r1)
6558 			INIT_DELAYED_WORK(&cp->ipg_work, ipg_7322_work);
6559 
6560 		/*
6561 		 * For Mez and similar cards, no qsfp info, so do
6562 		 * the "cable info" setup here.  Can be overridden
6563 		 * in adapter-specific routines.
6564 		 */
6565 		if (!(dd->flags & QIB_HAS_QSFP)) {
6566 			if (!IS_QMH(dd) && !IS_QME(dd))
6567 				qib_devinfo(dd->pcidev,
6568 					"IB%u:%u: Unknown mezzanine card type\n",
6569 					dd->unit, ppd->port);
6570 			cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
6571 			/*
6572 			 * Choose center value as default tx serdes setting
6573 			 * until changed through module parameter.
6574 			 */
6575 			ppd->cpspec->no_eep = IS_QMH(dd) ?
6576 				TXDDS_TABLE_SZ + 2 : TXDDS_TABLE_SZ + 4;
6577 		} else
6578 			cp->h1_val = H1_FORCE_VAL;
6579 
6580 		/* Avoid writes to chip for mini_init */
6581 		if (!qib_mini_init)
6582 			write_7322_init_portregs(ppd);
6583 
6584 		timer_setup(&cp->chase_timer, reenable_chase, 0);
6585 
6586 		ppd++;
6587 	}
6588 
6589 	dd->rcvhdrentsize = qib_rcvhdrentsize ?
6590 		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
6591 	dd->rcvhdrsize = qib_rcvhdrsize ?
6592 		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
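	/* the RHF (receive header flags) sit in the last two 32-bit words
	 * of each rcvhdrq entry */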
6593 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
6594 
6595 	/* we always allocate at least 2048 bytes for eager buffers */
6596 	dd->rcvegrbufsize = max(mtu, 2048);
6597 	BUG_ON(!is_power_of_2(dd->rcvegrbufsize));
6598 	dd->rcvegrbufsize_shift = ilog2(dd->rcvegrbufsize);
6599 
6600 	qib_7322_tidtemplate(dd);
6601 
6602 	/*
6603 	 * We can request a receive interrupt for 1 or
6604 	 * more packets from the current offset.
6605 	 */
6606 	dd->rhdrhead_intr_off =
6607 		(u64) rcv_int_count << IBA7322_HDRHEAD_PKTINT_SHIFT;
6608 
6609 	/* setup the stats timer; the add_timer is done at end of init */
6610 	timer_setup(&dd->stats_timer, qib_get_7322_faststats, 0);
6611 
6612 	dd->ureg_align = 0x10000;  /* 64KB alignment */
6613 
6614 	dd->piosize2kmax_dwords = dd->piosize2k >> 2;
6615 
6616 	qib_7322_config_ctxts(dd);
6617 	qib_set_ctxtcnt(dd);
6618 
6619 	/*
6620 	 * We do not set WC on the VL15 buffers to avoid
6621 	 * a rare problem with unaligned writes from
6622 	 * interrupt-flushed store buffers, so we need
6623 	 * to map those separately here.  We can't solve
6624 	 * this for the rarely used mtrr case.
6625 	 */
6626 	ret = init_chip_wc_pat(dd, 0);
6627 	if (ret)
6628 		goto bail;
6629 
6630 	/* vl15 buffers start just after the 4k buffers */
6631 	vl15off = dd->physaddr + (dd->piobufbase >> 32) +
6632 		  dd->piobcnt4k * dd->align4k;
6633 	dd->piovl15base	= ioremap_nocache(vl15off,
6634 					  NUM_VL15_BUFS * dd->align4k);
6635 	if (!dd->piovl15base) {
6636 		ret = -ENOMEM;
6637 		goto bail;
6638 	}
6639 
6640 	qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
6641 
6642 	ret = 0;
6643 	if (qib_mini_init)
6644 		goto bail;
6645 	if (!dd->num_pports) {
6646 		qib_dev_err(dd, "No ports enabled, giving up initialization\n");
6647 		goto bail; /* no error, so can still figure out why err */
6648 	}
6649 
6650 	write_7322_initregs(dd);
6651 	ret = qib_create_ctxts(dd);
6652 	init_7322_cntrnames(dd);
6653 
6654 	updthresh = 8U; /* update threshold */
6655 
6656 	/* Use all of the 4KB buffers for kernel SDMA (zero if !SDMA), but
6657 	 * reserve the greater of the update threshold or 3 buffers for
6658 	 * other kernel use, such as sending SMI, MAD, and ACKs.  If we
6659 	 * aren't enabling SDMA, the kernel gets all of the 4k buffers
6660 	 * instead.
6661 	 * If the reservation were less than the update threshold, we could
6662 	 * wait a long time for an update.  Coded this way because we
6663 	 * sometimes change the update threshold for various reasons,
6664 	 * and we want this to remain robust.
6665 	 */
6666 	if (dd->flags & QIB_HAS_SEND_DMA) {
6667 		dd->cspec->sdmabufcnt = dd->piobcnt4k;
6668 		sbufs = updthresh > 3 ? updthresh : 3;
6669 	} else {
6670 		dd->cspec->sdmabufcnt = 0;
6671 		sbufs = dd->piobcnt4k;
6672 	}
6673 	dd->cspec->lastbuf_for_pio = dd->piobcnt2k + dd->piobcnt4k -
6674 		dd->cspec->sdmabufcnt;
6675 	dd->lastctxt_piobuf = dd->cspec->lastbuf_for_pio - sbufs;
6676 	dd->cspec->lastbuf_for_pio--; /* range is <= , not < */
6677 	dd->last_pio = dd->cspec->lastbuf_for_pio;
6678 	dd->pbufsctxt = (dd->cfgctxts > dd->first_user_ctxt) ?
6679 		dd->lastctxt_piobuf / (dd->cfgctxts - dd->first_user_ctxt) : 0;
6680 
6681 	/*
6682 	 * If we have 16 user contexts, we will have 7 sbufs
6683 	 * per context, so reduce the update threshold to match.  We
6684 	 * want an update before we actually run out; at low pbufs/ctxt,
6685 	 * give ourselves some margin.
6686 	 */
6687 	if (dd->pbufsctxt >= 2 && dd->pbufsctxt - 2 < updthresh)
6688 		updthresh = dd->pbufsctxt - 2;
6689 	dd->cspec->updthresh_dflt = updthresh;
6690 	dd->cspec->updthresh = updthresh;
6691 
6692 	/* before full enable, no interrupts, no locking needed */
6693 	dd->sendctrl |= ((updthresh & SYM_RMASK(SendCtrl, AvailUpdThld))
6694 			     << SYM_LSB(SendCtrl, AvailUpdThld)) |
6695 			SYM_MASK(SendCtrl, SendBufAvailPad64Byte);
6696 
6697 	dd->psxmitwait_supported = 1;
6698 	dd->psxmitwait_check_rate = QIB_7322_PSXMITWAIT_CHECK_RATE;
6699 bail:
6700 	if (!dd->ctxtcnt)
6701 		dd->ctxtcnt = 1; /* for other initialization code */
6702 
6703 	return ret;
6704 }
6705 
6706 static u32 __iomem *qib_7322_getsendbuf(struct qib_pportdata *ppd, u64 pbc,
6707 					u32 *pbufnum)
6708 {
6709 	u32 first, last, plen = pbc & QIB_PBC_LENGTH_MASK;
6710 	struct qib_devdata *dd = ppd->dd;
6711 
6712 	/* last is same for 2k and 4k, because we use 4k if all 2k busy */
6713 	if (pbc & PBC_7322_VL15_SEND) {
6714 		first = dd->piobcnt2k + dd->piobcnt4k + ppd->hw_pidx;
6715 		last = first;
6716 	} else {
6717 		if ((plen + 1) > dd->piosize2kmax_dwords)
6718 			first = dd->piobcnt2k;
6719 		else
6720 			first = 0;
6721 		last = dd->cspec->lastbuf_for_pio;
6722 	}
6723 	return qib_getsendbuf_range(dd, pbufnum, first, last);
6724 }
6725 
6726 static void qib_set_cntr_7322_sample(struct qib_pportdata *ppd, u32 intv,
6727 				     u32 start)
6728 {
6729 	qib_write_kreg_port(ppd, krp_psinterval, intv);
6730 	qib_write_kreg_port(ppd, krp_psstart, start);
6731 }
6732 
6733 /*
6734  * Must be called with sdma_lock held, or before init finished.
6735  */
6736 static void qib_sdma_set_7322_desc_cnt(struct qib_pportdata *ppd, unsigned cnt)
6737 {
6738 	qib_write_kreg_port(ppd, krp_senddmadesccnt, cnt);
6739 }
6740 
6741 /*
6742  * sdma_lock should be acquired before calling this routine
6743  */
6744 static void dump_sdma_7322_state(struct qib_pportdata *ppd)
6745 {
6746 	u64 reg, reg1, reg2;
6747 
6748 	reg = qib_read_kreg_port(ppd, krp_senddmastatus);
6749 	qib_dev_porterr(ppd->dd, ppd->port,
6750 		"SDMA senddmastatus: 0x%016llx\n", reg);
6751 
6752 	reg = qib_read_kreg_port(ppd, krp_sendctrl);
6753 	qib_dev_porterr(ppd->dd, ppd->port,
6754 		"SDMA sendctrl: 0x%016llx\n", reg);
6755 
6756 	reg = qib_read_kreg_port(ppd, krp_senddmabase);
6757 	qib_dev_porterr(ppd->dd, ppd->port,
6758 		"SDMA senddmabase: 0x%016llx\n", reg);
6759 
6760 	reg = qib_read_kreg_port(ppd, krp_senddmabufmask0);
6761 	reg1 = qib_read_kreg_port(ppd, krp_senddmabufmask1);
6762 	reg2 = qib_read_kreg_port(ppd, krp_senddmabufmask2);
6763 	qib_dev_porterr(ppd->dd, ppd->port,
6764 		"SDMA senddmabufmask 0:%llx  1:%llx  2:%llx\n",
6765 		 reg, reg1, reg2);
6766 
6767 	/* get bufuse bits, clear them, and print them again if non-zero */
6768 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6769 	qib_write_kreg_port(ppd, krp_senddmabuf_use0, reg);
6770 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6771 	qib_write_kreg_port(ppd, krp_senddmabuf_use1, reg1);
6772 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6773 	qib_write_kreg_port(ppd, krp_senddmabuf_use2, reg2);
6774 	/* 0 and 1 should always be zero, so print as short form */
6775 	qib_dev_porterr(ppd->dd, ppd->port,
6776 		 "SDMA current senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6777 		 reg, reg1, reg2);
6778 	reg = qib_read_kreg_port(ppd, krp_senddmabuf_use0);
6779 	reg1 = qib_read_kreg_port(ppd, krp_senddmabuf_use1);
6780 	reg2 = qib_read_kreg_port(ppd, krp_senddmabuf_use2);
6781 	/* 0 and 1 should always be zero, so print as short form */
6782 	qib_dev_porterr(ppd->dd, ppd->port,
6783 		 "SDMA cleared senddmabuf_use 0:%llx  1:%llx  2:%llx\n",
6784 		 reg, reg1, reg2);
6785 
6786 	reg = qib_read_kreg_port(ppd, krp_senddmatail);
6787 	qib_dev_porterr(ppd->dd, ppd->port,
6788 		"SDMA senddmatail: 0x%016llx\n", reg);
6789 
6790 	reg = qib_read_kreg_port(ppd, krp_senddmahead);
6791 	qib_dev_porterr(ppd->dd, ppd->port,
6792 		"SDMA senddmahead: 0x%016llx\n", reg);
6793 
6794 	reg = qib_read_kreg_port(ppd, krp_senddmaheadaddr);
6795 	qib_dev_porterr(ppd->dd, ppd->port,
6796 		"SDMA senddmaheadaddr: 0x%016llx\n", reg);
6797 
6798 	reg = qib_read_kreg_port(ppd, krp_senddmalengen);
6799 	qib_dev_porterr(ppd->dd, ppd->port,
6800 		"SDMA senddmalengen: 0x%016llx\n", reg);
6801 
6802 	reg = qib_read_kreg_port(ppd, krp_senddmadesccnt);
6803 	qib_dev_porterr(ppd->dd, ppd->port,
6804 		"SDMA senddmadesccnt: 0x%016llx\n", reg);
6805 
6806 	reg = qib_read_kreg_port(ppd, krp_senddmaidlecnt);
6807 	qib_dev_porterr(ppd->dd, ppd->port,
6808 		"SDMA senddmaidlecnt: 0x%016llx\n", reg);
6809 
6810 	reg = qib_read_kreg_port(ppd, krp_senddmaprioritythld);
6811 	qib_dev_porterr(ppd->dd, ppd->port,
6812 		"SDMA senddmaprioritythld: 0x%016llx\n", reg);
6813 
6814 	reg = qib_read_kreg_port(ppd, krp_senddmareloadcnt);
6815 	qib_dev_porterr(ppd->dd, ppd->port,
6816 		"SDMA senddmareloadcnt: 0x%016llx\n", reg);
6817 
6818 	dump_sdma_state(ppd);
6819 }
6820 
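/*
 * Per-state hardware actions (enable, interrupt enable, halt, drain)
 * used by the generic SDMA state machine for each software state.
 */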
6821 static struct sdma_set_state_action sdma_7322_action_table[] = {
6822 	[qib_sdma_state_s00_hw_down] = {
6823 		.go_s99_running_tofalse = 1,
6824 		.op_enable = 0,
6825 		.op_intenable = 0,
6826 		.op_halt = 0,
6827 		.op_drain = 0,
6828 	},
6829 	[qib_sdma_state_s10_hw_start_up_wait] = {
6830 		.op_enable = 0,
6831 		.op_intenable = 1,
6832 		.op_halt = 1,
6833 		.op_drain = 0,
6834 	},
6835 	[qib_sdma_state_s20_idle] = {
6836 		.op_enable = 1,
6837 		.op_intenable = 1,
6838 		.op_halt = 1,
6839 		.op_drain = 0,
6840 	},
6841 	[qib_sdma_state_s30_sw_clean_up_wait] = {
6842 		.op_enable = 0,
6843 		.op_intenable = 1,
6844 		.op_halt = 1,
6845 		.op_drain = 0,
6846 	},
6847 	[qib_sdma_state_s40_hw_clean_up_wait] = {
6848 		.op_enable = 1,
6849 		.op_intenable = 1,
6850 		.op_halt = 1,
6851 		.op_drain = 0,
6852 	},
6853 	[qib_sdma_state_s50_hw_halt_wait] = {
6854 		.op_enable = 1,
6855 		.op_intenable = 1,
6856 		.op_halt = 1,
6857 		.op_drain = 1,
6858 	},
6859 	[qib_sdma_state_s99_running] = {
6860 		.op_enable = 1,
6861 		.op_intenable = 1,
6862 		.op_halt = 0,
6863 		.op_drain = 0,
6864 		.go_s99_running_totrue = 1,
6865 	},
6866 };
6867 
6868 static void qib_7322_sdma_init_early(struct qib_pportdata *ppd)
6869 {
6870 	ppd->sdma_state.set_state_action = sdma_7322_action_table;
6871 }
6872 
6873 static int init_sdma_7322_regs(struct qib_pportdata *ppd)
6874 {
6875 	struct qib_devdata *dd = ppd->dd;
6876 	unsigned lastbuf, erstbuf;
6877 	u64 senddmabufmask[3] = { 0 };
6878 	int n, ret = 0;
6879 
6880 	qib_write_kreg_port(ppd, krp_senddmabase, ppd->sdma_descq_phys);
6881 	qib_sdma_7322_setlengen(ppd);
6882 	qib_sdma_update_7322_tail(ppd, 0); /* Set SendDmaTail */
6883 	qib_write_kreg_port(ppd, krp_senddmareloadcnt, sdma_idle_cnt);
6884 	qib_write_kreg_port(ppd, krp_senddmadesccnt, 0);
6885 	qib_write_kreg_port(ppd, krp_senddmaheadaddr, ppd->sdma_head_phys);
6886 
6887 	if (dd->num_pports)
6888 		n = dd->cspec->sdmabufcnt / dd->num_pports; /* no remainder */
6889 	else
6890 		n = dd->cspec->sdmabufcnt; /* failsafe for init */
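	/*
	 * SDMA buffers are carved from the top of the PIO buffer space;
	 * on dual-port cards port 1 takes the lower half of that range
	 * and port 2 the upper half, while a single port takes it all.
	 */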
6891 	erstbuf = (dd->piobcnt2k + dd->piobcnt4k) -
6892 		((dd->num_pports == 1 || ppd->port == 2) ? n :
6893 		dd->cspec->sdmabufcnt);
6894 	lastbuf = erstbuf + n;
6895 
6896 	ppd->sdma_state.first_sendbuf = erstbuf;
6897 	ppd->sdma_state.last_sendbuf = lastbuf;
6898 	for (; erstbuf < lastbuf; ++erstbuf) {
6899 		unsigned word = erstbuf / BITS_PER_LONG;
6900 		unsigned bit = erstbuf & (BITS_PER_LONG - 1);
6901 
6902 		BUG_ON(word >= 3);
6903 		senddmabufmask[word] |= 1ULL << bit;
6904 	}
6905 	qib_write_kreg_port(ppd, krp_senddmabufmask0, senddmabufmask[0]);
6906 	qib_write_kreg_port(ppd, krp_senddmabufmask1, senddmabufmask[1]);
6907 	qib_write_kreg_port(ppd, krp_senddmabufmask2, senddmabufmask[2]);
6908 	return ret;
6909 }
6910 
6911 /* sdma_lock must be held */
6912 static u16 qib_sdma_7322_gethead(struct qib_pportdata *ppd)
6913 {
6914 	struct qib_devdata *dd = ppd->dd;
6915 	int sane;
6916 	int use_dmahead;
6917 	u16 swhead;
6918 	u16 swtail;
6919 	u16 cnt;
6920 	u16 hwhead;
6921 
6922 	use_dmahead = __qib_sdma_running(ppd) &&
6923 		(dd->flags & QIB_HAS_SDMA_TIMEOUT);
6924 retry:
6925 	hwhead = use_dmahead ?
6926 		(u16) le64_to_cpu(*ppd->sdma_head_dma) :
6927 		(u16) qib_read_kreg_port(ppd, krp_senddmahead);
6928 
6929 	swhead = ppd->sdma_descq_head;
6930 	swtail = ppd->sdma_descq_tail;
6931 	cnt = ppd->sdma_descq_cnt;
6932 
6933 	if (swhead < swtail)
6934 		/* not wrapped */
6935 		sane = (hwhead >= swhead) && (hwhead <= swtail);
6936 	else if (swhead > swtail)
6937 		/* wrapped around */
6938 		sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
6939 			(hwhead <= swtail);
6940 	else
6941 		/* empty */
6942 		sane = (hwhead == swhead);
6943 
6944 	if (unlikely(!sane)) {
6945 		if (use_dmahead) {
6946 			/* try one more time, directly from the register */
6947 			use_dmahead = 0;
6948 			goto retry;
6949 		}
6950 		/* proceed as if no progress */
6951 		hwhead = swhead;
6952 	}
6953 
6954 	return hwhead;
6955 }
6956 
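/*
 * Return non-zero while the SDMA engine still has work in flight:
 * a scoreboard drain or halt is in progress, the engine has not yet
 * reached its internal halted state, or the scoreboard is not empty.
 */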
6957 static int qib_sdma_7322_busy(struct qib_pportdata *ppd)
6958 {
6959 	u64 hwstatus = qib_read_kreg_port(ppd, krp_senddmastatus);
6960 
6961 	return (hwstatus & SYM_MASK(SendDmaStatus_0, ScoreBoardDrainInProg)) ||
6962 	       (hwstatus & SYM_MASK(SendDmaStatus_0, HaltInProg)) ||
6963 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, InternalSDmaHalt)) ||
6964 	       !(hwstatus & SYM_MASK(SendDmaStatus_0, ScbEmpty));
6965 }
6966 
6967 /*
6968  * Compute the amount of delay before sending the next packet if the
6969  * port's send rate differs from the static rate set for the QP.
6970  * The delay affects the next packet and the amount of the delay is
6971  * based on the length of this packet.
6972  */
6973 static u32 qib_7322_setpbc_control(struct qib_pportdata *ppd, u32 plen,
6974 				   u8 srate, u8 vl)
6975 {
6976 	u8 snd_mult = ppd->delay_mult;
6977 	u8 rcv_mult = ib_rate_to_delay[srate];
6978 	u32 ret;
6979 
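	/*
	 * Only insert inter-packet delay when the QP's static rate is
	 * slower (larger delay multiplier) than the port's send rate.
	 */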
6980 	ret = rcv_mult > snd_mult ? ((plen + 1) >> 1) * snd_mult : 0;
6981 
6982 	/* Indicate VL15, else set the VL in the control word */
6983 	if (vl == 15)
6984 		ret |= PBC_7322_VL15_SEND_CTRL;
6985 	else
6986 		ret |= vl << PBC_VL_NUM_LSB;
6987 	ret |= ((u32)(ppd->hw_pidx)) << PBC_PORT_SEL_LSB;
6988 
6989 	return ret;
6990 }
6991 
6992 /*
6993  * Enable the per-port VL15 send buffers for use.
6994  * They follow the rest of the buffers, without a config parameter.
6995  * This was in initregs, but that is done before the shadow
6996  * is set up, and this has to be done after the shadow is
6997  * set up.
6998  */
6999 static void qib_7322_initvl15_bufs(struct qib_devdata *dd)
7000 {
7001 	unsigned vl15bufs;
7002 
7003 	vl15bufs = dd->piobcnt2k + dd->piobcnt4k;
7004 	qib_chg_pioavailkernel(dd, vl15bufs, NUM_VL15_BUFS,
7005 			       TXCHK_CHG_TYPE_KERN, NULL);
7006 }
7007 
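/*
 * The first NUM_IB_PORTS contexts are the kernel contexts; on dual-port
 * cards they split the KCTXT0 eager buffer pool, on single-port cards
 * context 0 gets it all.  Remaining (user) contexts each get the
 * chip-specific eager count, laid out after the kernel pool.
 */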
7008 static void qib_7322_init_ctxt(struct qib_ctxtdata *rcd)
7009 {
7010 	if (rcd->ctxt < NUM_IB_PORTS) {
7011 		if (rcd->dd->num_pports > 1) {
7012 			rcd->rcvegrcnt = KCTXT0_EGRCNT / 2;
7013 			rcd->rcvegr_tid_base = rcd->ctxt ? rcd->rcvegrcnt : 0;
7014 		} else {
7015 			rcd->rcvegrcnt = KCTXT0_EGRCNT;
7016 			rcd->rcvegr_tid_base = 0;
7017 		}
7018 	} else {
7019 		rcd->rcvegrcnt = rcd->dd->cspec->rcvegrcnt;
7020 		rcd->rcvegr_tid_base = KCTXT0_EGRCNT +
7021 			(rcd->ctxt - NUM_IB_PORTS) * rcd->rcvegrcnt;
7022 	}
7023 }
7024 
7025 #define QTXSLEEPS 5000
7026 static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
7027 				  u32 len, u32 which, struct qib_ctxtdata *rcd)
7028 {
7029 	int i;
7030 	const int last = start + len - 1;
7031 	const int lastr = last / BITS_PER_LONG;
7032 	u32 sleeps = 0;
7033 	int wait = rcd != NULL;
7034 	unsigned long flags;
7035 
7036 	while (wait) {
7037 		unsigned long shadow = 0;
7038 		int cstart, previ = -1;
7039 
7040 		/*
7041 		 * when flipping from kernel to user, we can't change
7042 		 * When flipping from kernel to user, we can't change
7043 		 * the checking type if the buffer is allocated to the
7044 		 * driver.  It's OK in the other direction, because it's
7045 		 * from close, and we have just disarmed all the
7046 		 * buffers.  All the kernel-to-kernel changes are also
7047 		 * OK.
7048 		for (cstart = start; cstart <= last; cstart++) {
7049 			i = ((2 * cstart) + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7050 				/ BITS_PER_LONG;
7051 			if (i != previ) {
7052 				shadow = (unsigned long)
7053 					le64_to_cpu(dd->pioavailregs_dma[i]);
7054 				previ = i;
7055 			}
7056 			if (test_bit(((2 * cstart) +
7057 				      QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT)
7058 				     % BITS_PER_LONG, &shadow))
7059 				break;
7060 		}
7061 
7062 		if (cstart > last)
7063 			break;
7064 
7065 		if (sleeps == QTXSLEEPS)
7066 			break;
7067 		/* make sure we see an updated copy next time around */
7068 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7069 		sleeps++;
7070 		msleep(20);
7071 	}
7072 
7073 	switch (which) {
7074 	case TXCHK_CHG_TYPE_DIS1:
7075 		/*
7076 		 * disable checking on a range; used by diags; just
7077 		 * one buffer, but still written generically
7078 		 */
7079 		for (i = start; i <= last; i++)
7080 			clear_bit(i, dd->cspec->sendchkenable);
7081 		break;
7082 
7083 	case TXCHK_CHG_TYPE_ENAB1:
7084 		/*
7085 		 * (re)enable checking on a range; used by diags; just
7086 		 * one buffer, but still written generically; read
7087 		 * scratch to be sure buffer actually triggered, not
7088 		 * just flushed from processor.
7089 		 */
7090 		qib_read_kreg32(dd, kr_scratch);
7091 		for (i = start; i <= last; i++)
7092 			set_bit(i, dd->cspec->sendchkenable);
7093 		break;
7094 
7095 	case TXCHK_CHG_TYPE_KERN:
7096 		/* usable by kernel */
7097 		for (i = start; i <= last; i++) {
7098 			set_bit(i, dd->cspec->sendibchk);
7099 			clear_bit(i, dd->cspec->sendgrhchk);
7100 		}
7101 		spin_lock_irqsave(&dd->uctxt_lock, flags);
7102 		/* see if we need to raise avail update threshold */
7103 		for (i = dd->first_user_ctxt;
7104 		     dd->cspec->updthresh != dd->cspec->updthresh_dflt
7105 		     && i < dd->cfgctxts; i++)
7106 			if (dd->rcd[i] && dd->rcd[i]->subctxt_cnt &&
7107 			   ((dd->rcd[i]->piocnt / dd->rcd[i]->subctxt_cnt) - 1)
7108 			   < dd->cspec->updthresh_dflt)
7109 				break;
7110 		spin_unlock_irqrestore(&dd->uctxt_lock, flags);
7111 		if (i == dd->cfgctxts) {
7112 			spin_lock_irqsave(&dd->sendctrl_lock, flags);
7113 			dd->cspec->updthresh = dd->cspec->updthresh_dflt;
7114 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7115 			dd->sendctrl |= (dd->cspec->updthresh &
7116 					 SYM_RMASK(SendCtrl, AvailUpdThld)) <<
7117 					   SYM_LSB(SendCtrl, AvailUpdThld);
7118 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7119 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7120 		}
7121 		break;
7122 
7123 	case TXCHK_CHG_TYPE_USER:
7124 		/* for user process */
7125 		for (i = start; i <= last; i++) {
7126 			clear_bit(i, dd->cspec->sendibchk);
7127 			set_bit(i, dd->cspec->sendgrhchk);
7128 		}
7129 		spin_lock_irqsave(&dd->sendctrl_lock, flags);
7130 		if (rcd && rcd->subctxt_cnt && ((rcd->piocnt
7131 			/ rcd->subctxt_cnt) - 1) < dd->cspec->updthresh) {
7132 			dd->cspec->updthresh = (rcd->piocnt /
7133 						rcd->subctxt_cnt) - 1;
7134 			dd->sendctrl &= ~SYM_MASK(SendCtrl, AvailUpdThld);
7135 			dd->sendctrl |= (dd->cspec->updthresh &
7136 					SYM_RMASK(SendCtrl, AvailUpdThld))
7137 					<< SYM_LSB(SendCtrl, AvailUpdThld);
7138 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7139 			sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
7140 		} else
7141 			spin_unlock_irqrestore(&dd->sendctrl_lock, flags);
7142 		break;
7143 
7144 	default:
7145 		break;
7146 	}
7147 
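	/* Push the updated shadow bitmaps out to the chip registers. */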
7148 	for (i = start / BITS_PER_LONG; which >= 2 && i <= lastr; ++i)
7149 		qib_write_kreg(dd, kr_sendcheckmask + i,
7150 			       dd->cspec->sendchkenable[i]);
7151 
7152 	for (i = start / BITS_PER_LONG; which < 2 && i <= lastr; ++i) {
7153 		qib_write_kreg(dd, kr_sendgrhcheckmask + i,
7154 			       dd->cspec->sendgrhchk[i]);
7155 		qib_write_kreg(dd, kr_sendibpktmask + i,
7156 			       dd->cspec->sendibchk[i]);
7157 	}
7158 
7159 	/*
7160 	 * Be sure whatever we did was seen by the chip and acted upon,
7161 	 * before we return.  Mostly important for which >= 2.
7162 	 */
7163 	qib_read_kreg32(dd, kr_scratch);
7164 }
7165 
7166 
7167 /* useful for trigger analyzers, etc. */
7168 static void writescratch(struct qib_devdata *dd, u32 val)
7169 {
7170 	qib_write_kreg(dd, kr_scratch, val);
7171 }
7172 
7173 /* Dummy for now, use chip regs soon */
7174 static int qib_7322_tempsense_rd(struct qib_devdata *dd, int regnum)
7175 {
7176 	return -ENXIO;
7177 }
7178 
7179 /**
7180  * qib_init_iba7322_funcs - set up the chip-specific function pointers
7181  * @dev: the pci_dev for qlogic_ib device
7182  * @ent: pci_device_id struct for this dev
7183  *
7184  * Also allocates, inits, and returns the devdata struct for this
7185  * device instance
7186  *
7187  * This is global, and is called directly at init to set up the
7188  * chip-specific function pointers for later use.
7189  */
7190 struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
7191 					   const struct pci_device_id *ent)
7192 {
7193 	struct qib_devdata *dd;
7194 	int ret, i;
7195 	u32 tabsize, actual_cnt = 0;
7196 
7197 	dd = qib_alloc_devdata(pdev,
7198 		NUM_IB_PORTS * sizeof(struct qib_pportdata) +
7199 		sizeof(struct qib_chip_specific) +
7200 		NUM_IB_PORTS * sizeof(struct qib_chippport_specific));
7201 	if (IS_ERR(dd))
7202 		goto bail;
7203 
7204 	dd->f_bringup_serdes    = qib_7322_bringup_serdes;
7205 	dd->f_cleanup           = qib_setup_7322_cleanup;
7206 	dd->f_clear_tids        = qib_7322_clear_tids;
7207 	dd->f_free_irq          = qib_7322_free_irq;
7208 	dd->f_get_base_info     = qib_7322_get_base_info;
7209 	dd->f_get_msgheader     = qib_7322_get_msgheader;
7210 	dd->f_getsendbuf        = qib_7322_getsendbuf;
7211 	dd->f_gpio_mod          = gpio_7322_mod;
7212 	dd->f_eeprom_wen        = qib_7322_eeprom_wen;
7213 	dd->f_hdrqempty         = qib_7322_hdrqempty;
7214 	dd->f_ib_updown         = qib_7322_ib_updown;
7215 	dd->f_init_ctxt         = qib_7322_init_ctxt;
7216 	dd->f_initvl15_bufs     = qib_7322_initvl15_bufs;
7217 	dd->f_intr_fallback     = qib_7322_intr_fallback;
7218 	dd->f_late_initreg      = qib_late_7322_initreg;
7219 	dd->f_setpbc_control    = qib_7322_setpbc_control;
7220 	dd->f_portcntr          = qib_portcntr_7322;
7221 	dd->f_put_tid           = qib_7322_put_tid;
7222 	dd->f_quiet_serdes      = qib_7322_mini_quiet_serdes;
7223 	dd->f_rcvctrl           = rcvctrl_7322_mod;
7224 	dd->f_read_cntrs        = qib_read_7322cntrs;
7225 	dd->f_read_portcntrs    = qib_read_7322portcntrs;
7226 	dd->f_reset             = qib_do_7322_reset;
7227 	dd->f_init_sdma_regs    = init_sdma_7322_regs;
7228 	dd->f_sdma_busy         = qib_sdma_7322_busy;
7229 	dd->f_sdma_gethead      = qib_sdma_7322_gethead;
7230 	dd->f_sdma_sendctrl     = qib_7322_sdma_sendctrl;
7231 	dd->f_sdma_set_desc_cnt = qib_sdma_set_7322_desc_cnt;
7232 	dd->f_sdma_update_tail  = qib_sdma_update_7322_tail;
7233 	dd->f_sendctrl          = sendctrl_7322_mod;
7234 	dd->f_set_armlaunch     = qib_set_7322_armlaunch;
7235 	dd->f_set_cntr_sample   = qib_set_cntr_7322_sample;
7236 	dd->f_iblink_state      = qib_7322_iblink_state;
7237 	dd->f_ibphys_portstate  = qib_7322_phys_portstate;
7238 	dd->f_get_ib_cfg        = qib_7322_get_ib_cfg;
7239 	dd->f_set_ib_cfg        = qib_7322_set_ib_cfg;
7240 	dd->f_set_ib_loopback   = qib_7322_set_loopback;
7241 	dd->f_get_ib_table      = qib_7322_get_ib_table;
7242 	dd->f_set_ib_table      = qib_7322_set_ib_table;
7243 	dd->f_set_intr_state    = qib_7322_set_intr_state;
7244 	dd->f_setextled         = qib_setup_7322_setextled;
7245 	dd->f_txchk_change      = qib_7322_txchk_change;
7246 	dd->f_update_usrhead    = qib_update_7322_usrhead;
7247 	dd->f_wantpiobuf_intr   = qib_wantpiobuf_7322_intr;
7248 	dd->f_xgxs_reset        = qib_7322_mini_pcs_reset;
7249 	dd->f_sdma_hw_clean_up  = qib_7322_sdma_hw_clean_up;
7250 	dd->f_sdma_hw_start_up  = qib_7322_sdma_hw_start_up;
7251 	dd->f_sdma_init_early   = qib_7322_sdma_init_early;
7252 	dd->f_writescratch      = writescratch;
7253 	dd->f_tempsense_rd	= qib_7322_tempsense_rd;
7254 #ifdef CONFIG_INFINIBAND_QIB_DCA
7255 	dd->f_notify_dca	= qib_7322_notify_dca;
7256 #endif
7257 	/*
7258 	 * Do remaining PCIe setup and save PCIe values in dd.
7259 	 * Any error printing is already done by the init code.
7260 	 * On return, we have the chip mapped, but chip registers
7261 	 * are not set up until start of qib_init_7322_variables.
7262 	 */
7263 	ret = qib_pcie_ddinit(dd, pdev, ent);
7264 	if (ret < 0)
7265 		goto bail_free;
7266 
7267 	/* initialize chip-specific variables */
7268 	ret = qib_init_7322_variables(dd);
7269 	if (ret)
7270 		goto bail_cleanup;
7271 
7272 	if (qib_mini_init || !dd->num_pports)
7273 		goto bail;
7274 
7275 	/*
7276 	 * Determine number of vectors we want; depends on port count
7277 	 * and number of configured kernel receive queues actually used.
7278 	 * Should also depend on whether sdma is enabled or not, but
7279 	 * that's such a rare testing case it's not worth worrying about.
7280 	 */
7281 	tabsize = dd->first_user_ctxt + ARRAY_SIZE(irq_table);
7282 	for (i = 0; i < tabsize; i++)
7283 		if ((i < ARRAY_SIZE(irq_table) &&
7284 		     irq_table[i].port <= dd->num_pports) ||
7285 		    (i >= ARRAY_SIZE(irq_table) &&
7286 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
7287 			actual_cnt++;
7288 	/* reduce by kernel contexts < 2 (one per port) */
7289 	if (qib_krcvq01_no_msi)
7290 		actual_cnt -= dd->num_pports;
7291 
7292 	tabsize = actual_cnt;
7293 	dd->cspec->msix_entries = kzalloc(tabsize *
7294 			sizeof(struct qib_msix_entry), GFP_KERNEL);
7295 	if (!dd->cspec->msix_entries)
7296 		tabsize = 0;
7297 
7298 	if (qib_pcie_params(dd, 8, &tabsize))
7299 		qib_dev_err(dd,
7300 			"Failed to setup PCIe or interrupts; continuing anyway\n");
7301 	/* may be less than we wanted, if not enough available */
7302 	dd->cspec->num_msix_entries = tabsize;
7303 
7304 	/* setup interrupt handler */
7305 	qib_setup_7322_interrupt(dd, 1);
7306 
7307 	/* clear diagctrl register, in case diags were running and crashed */
7308 	qib_write_kreg(dd, kr_hwdiagctrl, 0);
7309 #ifdef CONFIG_INFINIBAND_QIB_DCA
7310 	if (!dca_add_requester(&pdev->dev)) {
7311 		qib_devinfo(dd->pcidev, "DCA enabled\n");
7312 		dd->flags |= QIB_DCA_ENABLED;
7313 		qib_setup_dca(dd);
7314 	}
7315 #endif
7316 	goto bail;
7317 
7318 bail_cleanup:
7319 	qib_pcie_ddcleanup(dd);
7320 bail_free:
7321 	qib_free_devdata(dd);
7322 	dd = ERR_PTR(ret);
7323 bail:
7324 	return dd;
7325 }
7326 
7327 /*
7328  * Set the table entry at the specified index from the table specified.
7329  * There are 3 * TXDDS_TABLE_SZ entries in all per port, with the first
7330  * TXDDS_TABLE_SZ for SDR, the next for DDR, and the last for QDR.
7331  * 'idx' below addresses the correct entry, while its 4 LSBs select the
7332  * corresponding entry (one of TXDDS_TABLE_SZ) from the selected table.
7333  */
7334 #define DDS_ENT_AMP_LSB 14
7335 #define DDS_ENT_MAIN_LSB 9
7336 #define DDS_ENT_POST_LSB 5
7337 #define DDS_ENT_PRE_XTRA_LSB 3
7338 #define DDS_ENT_PRE_LSB 0
7339 
7340 /*
7341  * Set one entry in the TxDDS table for spec'd port
7342  * ridx picks one of the entries, while tp points
7343  * to the appropriate table entry.
7344  */
7345 static void set_txdds(struct qib_pportdata *ppd, int ridx,
7346 		      const struct txdds_ent *tp)
7347 {
7348 	struct qib_devdata *dd = ppd->dd;
7349 	u32 pack_ent;
7350 	int regidx;
7351 
7352 	/* Get correct offset in chip-space, and in source table */
7353 	regidx = KREG_IBPORT_IDX(IBSD_DDS_MAP_TABLE) + ridx;
7354 	/*
7355 	 * We do not use qib_write_kreg_port() because it was intended
7356 	 * only for registers in the lower "port specific" pages.
7357 	 * So do the index calculation by hand.
7358 	 */
7359 	if (ppd->hw_pidx)
7360 		regidx += (dd->palign / sizeof(u64));
7361 
7362 	pack_ent = tp->amp << DDS_ENT_AMP_LSB;
7363 	pack_ent |= tp->main << DDS_ENT_MAIN_LSB;
7364 	pack_ent |= tp->pre << DDS_ENT_PRE_LSB;
7365 	pack_ent |= tp->post << DDS_ENT_POST_LSB;
7366 	qib_write_kreg(dd, regidx, pack_ent);
7367 	/* Prevent back-to-back writes by hitting scratch */
7368 	qib_write_kreg(ppd->dd, kr_scratch, 0);
7369 }
7370 
7371 static const struct vendor_txdds_ent vendor_txdds[] = {
7372 	{ /* Amphenol 1m 30awg NoEq */
7373 		{ 0x41, 0x50, 0x48 }, "584470002       ",
7374 		{ 10,  0,  0,  5 }, { 10,  0,  0,  9 }, {  7,  1,  0, 13 },
7375 	},
7376 	{ /* Amphenol 3m 28awg NoEq */
7377 		{ 0x41, 0x50, 0x48 }, "584470004       ",
7378 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  7, 15 },
7379 	},
7380 	{ /* Finisar 3m OM2 Optical */
7381 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C03-QL",
7382 		{  0,  0,  0,  3 }, {  0,  0,  0,  4 }, {  0,  0,  0, 13 },
7383 	},
7384 	{ /* Finisar 30m OM2 Optical */
7385 		{ 0x00, 0x90, 0x65 }, "FCBG410QB1C30-QL",
7386 		{  0,  0,  0,  1 }, {  0,  0,  0,  5 }, {  0,  0,  0, 11 },
7387 	},
7388 	{ /* Finisar Default OM2 Optical */
7389 		{ 0x00, 0x90, 0x65 }, NULL,
7390 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  0,  0, 12 },
7391 	},
7392 	{ /* Gore 1m 30awg NoEq */
7393 		{ 0x00, 0x21, 0x77 }, "QSN3300-1       ",
7394 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  0, 15 },
7395 	},
7396 	{ /* Gore 2m 30awg NoEq */
7397 		{ 0x00, 0x21, 0x77 }, "QSN3300-2       ",
7398 		{  0,  0,  0,  8 }, {  0,  0,  0, 10 }, {  0,  1,  7, 15 },
7399 	},
7400 	{ /* Gore 1m 28awg NoEq */
7401 		{ 0x00, 0x21, 0x77 }, "QSN3800-1       ",
7402 		{  0,  0,  0,  6 }, {  0,  0,  0,  8 }, {  0,  1,  0, 15 },
7403 	},
7404 	{ /* Gore 3m 28awg NoEq */
7405 		{ 0x00, 0x21, 0x77 }, "QSN3800-3       ",
7406 		{  0,  0,  0,  9 }, {  0,  0,  0, 13 }, {  0,  1,  7, 15 },
7407 	},
7408 	{ /* Gore 5m 24awg Eq */
7409 		{ 0x00, 0x21, 0x77 }, "QSN7000-5       ",
7410 		{  0,  0,  0,  7 }, {  0,  0,  0,  9 }, {  0,  1,  3, 15 },
7411 	},
7412 	{ /* Gore 7m 24awg Eq */
7413 		{ 0x00, 0x21, 0x77 }, "QSN7000-7       ",
7414 		{  0,  0,  0,  9 }, {  0,  0,  0, 11 }, {  0,  2,  6, 15 },
7415 	},
7416 	{ /* Gore 5m 26awg Eq */
7417 		{ 0x00, 0x21, 0x77 }, "QSN7600-5       ",
7418 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  0,  1,  9, 13 },
7419 	},
7420 	{ /* Gore 7m 26awg Eq */
7421 		{ 0x00, 0x21, 0x77 }, "QSN7600-7       ",
7422 		{  0,  0,  0,  8 }, {  0,  0,  0, 11 }, {  10,  1,  8, 15 },
7423 	},
7424 	{ /* Intersil 12m 24awg Active */
7425 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1224",
7426 		{  0,  0,  0,  2 }, {  0,  0,  0,  5 }, {  0,  3,  0,  9 },
7427 	},
7428 	{ /* Intersil 10m 28awg Active */
7429 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP1028",
7430 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  2,  0,  2 },
7431 	},
7432 	{ /* Intersil 7m 30awg Active */
7433 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0730",
7434 		{  0,  0,  0,  6 }, {  0,  0,  0,  4 }, {  0,  1,  0,  3 },
7435 	},
7436 	{ /* Intersil 5m 32awg Active */
7437 		{ 0x00, 0x30, 0xB4 }, "QLX4000CQSFP0532",
7438 		{  0,  0,  0,  6 }, {  0,  0,  0,  6 }, {  0,  2,  0,  8 },
7439 	},
7440 	{ /* Intersil Default Active */
7441 		{ 0x00, 0x30, 0xB4 }, NULL,
7442 		{  0,  0,  0,  6 }, {  0,  0,  0,  5 }, {  0,  2,  0,  5 },
7443 	},
7444 	{ /* Luxtera 20m Active Optical */
7445 		{ 0x00, 0x25, 0x63 }, NULL,
7446 		{  0,  0,  0,  5 }, {  0,  0,  0,  8 }, {  0,  2,  0,  12 },
7447 	},
7448 	{ /* Molex 1M Cu loopback */
7449 		{ 0x00, 0x09, 0x3A }, "74763-0025      ",
7450 		{  2,  2,  6, 15 }, {  2,  2,  6, 15 }, {  2,  2,  6, 15 },
7451 	},
7452 	{ /* Molex 2m 28awg NoEq */
7453 		{ 0x00, 0x09, 0x3A }, "74757-2201      ",
7454 		{  0,  0,  0,  6 }, {  0,  0,  0,  9 }, {  0,  1,  1, 15 },
7455 	},
7456 };
7457 
7458 static const struct txdds_ent txdds_sdr[TXDDS_TABLE_SZ] = {
7459 	/* amp, pre, main, post */
7460 	{  2, 2, 15,  6 },	/* Loopback */
7461 	{  0, 0,  0,  1 },	/*  2 dB */
7462 	{  0, 0,  0,  2 },	/*  3 dB */
7463 	{  0, 0,  0,  3 },	/*  4 dB */
7464 	{  0, 0,  0,  4 },	/*  5 dB */
7465 	{  0, 0,  0,  5 },	/*  6 dB */
7466 	{  0, 0,  0,  6 },	/*  7 dB */
7467 	{  0, 0,  0,  7 },	/*  8 dB */
7468 	{  0, 0,  0,  8 },	/*  9 dB */
7469 	{  0, 0,  0,  9 },	/* 10 dB */
7470 	{  0, 0,  0, 10 },	/* 11 dB */
7471 	{  0, 0,  0, 11 },	/* 12 dB */
7472 	{  0, 0,  0, 12 },	/* 13 dB */
7473 	{  0, 0,  0, 13 },	/* 14 dB */
7474 	{  0, 0,  0, 14 },	/* 15 dB */
7475 	{  0, 0,  0, 15 },	/* 16 dB */
7476 };
7477 
7478 static const struct txdds_ent txdds_ddr[TXDDS_TABLE_SZ] = {
7479 	/* amp, pre, main, post */
7480 	{  2, 2, 15,  6 },	/* Loopback */
7481 	{  0, 0,  0,  8 },	/*  2 dB */
7482 	{  0, 0,  0,  8 },	/*  3 dB */
7483 	{  0, 0,  0,  9 },	/*  4 dB */
7484 	{  0, 0,  0,  9 },	/*  5 dB */
7485 	{  0, 0,  0, 10 },	/*  6 dB */
7486 	{  0, 0,  0, 10 },	/*  7 dB */
7487 	{  0, 0,  0, 11 },	/*  8 dB */
7488 	{  0, 0,  0, 11 },	/*  9 dB */
7489 	{  0, 0,  0, 12 },	/* 10 dB */
7490 	{  0, 0,  0, 12 },	/* 11 dB */
7491 	{  0, 0,  0, 13 },	/* 12 dB */
7492 	{  0, 0,  0, 13 },	/* 13 dB */
7493 	{  0, 0,  0, 14 },	/* 14 dB */
7494 	{  0, 0,  0, 14 },	/* 15 dB */
7495 	{  0, 0,  0, 15 },	/* 16 dB */
7496 };
7497 
7498 static const struct txdds_ent txdds_qdr[TXDDS_TABLE_SZ] = {
7499 	/* amp, pre, main, post */
7500 	{  2, 2, 15,  6 },	/* Loopback */
7501 	{  0, 1,  0,  7 },	/*  2 dB (also QMH7342) */
7502 	{  0, 1,  0,  9 },	/*  3 dB (also QMH7342) */
7503 	{  0, 1,  0, 11 },	/*  4 dB */
7504 	{  0, 1,  0, 13 },	/*  5 dB */
7505 	{  0, 1,  0, 15 },	/*  6 dB */
7506 	{  0, 1,  3, 15 },	/*  7 dB */
7507 	{  0, 1,  7, 15 },	/*  8 dB */
7508 	{  0, 1,  7, 15 },	/*  9 dB */
7509 	{  0, 1,  8, 15 },	/* 10 dB */
7510 	{  0, 1,  9, 15 },	/* 11 dB */
7511 	{  0, 1, 10, 15 },	/* 12 dB */
7512 	{  0, 2,  6, 15 },	/* 13 dB */
7513 	{  0, 2,  7, 15 },	/* 14 dB */
7514 	{  0, 2,  8, 15 },	/* 15 dB */
7515 	{  0, 2,  9, 15 },	/* 16 dB */
7516 };
7517 
7518 /*
7519  * extra entries for use with txselect, for indices >= TXDDS_TABLE_SZ.
7520  * These are mostly used for mez cards going through connectors
7521  * and backplane traces, but can be used to add other "unusual"
7522  * table values as well.
7523  */
7524 static const struct txdds_ent txdds_extra_sdr[TXDDS_EXTRA_SZ] = {
7525 	/* amp, pre, main, post */
7526 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7527 	{  0, 0, 0,  1 },	/* QMH7342 backplane settings */
7528 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7529 	{  0, 0, 0,  2 },	/* QMH7342 backplane settings */
7530 	{  0, 0, 0,  3 },	/* QMH7342 backplane settings */
7531 	{  0, 0, 0,  4 },	/* QMH7342 backplane settings */
7532 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7533 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7534 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7535 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7536 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7537 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7538 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7539 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7540 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7541 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7542 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7543 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7544 };
7545 
7546 static const struct txdds_ent txdds_extra_ddr[TXDDS_EXTRA_SZ] = {
7547 	/* amp, pre, main, post */
7548 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7549 	{  0, 0, 0,  7 },	/* QMH7342 backplane settings */
7550 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7551 	{  0, 0, 0,  8 },	/* QMH7342 backplane settings */
7552 	{  0, 0, 0,  9 },	/* QMH7342 backplane settings */
7553 	{  0, 0, 0, 10 },	/* QMH7342 backplane settings */
7554 	{  0, 1, 4, 15 },	/* QME7342 backplane settings 1.0 */
7555 	{  0, 1, 3, 15 },	/* QME7342 backplane settings 1.0 */
7556 	{  0, 1, 0, 12 },	/* QME7342 backplane settings 1.0 */
7557 	{  0, 1, 0, 11 },	/* QME7342 backplane settings 1.0 */
7558 	{  0, 1, 0,  9 },	/* QME7342 backplane settings 1.0 */
7559 	{  0, 1, 0, 14 },	/* QME7342 backplane settings 1.0 */
7560 	{  0, 1, 2, 15 },	/* QME7342 backplane settings 1.0 */
7561 	{  0, 1, 0, 11 },       /* QME7342 backplane settings 1.1 */
7562 	{  0, 1, 0,  7 },       /* QME7342 backplane settings 1.1 */
7563 	{  0, 1, 0,  9 },       /* QME7342 backplane settings 1.1 */
7564 	{  0, 1, 0,  6 },       /* QME7342 backplane settings 1.1 */
7565 	{  0, 1, 0,  8 },       /* QME7342 backplane settings 1.1 */
7566 };
7567 
7568 static const struct txdds_ent txdds_extra_qdr[TXDDS_EXTRA_SZ] = {
7569 	/* amp, pre, main, post */
7570 	{  0, 1,  0,  4 },	/* QMH7342 backplane settings */
7571 	{  0, 1,  0,  5 },	/* QMH7342 backplane settings */
7572 	{  0, 1,  0,  6 },	/* QMH7342 backplane settings */
7573 	{  0, 1,  0,  8 },	/* QMH7342 backplane settings */
7574 	{  0, 1,  0, 10 },	/* QMH7342 backplane settings */
7575 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
7576 	{  0, 1,  4, 15 },	/* QME7342 backplane settings 1.0 */
7577 	{  0, 1,  3, 15 },	/* QME7342 backplane settings 1.0 */
7578 	{  0, 1,  0, 12 },	/* QME7342 backplane settings 1.0 */
7579 	{  0, 1,  0, 11 },	/* QME7342 backplane settings 1.0 */
7580 	{  0, 1,  0,  9 },	/* QME7342 backplane settings 1.0 */
7581 	{  0, 1,  0, 14 },	/* QME7342 backplane settings 1.0 */
7582 	{  0, 1,  2, 15 },	/* QME7342 backplane settings 1.0 */
7583 	{  0, 1,  0, 11 },      /* QME7342 backplane settings 1.1 */
7584 	{  0, 1,  0,  7 },      /* QME7342 backplane settings 1.1 */
7585 	{  0, 1,  0,  9 },      /* QME7342 backplane settings 1.1 */
7586 	{  0, 1,  0,  6 },      /* QME7342 backplane settings 1.1 */
7587 	{  0, 1,  0,  8 },      /* QME7342 backplane settings 1.1 */
7588 };
7589 
7590 static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
7591 	/* amp, pre, main, post */
7592 	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
7593 	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
7594 };
7595 
7596 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
7597 					       unsigned atten)
7598 {
7599 	/*
7600 	 * The attenuation table starts at 2dB for entry 1,
7601 	 * with entry 0 being the loopback entry.
7602 	 */
7603 	if (atten <= 2)
7604 		atten = 1;
7605 	else if (atten > TXDDS_TABLE_SZ)
7606 		atten = TXDDS_TABLE_SZ - 1;
7607 	else
7608 		atten--;
7609 	return txdds + atten;
7610 }
7611 
7612 /*
7613  * if override is set, the module parameter txselect has a value
7614  * for this specific port, so use it, rather than our normal mechanism.
7615  */
7616 static void find_best_ent(struct qib_pportdata *ppd,
7617 			  const struct txdds_ent **sdr_dds,
7618 			  const struct txdds_ent **ddr_dds,
7619 			  const struct txdds_ent **qdr_dds, int override)
7620 {
7621 	struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache;
7622 	int idx;
7623 
7624 	/* Search table of known cables */
7625 	for (idx = 0; !override && idx < ARRAY_SIZE(vendor_txdds); ++idx) {
7626 		const struct vendor_txdds_ent *v = vendor_txdds + idx;
7627 
7628 		if (!memcmp(v->oui, qd->oui, QSFP_VOUI_LEN) &&
7629 		    (!v->partnum ||
7630 		     !memcmp(v->partnum, qd->partnum, QSFP_PN_LEN))) {
7631 			*sdr_dds = &v->sdr;
7632 			*ddr_dds = &v->ddr;
7633 			*qdr_dds = &v->qdr;
7634 			return;
7635 		}
7636 	}
7637 
7638 	/* Active cables don't have attenuation so we only set SERDES
7639 	 * settings to account for the attenuation of the board traces. */
7640 	if (!override && QSFP_IS_ACTIVE(qd->tech)) {
7641 		*sdr_dds = txdds_sdr + ppd->dd->board_atten;
7642 		*ddr_dds = txdds_ddr + ppd->dd->board_atten;
7643 		*qdr_dds = txdds_qdr + ppd->dd->board_atten;
7644 		return;
7645 	}
7646 
7647 	if (!override && QSFP_HAS_ATTEN(qd->tech) && (qd->atten[0] ||
7648 						      qd->atten[1])) {
7649 		*sdr_dds = get_atten_table(txdds_sdr, qd->atten[0]);
7650 		*ddr_dds = get_atten_table(txdds_ddr, qd->atten[0]);
7651 		*qdr_dds = get_atten_table(txdds_qdr, qd->atten[1]);
7652 		return;
7653 	} else if (ppd->cpspec->no_eep < TXDDS_TABLE_SZ) {
7654 		/*
7655 		 * If we have no (or incomplete) data from the cable
7656 		 * EEPROM, or no QSFP, or override is set, use the
7657 		 * module parameter value to index into the attenuation
7658 		 * table.
7659 		 */
7660 		idx = ppd->cpspec->no_eep;
7661 		*sdr_dds = &txdds_sdr[idx];
7662 		*ddr_dds = &txdds_ddr[idx];
7663 		*qdr_dds = &txdds_qdr[idx];
7664 	} else if (ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
7665 		/* similar to above, but index into the "extra" table. */
7666 		idx = ppd->cpspec->no_eep - TXDDS_TABLE_SZ;
7667 		*sdr_dds = &txdds_extra_sdr[idx];
7668 		*ddr_dds = &txdds_extra_ddr[idx];
7669 		*qdr_dds = &txdds_extra_qdr[idx];
7670 	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
7671 		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
7672 					  TXDDS_MFG_SZ)) {
7673 		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
7674 		pr_info("IB%u:%u use idx %u into txdds_mfg\n",
7675 			ppd->dd->unit, ppd->port, idx);
7676 		*sdr_dds = &txdds_extra_mfg[idx];
7677 		*ddr_dds = &txdds_extra_mfg[idx];
7678 		*qdr_dds = &txdds_extra_mfg[idx];
7679 	} else {
7680 		/* this shouldn't happen, it's range checked */
7681 		*sdr_dds = txdds_sdr + qib_long_atten;
7682 		*ddr_dds = txdds_ddr + qib_long_atten;
7683 		*qdr_dds = txdds_qdr + qib_long_atten;
7684 	}
7685 }
7686 
7687 static void init_txdds_table(struct qib_pportdata *ppd, int override)
7688 {
7689 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
7690 	struct txdds_ent *dds;
7691 	int idx;
7692 	int single_ent = 0;
7693 
7694 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, override);
7695 
7696 	/* for mez cards or override, use the selected value for all entries */
7697 	if (!(ppd->dd->flags & QIB_HAS_QSFP) || override)
7698 		single_ent = 1;
7699 
7700 	/* Fill in the first entry with the best entry found. */
7701 	set_txdds(ppd, 0, sdr_dds);
7702 	set_txdds(ppd, TXDDS_TABLE_SZ, ddr_dds);
7703 	set_txdds(ppd, 2 * TXDDS_TABLE_SZ, qdr_dds);
7704 	if (ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
7705 		QIBL_LINKACTIVE)) {
7706 		dds = (struct txdds_ent *)(ppd->link_speed_active ==
7707 					   QIB_IB_QDR ?  qdr_dds :
7708 					   (ppd->link_speed_active ==
7709 					    QIB_IB_DDR ? ddr_dds : sdr_dds));
7710 		write_tx_serdes_param(ppd, dds);
7711 	}
7712 
7713 	/* Fill in the remaining entries with the default table values. */
7714 	for (idx = 1; idx < ARRAY_SIZE(txdds_sdr); ++idx) {
7715 		set_txdds(ppd, idx, single_ent ? sdr_dds : txdds_sdr + idx);
7716 		set_txdds(ppd, idx + TXDDS_TABLE_SZ,
7717 			  single_ent ? ddr_dds : txdds_ddr + idx);
7718 		set_txdds(ppd, idx + 2 * TXDDS_TABLE_SZ,
7719 			  single_ent ? qdr_dds : txdds_qdr + idx);
7720 	}
7721 }
7722 
7723 #define KR_AHB_ACC KREG_IDX(ahb_access_ctrl)
7724 #define KR_AHB_TRANS KREG_IDX(ahb_transaction_reg)
7725 #define AHB_TRANS_RDY SYM_MASK(ahb_transaction_reg, ahb_rdy)
7726 #define AHB_ADDR_LSB SYM_LSB(ahb_transaction_reg, ahb_address)
7727 #define AHB_DATA_LSB SYM_LSB(ahb_transaction_reg, ahb_data)
7728 #define AHB_WR SYM_MASK(ahb_transaction_reg, write_not_read)
7729 #define AHB_TRANS_TRIES 10
7730 
7731 /*
7732  * The chan argument is 0=chan0, 1=chan1, 2=pll, 3=chan2, 4=chan3,
7733  * 5=subsystem, which is why most calls have "chan + (chan >> 1)"
7734  * for the channel argument.
7735  */
7736 static u32 ahb_mod(struct qib_devdata *dd, int quad, int chan, int addr,
7737 		    u32 data, u32 mask)
7738 {
7739 	u32 rd_data, wr_data, sz_mask;
7740 	u64 trans, acc, prev_acc;
7741 	u32 ret = 0xBAD0BAD;
7742 	int tries;
7743 
7744 	prev_acc = qib_read_kreg64(dd, KR_AHB_ACC);
7745 	/* From this point on, make sure we return access */
7746 	acc = (quad << 1) | 1;
7747 	qib_write_kreg(dd, KR_AHB_ACC, acc);
7748 
7749 	for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7750 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7751 		if (trans & AHB_TRANS_RDY)
7752 			break;
7753 	}
7754 	if (tries >= AHB_TRANS_TRIES) {
7755 		qib_dev_err(dd, "No ahb_rdy in %d tries\n", AHB_TRANS_TRIES);
7756 		goto bail;
7757 	}
7758 
7759 	/* If mask is not all 1s, we need to read, but different SerDes
7760 	 * entities have different sizes
7761 	 */
7762 	sz_mask = (1UL << ((quad == 1) ? 32 : 16)) - 1;
7763 	wr_data = data & mask & sz_mask;
7764 	if ((~mask & sz_mask) != 0) {
7765 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7766 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7767 
7768 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7769 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7770 			if (trans & AHB_TRANS_RDY)
7771 				break;
7772 		}
7773 		if (tries >= AHB_TRANS_TRIES) {
7774 			qib_dev_err(dd, "No Rd ahb_rdy in %d tries\n",
7775 				    AHB_TRANS_TRIES);
7776 			goto bail;
7777 		}
7778 		/* Re-read in case host split reads and read data first */
7779 		trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7780 		rd_data = (uint32_t)(trans >> AHB_DATA_LSB);
7781 		wr_data |= (rd_data & ~mask & sz_mask);
7782 	}
7783 
7784 	/* If mask is not zero, we need to write. */
7785 	if (mask & sz_mask) {
7786 		trans = ((chan << 6) | addr) << (AHB_ADDR_LSB + 1);
7787 		trans |= ((uint64_t)wr_data << AHB_DATA_LSB);
7788 		trans |= AHB_WR;
7789 		qib_write_kreg(dd, KR_AHB_TRANS, trans);
7790 
7791 		for (tries = 1; tries < AHB_TRANS_TRIES; ++tries) {
7792 			trans = qib_read_kreg64(dd, KR_AHB_TRANS);
7793 			if (trans & AHB_TRANS_RDY)
7794 				break;
7795 		}
7796 		if (tries >= AHB_TRANS_TRIES) {
7797 			qib_dev_err(dd, "No Wr ahb_rdy in %d tries\n",
7798 				    AHB_TRANS_TRIES);
7799 			goto bail;
7800 		}
7801 	}
7802 	ret = wr_data;
7803 bail:
7804 	qib_write_kreg(dd, KR_AHB_ACC, prev_acc);
7805 	return ret;
7806 }
7807 
7808 static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
7809 			     unsigned mask)
7810 {
7811 	struct qib_devdata *dd = ppd->dd;
7812 	int chan;
7813 
7814 	for (chan = 0; chan < SERDES_CHANS; ++chan) {
7815 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7816 			data, mask);
7817 		ahb_mod(dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)), addr,
7818 			0, 0);
7819 	}
7820 }
7821 
7822 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
7823 {
7824 	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
7825 	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
7826 
7827 	if (enable && !state) {
7828 		pr_info("IB%u:%u Turning LOS on\n",
7829 			ppd->dd->unit, ppd->port);
7830 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7831 	} else if (!enable && state) {
7832 		pr_info("IB%u:%u Turning LOS off\n",
7833 			ppd->dd->unit, ppd->port);
7834 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
7835 	}
7836 	qib_write_kreg_port(ppd, krp_serdesctrl, data);
7837 }
7838 
7839 static int serdes_7322_init(struct qib_pportdata *ppd)
7840 {
7841 	int ret = 0;
7842 
7843 	if (ppd->dd->cspec->r1)
7844 		ret = serdes_7322_init_old(ppd);
7845 	else
7846 		ret = serdes_7322_init_new(ppd);
7847 	return ret;
7848 }
7849 
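/*
 * Bringup used on r1 silicon: patch a handful of SerDes defaults
 * ("better for IB"), program the LoS parameters, and enable receive
 * adaptation, without the fuller calibration sequence done for later
 * revisions in serdes_7322_init_new() below.
 */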
7850 static int serdes_7322_init_old(struct qib_pportdata *ppd)
7851 {
7852 	u32 le_val;
7853 
7854 	/*
7855 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
7856 	 * for adapters with QSFP
7857 	 */
7858 	init_txdds_table(ppd, 0);
7859 
7860 	/* ensure no tx overrides from earlier driver loads */
7861 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7862 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7863 		reset_tx_deemphasis_override));
7864 
7865 	/* Patch some SerDes defaults to "Better for IB" */
7866 	/* Timing Loop Bandwidth: cdr_timing[11:9] = 0 */
7867 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
7868 
7869 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
7870 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
7871 	/* Enable LE2: rxle2en_r2a addr 13 bit [6] = 1 */
7872 	ibsd_wr_allchans(ppd, 13, (1 << 6), (1 << 6));
7873 
7874 	/* May be overridden in qsfp_7322_event */
7875 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
7876 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
7877 
7878 	/* enable LE1 adaptation for all but QME, which is disabled */
7879 	le_val = IS_QME(ppd->dd) ? 0 : 1;
7880 	ibsd_wr_allchans(ppd, 13, (le_val << 5), (1 << 5));
7881 
7882 	/* Clear cmode-override, may be set from older driver */
7883 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7884 
7885 	/* Timing Recovery: rxtapsel addr 5 bits [9:8] = 0 */
7886 	ibsd_wr_allchans(ppd, 5, (0 << 8), BMASK(9, 8));
7887 
7888 	/* setup LoS params; these are subsystem, so chan == 5 */
7889 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
7890 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
7891 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
7892 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
7893 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
7894 
7895 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
7896 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
7897 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
7898 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
7899 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
7900 
7901 	/* LoS filter select enabled */
7902 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
7903 
7904 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
7905 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
7906 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
7907 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
7908 
7909 	serdes_7322_los_enable(ppd, 1);
7910 
7911 	/* rxbistena; set to 0 to avoid effects if it switches later */
7912 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
7913 
7914 	/* Configure 4 DFE taps, and only they adapt */
7915 	ibsd_wr_allchans(ppd, 16, 0 << 0, BMASK(1, 0));
7916 
7917 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
7918 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
7919 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
7920 
7921 	/*
7922 	 * Set receive adaptation mode.  SDR and DDR adaptation are
7923 	 * always on, and QDR is initially enabled; later disabled.
7924 	 */
7925 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
7926 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
7927 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
7928 			    ppd->dd->cspec->r1 ?
7929 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
7930 	ppd->cpspec->qdr_dfe_on = 1;
7931 
7932 	/* FLoop LOS gate: PPM filter  enabled */
7933 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
7934 
7935 	/* rx offset center enabled */
7936 	ibsd_wr_allchans(ppd, 12, 1 << 4, 1 << 4);
7937 
7938 	if (!ppd->dd->cspec->r1) {
7939 		ibsd_wr_allchans(ppd, 12, 1 << 12, 1 << 12);
7940 		ibsd_wr_allchans(ppd, 12, 2 << 8, 0x0f << 8);
7941 	}
7942 
7943 	/* Set the frequency loop bandwidth to 15 */
7944 	ibsd_wr_allchans(ppd, 2, 15 << 5, BMASK(8, 5));
7945 
7946 	return 0;
7947 }
7948 
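/*
 * Bringup used when dd->cspec->r1 is not set, following the "LSI
 * suggested serdes bringup" sequence below: calibration setup, RX
 * reset, LoS parameters, RX latch calibration, and finally bringing the
 * RX up with adaptation enabled and the Tx DDS tables initialized.
 */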
7949 static int serdes_7322_init_new(struct qib_pportdata *ppd)
7950 {
7951 	unsigned long tend;
7952 	u32 le_val, rxcaldone;
7953 	int chan, chan_done = (1 << SERDES_CHANS) - 1;
7954 
7955 	/* Clear cmode-override, may be set from older driver */
7956 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
7957 
7958 	/* ensure no tx overrides from earlier driver loads */
7959 	qib_write_kreg_port(ppd, krp_tx_deemph_override,
7960 		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
7961 		reset_tx_deemphasis_override));
7962 
7963 	/* START OF LSI SUGGESTED SERDES BRINGUP */
7964 	/* Reset - Calibration Setup */
7965 	/*       Stop DFE adaptation */
7966 	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
7967 	/*       Disable LE1 */
7968 	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
7969 	/*       Disable autoadapt for LE1 */
7970 	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
7971 	/*       Disable LE2 */
7972 	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
7973 	/*       Disable VGA */
7974 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
7975 	/*       Disable AFE Offset Cancel */
7976 	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
7977 	/*       Disable Timing Loop */
7978 	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
7979 	/*       Disable Frequency Loop */
7980 	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
7981 	/*       Disable Baseline Wander Correction */
7982 	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
7983 	/*       Disable RX Calibration */
7984 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
7985 	/*       Disable RX Offset Calibration */
7986 	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
7987 	/*       Select BB CDR */
7988 	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
7989 	/*       CDR Step Size */
7990 	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
7991 	/*       Enable phase Calibration */
7992 	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
7993 	/*       DFE Bandwidth [2:14-12] */
7994 	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
7995 	/*       DFE Config (4 taps only) */
7996 	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
7997 	/*       Gain Loop Bandwidth */
7998 	if (!ppd->dd->cspec->r1) {
7999 		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
8000 		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
8001 	} else {
8002 		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
8003 	}
8004 	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
8005 	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
8006 	/*       Data Rate Select [5:7-6] (leave as default) */
8007 	/*       RX Parallel Word Width [3:10-8] (leave as default) */
8008 
8009 	/* RX RESET */
8010 	/*       Single- or Multi-channel reset */
8011 	/*       RX Analog reset */
8012 	/*       RX Digital reset */
8013 	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
8014 	msleep(20);
8015 	/*       RX Analog reset */
8016 	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
8017 	msleep(20);
8018 	/*       RX Digital reset */
8019 	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
8020 	msleep(20);
8021 
8022 	/* setup LoS params; these are subsystem, so chan == 5 */
8023 	/* LoS filter threshold_count on, ch 0-3, set to 8 */
8024 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
8025 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
8026 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
8027 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
8028 
8029 	/* LoS filter threshold_count off, ch 0-3, set to 4 */
8030 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
8031 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
8032 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
8033 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
8034 
8035 	/* LoS filter select enabled */
8036 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
8037 
8038 	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
8039 	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
8040 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
8041 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
8042 
8043 	/* Turn on LOS on initial SERDES init */
8044 	serdes_7322_los_enable(ppd, 1);
8045 	/* FLoop LOS gate: PPM filter  enabled */
8046 	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
8047 
8048 	/* RX LATCH CALIBRATION */
8049 	/*       Enable Eyefinder Phase Calibration latch */
8050 	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
8051 	/*       Enable RX Offset Calibration latch */
8052 	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
8053 	msleep(20);
8054 	/*       Start Calibration */
8055 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
8056 	tend = jiffies + msecs_to_jiffies(500);
8057 	while (chan_done && !time_is_before_jiffies(tend)) {
8058 		msleep(20);
8059 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8060 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8061 					    (chan + (chan >> 1)),
8062 					    25, 0, 0);
8063 			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
8064 			    (~chan_done & (1 << chan)) == 0)
8065 				chan_done &= ~(1 << chan);
8066 		}
8067 	}
8068 	if (chan_done) {
8069 		pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
8070 			 IBSD(ppd->hw_pidx), chan_done);
8071 	} else {
8072 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
8073 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
8074 					    (chan + (chan >> 1)),
8075 					    25, 0, 0);
8076 			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
8077 				pr_info("Serdes %d chan %d calibration failed\n",
8078 					IBSD(ppd->hw_pidx), chan);
8079 		}
8080 	}
8081 
8082 	/*       Turn off Calibration */
8083 	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
8084 	msleep(20);
8085 
8086 	/* BRING RX UP */
8087 	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
8088 	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
8089 	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
8090 	/*       Set LE2 Loop bandwidth */
8091 	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
8092 	/*       Enable LE2 */
8093 	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
8094 	msleep(20);
8095 	/*       Enable H0 only */
8096 	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
8097 	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
8098 	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
8099 	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
8100 	/*       Enable VGA */
8101 	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
8102 	msleep(20);
8103 	/*       Set Frequency Loop Bandwidth */
8104 	ibsd_wr_allchans(ppd, 2, (15 << 5), BMASK(8, 5));
8105 	/*       Enable Frequency Loop */
8106 	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
8107 	/*       Set Timing Loop Bandwidth */
8108 	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
8109 	/*       Enable Timing Loop */
8110 	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
8111 	msleep(50);
8112 	/*       Enable DFE
8113 	 *       Set receive adaptation mode.  SDR and DDR adaptation are
8114 	 *       always on, and QDR is initially enabled; later disabled.
8115 	 */
8116 	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
8117 	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
8118 	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
8119 			    ppd->dd->cspec->r1 ?
8120 			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
8121 	ppd->cpspec->qdr_dfe_on = 1;
8122 	/*       Disable LE1  */
8123 	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
8124 	/*       Disable auto adapt for LE1 */
8125 	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
8126 	msleep(20);
8127 	/*       Enable AFE Offset Cancel */
8128 	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
8129 	/*       Enable Baseline Wander Correction */
8130 	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
8131 	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
8132 	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
8133 	/* VGA output common mode */
8134 	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
8135 
8136 	/*
8137 	 * Initialize the Tx DDS tables.  Also done every QSFP event,
8138 	 * for adapters with QSFP
8139 	 */
8140 	init_txdds_table(ppd, 0);
8141 
8142 	return 0;
8143 }
8144 
8145 /* start adjust QMH serdes parameters */
8146 
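/*
 * The helpers below (used by force_h1()) manually force the H1 value on
 * r1 chips: set_man_code() loads a 6-bit code into bits [14:9] of SerDes
 * register 9, set_man_mode_h1() turns manual H1 mode on or off via bits
 * [14:10] of register 1, and clock_man() toggles bit 14 of register 4
 * (1, 0, 1, 0) to clock the code in.
 */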
8147 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
8148 {
8149 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8150 		9, code << 9, 0x3f << 9);
8151 }
8152 
8153 static void set_man_mode_h1(struct qib_pportdata *ppd, int chan,
8154 	int enable, u32 tapenable)
8155 {
8156 	if (enable)
8157 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8158 			1, 3 << 10, 0x1f << 10);
8159 	else
8160 		ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8161 			1, 0, 0x1f << 10);
8162 }
8163 
8164 /* Set clock to 1, 0, 1, 0 */
8165 static void clock_man(struct qib_pportdata *ppd, int chan)
8166 {
8167 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8168 		4, 0x4000, 0x4000);
8169 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8170 		4, 0, 0x4000);
8171 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8172 		4, 0x4000, 0x4000);
8173 	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), (chan + (chan >> 1)),
8174 		4, 0, 0x4000);
8175 }
8176 
8177 /*
8178  * Write the current Tx serdes pre, post, main and amp settings into the serdes.
8179  * The caller must pass the settings appropriate for the current speed,
8180  * or not care if they are correct for the current speed.
8181  */
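/*
 * Each txdds_ent field (amp, main, post, pre) is first limited to its
 * field width with SYM_RMASK() and then shifted into place with
 * SYM_LSB(), so an out-of-range table value cannot spill into the
 * adjacent override bits.
 */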
8182 static void write_tx_serdes_param(struct qib_pportdata *ppd,
8183 				  struct txdds_ent *txdds)
8184 {
8185 	u64 deemph;
8186 
8187 	deemph = qib_read_kreg_port(ppd, krp_tx_deemph_override);
8188 	/* field names for amp, main, post, pre, respectively */
8189 	deemph &= ~(SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txampcntl_d2a) |
8190 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txc0_ena) |
8191 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcp1_ena) |
8192 		    SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0, txcn1_ena));
8193 
8194 	deemph |= SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8195 			   tx_override_deemphasis_select);
8196 	deemph |= (txdds->amp & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8197 		    txampcntl_d2a)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8198 				       txampcntl_d2a);
8199 	deemph |= (txdds->main & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8200 		     txc0_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8201 				   txc0_ena);
8202 	deemph |= (txdds->post & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8203 		     txcp1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8204 				    txcp1_ena);
8205 	deemph |= (txdds->pre & SYM_RMASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8206 		     txcn1_ena)) << SYM_LSB(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
8207 				    txcn1_ena);
8208 	qib_write_kreg_port(ppd, krp_tx_deemph_override, deemph);
8209 }
8210 
8211 /*
8212  * Set the parameters for mez cards on link bounce, so they are
8213  * always exactly what was requested.  Similar logic to init_txdds
8214  * but does just the serdes.
8215  */
8216 static void adj_tx_serdes(struct qib_pportdata *ppd)
8217 {
8218 	const struct txdds_ent *sdr_dds, *ddr_dds, *qdr_dds;
8219 	struct txdds_ent *dds;
8220 
8221 	find_best_ent(ppd, &sdr_dds, &ddr_dds, &qdr_dds, 1);
8222 	dds = (struct txdds_ent *)(ppd->link_speed_active == QIB_IB_QDR ?
8223 		qdr_dds : (ppd->link_speed_active == QIB_IB_DDR ?
8224 				ddr_dds : sdr_dds));
8225 	write_tx_serdes_param(ppd, dds);
8226 }
8227 
8228 /* set QDR forced value for H1, if needed */
8229 static void force_h1(struct qib_pportdata *ppd)
8230 {
8231 	int chan;
8232 
8233 	ppd->cpspec->qdr_reforce = 0;
8234 	if (!ppd->dd->cspec->r1)
8235 		return;
8236 
8237 	for (chan = 0; chan < SERDES_CHANS; chan++) {
8238 		set_man_mode_h1(ppd, chan, 1, 0);
8239 		set_man_code(ppd, chan, ppd->cpspec->h1_val);
8240 		clock_man(ppd, chan);
8241 		set_man_mode_h1(ppd, chan, 0, 0);
8242 	}
8243 }
8244 
8245 #define SJA_EN SYM_MASK(SPC_JTAG_ACCESS_REG, SPC_JTAG_ACCESS_EN)
8246 #define BISTEN_LSB SYM_LSB(SPC_JTAG_ACCESS_REG, bist_en)
8247 
8248 #define R_OPCODE_LSB 3
8249 #define R_OP_NOP 0
8250 #define R_OP_SHIFT 2
8251 #define R_OP_UPDATE 3
8252 #define R_TDI_LSB 2
8253 #define R_TDO_LSB 1
8254 #define R_RDY 1
8255 
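/*
 * The kr_r_access register gives JTAG-style access to the chip's scan
 * chains: each transaction selects a chain (bist_en) and an opcode
 * (NOP/SHIFT/UPDATE), presents one TDI bit and, once R_RDY is set,
 * returns one TDO bit.  qib_r_shift() clocks "len" bits LSB-first from
 * "inp" through the selected chain while capturing the returned bits in
 * "outp"; qib_r_update() then issues the UPDATE opcode to latch the
 * shifted value.
 */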
8256 static int qib_r_grab(struct qib_devdata *dd)
8257 {
8258 	u64 val = SJA_EN;
8259 
8260 	qib_write_kreg(dd, kr_r_access, val);
8261 	qib_read_kreg32(dd, kr_scratch);
8262 	return 0;
8263 }
8264 
8265 /* qib_r_wait_for_rdy() not only waits for the ready bit, it
8266  * returns the current state of R_TDO
8267  */
8268 static int qib_r_wait_for_rdy(struct qib_devdata *dd)
8269 {
8270 	u64 val;
8271 	int timeout;
8272 
8273 	for (timeout = 0; timeout < 100 ; ++timeout) {
8274 		val = qib_read_kreg32(dd, kr_r_access);
8275 		if (val & R_RDY)
8276 			return (val >> R_TDO_LSB) & 1;
8277 	}
8278 	return -1;
8279 }
8280 
8281 static int qib_r_shift(struct qib_devdata *dd, int bisten,
8282 		       int len, u8 *inp, u8 *outp)
8283 {
8284 	u64 valbase, val;
8285 	int ret, pos;
8286 
8287 	valbase = SJA_EN | (bisten << BISTEN_LSB) |
8288 		(R_OP_SHIFT << R_OPCODE_LSB);
8289 	ret = qib_r_wait_for_rdy(dd);
8290 	if (ret < 0)
8291 		goto bail;
8292 	for (pos = 0; pos < len; ++pos) {
8293 		val = valbase;
8294 		if (outp) {
8295 			outp[pos >> 3] &= ~(1 << (pos & 7));
8296 			outp[pos >> 3] |= (ret << (pos & 7));
8297 		}
8298 		if (inp) {
8299 			int tdi = inp[pos >> 3] >> (pos & 7);
8300 
8301 			val |= ((tdi & 1) << R_TDI_LSB);
8302 		}
8303 		qib_write_kreg(dd, kr_r_access, val);
8304 		qib_read_kreg32(dd, kr_scratch);
8305 		ret = qib_r_wait_for_rdy(dd);
8306 		if (ret < 0)
8307 			break;
8308 	}
8309 	/* Restore to NOP between operations. */
8310 	val =  SJA_EN | (bisten << BISTEN_LSB);
8311 	qib_write_kreg(dd, kr_r_access, val);
8312 	qib_read_kreg32(dd, kr_scratch);
8313 	ret = qib_r_wait_for_rdy(dd);
8314 
8315 	if (ret >= 0)
8316 		ret = pos;
8317 bail:
8318 	return ret;
8319 }
8320 
8321 static int qib_r_update(struct qib_devdata *dd, int bisten)
8322 {
8323 	u64 val;
8324 	int ret;
8325 
8326 	val = SJA_EN | (bisten << BISTEN_LSB) | (R_OP_UPDATE << R_OPCODE_LSB);
8327 	ret = qib_r_wait_for_rdy(dd);
8328 	if (ret >= 0) {
8329 		qib_write_kreg(dd, kr_r_access, val);
8330 		qib_read_kreg32(dd, kr_scratch);
8331 	}
8332 	return ret;
8333 }
8334 
8335 #define BISTEN_PORT_SEL 15
8336 #define LEN_PORT_SEL 625
8337 #define BISTEN_AT 17
8338 #define LEN_AT 156
8339 #define BISTEN_ETM 16
8340 #define LEN_ETM 632
8341 
8342 #define BIT2BYTE(x) (((x) +  BITS_PER_BYTE - 1) / BITS_PER_BYTE)
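/* BIT2BYTE() rounds a bit count up to whole bytes, e.g. BIT2BYTE(156) = 20. */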
8343 
8344 /* these are common for all IB port use cases. */
8345 static u8 reset_at[BIT2BYTE(LEN_AT)] = {
8346 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8347 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8348 };
8349 static u8 reset_atetm[BIT2BYTE(LEN_ETM)] = {
8350 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8351 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8352 	0x00, 0x00, 0x00, 0x80, 0xe3, 0x81, 0x73, 0x3c, 0x70, 0x8e,
8353 	0x07, 0xce, 0xf1, 0xc0, 0x39, 0x1e, 0x38, 0xc7, 0x03, 0xe7,
8354 	0x78, 0xe0, 0x1c, 0x0f, 0x9c, 0x7f, 0x80, 0x73, 0x0f, 0x70,
8355 	0xde, 0x01, 0xce, 0x39, 0xc0, 0xf9, 0x06, 0x38, 0xd7, 0x00,
8356 	0xe7, 0x19, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8357 	0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
8358 };
8359 static u8 at[BIT2BYTE(LEN_AT)] = {
8360 	0x00, 0x00, 0x18, 0x00, 0x00, 0x00, 0x18, 0x00, 0x00, 0x00,
8361 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
8362 };
8363 
8364 /* used for IB1 or IB2, only one in use */
8365 static u8 atetm_1port[BIT2BYTE(LEN_ETM)] = {
8366 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8367 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8368 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8369 	0x00, 0x10, 0xf2, 0x80, 0x83, 0x1e, 0x38, 0x00, 0x00, 0x00,
8370 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8371 	0x00, 0x00, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xc8, 0x03,
8372 	0x07, 0x7b, 0xa0, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x18, 0x00,
8373 	0x18, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0x00,
8374 };
8375 
8376 /* used when both IB1 and IB2 are in use */
8377 static u8 atetm_2port[BIT2BYTE(LEN_ETM)] = {
8378 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8379 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79,
8380 	0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
8381 	0x00, 0x00, 0xf8, 0x80, 0x83, 0x1e, 0x38, 0xe0, 0x03, 0x05,
8382 	0x7b, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
8383 	0xa2, 0x0f, 0x50, 0xf4, 0x41, 0x00, 0x18, 0x78, 0xd1, 0x07,
8384 	0x02, 0x7c, 0x80, 0x3e, 0x00, 0x02, 0x00, 0x00, 0x3e, 0x00,
8385 	0x02, 0x00, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00,
8386 };
8387 
8388 /* used when only IB1 is in use */
8389 static u8 portsel_port1[BIT2BYTE(LEN_PORT_SEL)] = {
8390 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8391 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8392 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8393 	0x13, 0x78, 0x78, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8394 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8395 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8396 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8397 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8398 };
8399 
8400 /* used when only IB2 is in use */
8401 static u8 portsel_port2[BIT2BYTE(LEN_PORT_SEL)] = {
8402 	0x32, 0x65, 0xa4, 0x7b, 0x10, 0x98, 0xdc, 0xfe, 0x39, 0x39,
8403 	0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x73, 0x32, 0x32, 0x32,
8404 	0x32, 0x32, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8405 	0x39, 0x78, 0x78, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39, 0x39,
8406 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x74, 0x32,
8407 	0x32, 0x32, 0x32, 0x32, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8408 	0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a,
8409 	0x3a, 0x3a, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x01,
8410 };
8411 
8412 /* used when both IB1 and IB2 are in use */
8413 static u8 portsel_2port[BIT2BYTE(LEN_PORT_SEL)] = {
8414 	0x32, 0xba, 0x54, 0x76, 0x10, 0x98, 0xdc, 0xfe, 0x13, 0x13,
8415 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x73, 0x0c, 0x0c, 0x0c,
8416 	0x0c, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8417 	0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
8418 	0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x74, 0x32,
8419 	0x32, 0x32, 0x32, 0x32, 0x14, 0x14, 0x14, 0x14, 0x14, 0x3a,
8420 	0x3a, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14, 0x14,
8421 	0x14, 0x14, 0x9f, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
8422 };
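/*
 * The byte arrays above are scan-chain images (LEN_PORT_SEL, LEN_AT and
 * LEN_ETM bits long); setup_7322_link_recovery() shifts them in through
 * qib_r_shift()/qib_r_update() to set up single- or dual-port coverage.
 */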
8423 
8424 /*
8425  * Do setup to properly handle IB link recovery; if "both" is set, we
8426  * are initializing to cover both ports; otherwise we are initializing
8427  * to cover a single port card, or the port has reached INIT and we may
8428  * need to switch coverage types.
8429  */
8430 static void setup_7322_link_recovery(struct qib_pportdata *ppd, u32 both)
8431 {
8432 	u8 *portsel, *etm;
8433 	struct qib_devdata *dd = ppd->dd;
8434 
8435 	if (!ppd->dd->cspec->r1)
8436 		return;
8437 	if (!both) {
8438 		dd->cspec->recovery_ports_initted++;
8439 		ppd->cpspec->recovery_init = 1;
8440 	}
8441 	if (!both && dd->cspec->recovery_ports_initted == 1) {
8442 		portsel = ppd->port == 1 ? portsel_port1 : portsel_port2;
8443 		etm = atetm_1port;
8444 	} else {
8445 		portsel = portsel_2port;
8446 		etm = atetm_2port;
8447 	}
8448 
8449 	if (qib_r_grab(dd) < 0 ||
8450 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, reset_atetm, NULL) < 0 ||
8451 		qib_r_update(dd, BISTEN_ETM) < 0 ||
8452 		qib_r_shift(dd, BISTEN_AT, LEN_AT, reset_at, NULL) < 0 ||
8453 		qib_r_update(dd, BISTEN_AT) < 0 ||
8454 		qib_r_shift(dd, BISTEN_PORT_SEL, LEN_PORT_SEL,
8455 			    portsel, NULL) < 0 ||
8456 		qib_r_update(dd, BISTEN_PORT_SEL) < 0 ||
8457 		qib_r_shift(dd, BISTEN_AT, LEN_AT, at, NULL) < 0 ||
8458 		qib_r_update(dd, BISTEN_AT) < 0 ||
8459 		qib_r_shift(dd, BISTEN_ETM, LEN_ETM, etm, NULL) < 0 ||
8460 		qib_r_update(dd, BISTEN_ETM) < 0)
8461 		qib_dev_err(dd, "Failed IB link recovery setup\n");
8462 }
8463 
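/*
 * Presumably called after a link-recovery event on a single-port (r1)
 * configuration: briefly assert FreezeMode, then read kr_act_fmask.
 * A value of zero means the part cannot recover until a power cycle;
 * otherwise clear the PClkNotDetect error, drop freeze, and take the
 * IBC out of reset again.
 */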
8464 static void check_7322_rxe_status(struct qib_pportdata *ppd)
8465 {
8466 	struct qib_devdata *dd = ppd->dd;
8467 	u64 fmask;
8468 
8469 	if (dd->cspec->recovery_ports_initted != 1)
8470 		return; /* rest doesn't apply to dualport */
8471 	qib_write_kreg(dd, kr_control, dd->control |
8472 		       SYM_MASK(Control, FreezeMode));
8473 	(void)qib_read_kreg64(dd, kr_scratch);
8474 	udelay(3); /* ibcreset asserted 400ns, be sure that's over */
8475 	fmask = qib_read_kreg64(dd, kr_act_fmask);
8476 	if (!fmask) {
8477 		/*
8478 		 * Require a power cycle before we'll work again; make
8479 		 * sure we get no more interrupts, and don't turn off
8480 		 * freeze.
8481 		 */
8482 		ppd->dd->cspec->stay_in_freeze = 1;
8483 		qib_7322_set_intr_state(ppd->dd, 0);
8484 		qib_write_kreg(dd, kr_fmask, 0ULL);
8485 		qib_dev_err(dd, "HCA unusable until powercycled\n");
8486 		return; /* eventually reset */
8487 	}
8488 
8489 	qib_write_kreg(ppd->dd, kr_hwerrclear,
8490 	    SYM_MASK(HwErrClear, IBSerdesPClkNotDetectClear_1));
8491 
8492 	/* don't do the full clear_freeze(), not needed for this */
8493 	qib_write_kreg(dd, kr_control, dd->control);
8494 	qib_read_kreg32(dd, kr_scratch);
8495 	/* take IBC out of reset */
8496 	if (ppd->link_speed_supported) {
8497 		ppd->cpspec->ibcctrl_a &=
8498 			~SYM_MASK(IBCCtrlA_0, IBStatIntReductionEn);
8499 		qib_write_kreg_port(ppd, krp_ibcctrl_a,
8500 				    ppd->cpspec->ibcctrl_a);
8501 		qib_read_kreg32(dd, kr_scratch);
8502 		if (ppd->lflags & QIBL_IB_LINK_DISABLED)
8503 			qib_set_ib_7322_lstate(ppd, 0,
8504 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
8505 	}
8506 }
8507