/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

/*
 * Copyright 2023 Oxide Computer Company
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/queue.h>
#include <sys/containerof.h>
#include <sys/sensors.h>
#include <sys/firmload.h>

#include "version.h"
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_extra_regs.h"
#include "t4_l2t.h"

static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
    int *rp);
struct cb_ops t4_cb_ops = {
	.cb_open =		t4_cb_open,
	.cb_close =		t4_cb_close,
	.cb_strategy =		nodev,
	.cb_print =		nodev,
	.cb_dump =		nodev,
	.cb_read =		nodev,
	.cb_write =		nodev,
	.cb_ioctl =		t4_cb_ioctl,
	.cb_devmap =		nodev,
	.cb_mmap =		nodev,
	.cb_segmap =		nodev,
	.cb_chpoll =		nochpoll,
	.cb_prop_op =		ddi_prop_op,
	.cb_flag =		D_MP,
	.cb_rev =		CB_REV,
	.cb_aread =		nodev,
	.cb_awrite =		nodev
};

static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result);
static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **cdipp);
static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
    ddi_bus_config_op_t op, void *arg);
struct bus_ops t4_bus_ops = {
	.busops_rev =		BUSO_REV,
	.bus_ctl =		t4_bus_ctl,
	.bus_prop_op =		ddi_bus_prop_op,
	.bus_config =		t4_bus_config,
	.bus_unconfig =		t4_bus_unconfig,
};

static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **rp);
static int t4_devo_probe(dev_info_t *dip);
static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int t4_devo_quiesce(dev_info_t *dip);
struct dev_ops t4_dev_ops = {
	.devo_rev =		DEVO_REV,
	.devo_getinfo =		t4_devo_getinfo,
	.devo_identify =	nulldev,
	.devo_probe =		t4_devo_probe,
	.devo_attach =		t4_devo_attach,
	.devo_detach =		t4_devo_detach,
	.devo_reset =		nodev,
	.devo_cb_ops =		&t4_cb_ops,
	.devo_bus_ops =		&t4_bus_ops,
	.devo_quiesce =		&t4_devo_quiesce,
};

static struct modldrv modldrv = {
	.drv_modops =		&mod_driverops,
	.drv_linkinfo =		"Chelsio T4 nexus " DRV_VERSION,
	.drv_dev_ops =		&t4_dev_ops
};

static struct modlinkage modlinkage = {
	.ml_rev =		MODREV_1,
	.ml_linkage =		{&modldrv, NULL},
};

void *t4_list;

struct intrs_and_queues {
	int intr_type;		/* DDI_INTR_TYPE_* */
	int nirq;		/* Number of vectors */
	int intr_fwd;		/* Interrupts forwarded */
	int ntxq10g;		/* # of NIC txq's for each 10G port */
	int nrxq10g;		/* # of NIC rxq's for each 10G port */
	int ntxq1g;		/* # of NIC txq's for each 1G port */
	int nrxq1g;		/* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD_ENABLE
	int nofldtxq10g;	/* # of TOE txq's for each 10G port */
	int nofldrxq10g;	/* # of TOE rxq's for each 10G port */
	int nofldtxq1g;		/* # of TOE txq's for each 1G port */
	int nofldrxq1g;		/* # of TOE rxq's for each 1G port */
#endif
};

static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
    mblk_t *m);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
static unsigned int getpf(struct adapter *sc);
static int prep_firmware(struct adapter *sc);
static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
static int partition_resources(struct adapter *sc);
static int adap__pre_init_tweaks(struct adapter *sc);
static int get_params__pre_init(struct adapter *sc);
static int get_params__post_init(struct adapter *sc);
static int set_params__post_init(struct adapter *);
static void setup_memwin(struct adapter *sc);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
uint32_t position_memwin(struct adapter *, int, uint32_t);
static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
    uint_t count);
static int init_driver_props(struct adapter *sc, struct driver_properties *p);
static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq);
static int add_child_node(struct adapter *sc, int idx);
static int remove_child_node(struct adapter *sc, int idx);
static kstat_t *setup_kstats(struct adapter *sc);
static kstat_t *setup_wc_kstats(struct adapter *);
static int update_wc_kstats(kstat_t *, int);
#ifdef TCP_OFFLOAD_ENABLE
static int toe_capability(struct port_info *pi, int enable);
static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc);
static int deactivate_uld(struct uld_softc *usc);
#endif
static kmutex_t t4_adapter_list_lock;
static SLIST_HEAD(, adapter) t4_adapter_list;
#ifdef TCP_OFFLOAD_ENABLE
static kmutex_t t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

static int t4_temperature_read(void *, sensor_ioctl_scalar_t *);
static int t4_voltage_read(void *, sensor_ioctl_scalar_t *);
static const ksensor_ops_t t4_temp_ops = {
	.kso_kind = ksensor_kind_temperature,
	.kso_scalar = t4_temperature_read
};

static const ksensor_ops_t t4_volt_ops = {
	.kso_kind = ksensor_kind_voltage,
	.kso_scalar = t4_voltage_read
};

static int t4_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
static int t4_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
    ddi_ufm_image_t *);
static int t4_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
    ddi_ufm_slot_t *);
static ddi_ufm_ops_t t4_ufm_ops = {
	.ddi_ufm_op_fill_image = t4_ufm_fill_image,
	.ddi_ufm_op_fill_slot = t4_ufm_fill_slot,
	.ddi_ufm_op_getcaps = t4_ufm_getcaps
};

int
_init(void)
{
	int rc;

	rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
	if (rc != 0)
		return (rc);

	rc = mod_install(&modlinkage);
	if (rc != 0)
		ddi_soft_state_fini(&t4_list);

	mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
	SLIST_INIT(&t4_adapter_list);

#ifdef TCP_OFFLOAD_ENABLE
	mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL);
	SLIST_INIT(&t4_uld_list);
#endif

	return (rc);
}

int
_fini(void)
{
	int rc;

	rc = mod_remove(&modlinkage);
	if (rc != 0)
		return (rc);

	ddi_soft_state_fini(&t4_list);
	return (0);
}

int
_info(struct modinfo *mi)
{
	return (mod_info(&modlinkage, mi));
}

/* ARGSUSED */
static int
t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
{
	struct adapter *sc;
	minor_t minor;

	minor = getminor((dev_t)arg);	/* same as instance# in our case */

	if (cmd == DDI_INFO_DEVT2DEVINFO) {
		sc = ddi_get_soft_state(t4_list, minor);
		if (sc == NULL)
			return (DDI_FAILURE);

		ASSERT(sc->dev == (dev_t)arg);
		*rp = (void *)sc->dip;
	} else if (cmd == DDI_INFO_DEVT2INSTANCE)
		*rp = (void *) (unsigned long) minor;
	else
		ASSERT(0);

	return (DDI_SUCCESS);
}

static int
t4_devo_probe(dev_info_t *dip)
{
	int rc, id, *reg;
	uint_t n, pf;

	id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device-id", 0xffff);
	if (id == 0xffff)
		return (DDI_PROBE_DONTCARE);

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "reg", &reg, &n);
	if (rc != DDI_SUCCESS)
		return (DDI_PROBE_DONTCARE);

	pf = PCI_REG_FUNC_G(reg[0]);
	ddi_prop_free(reg);

	/* Prevent driver attachment on any PF except 0 on the FPGA */
	if (id == 0xa000 && pf != 0)
		return (DDI_PROBE_FAILURE);

	return (DDI_PROBE_DONTCARE);
}

static int
t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct adapter *sc = NULL;
	struct sge *s;
	int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
	int irq = 0, nxg = 0, n1g = 0;
#ifdef TCP_OFFLOAD_ENABLE
	int ofld_rqidx, ofld_tqidx;
#endif
	char name[16];
	struct driver_properties *prp;
	struct intrs_and_queues iaq;
	ddi_device_acc_attr_t da = {
		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
	};
	ddi_device_acc_attr_t da1 = {
		.devacc_attr_version = DDI_DEVICE_ATTR_V0,
		.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
		.devacc_attr_dataorder = DDI_STRICTORDER_ACC
	};

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Allocate space for soft state.
	 */
	instance = ddi_get_instance(dip);
	rc = ddi_soft_state_zalloc(t4_list, instance);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate soft state: %d", rc);
		return (DDI_FAILURE);
	}

	sc = ddi_get_soft_state(t4_list, instance);
	sc->dip = dip;
	sc->dev = makedevice(ddi_driver_major(dip), instance);
	mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
	mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
	TAILQ_INIT(&sc->sfl);
	mutex_init(&sc->mbox_lock, NULL, MUTEX_DRIVER, NULL);
	STAILQ_INIT(&sc->mbox_list);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
	mutex_exit(&t4_adapter_list_lock);

	sc->pf = getpf(sc);
	if (sc->pf > 8) {
		rc = EINVAL;
		cxgb_printf(dip, CE_WARN,
		    "failed to determine PCI PF# of device");
		goto done;
	}
	sc->mbox = sc->pf;

	/* Initialize the driver properties */
	prp = &sc->props;
	(void) init_driver_props(sc, prp);

	/*
	 * Enable access to the PCI config space.
	 */
	rc = pci_config_setup(dip, &sc->pci_regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to enable PCI config space access: %d", rc);
		goto done;
	}

	/* TODO: Set max read request to 4K */

	/*
	 * Enable MMIO access.
	 */
	rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to map device registers: %d", rc);
		goto done;
	}

	(void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));

	/*
	 * Initialize cpl handler.
	 */
	for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
		sc->cpl_handler[i] = cpl_not_handled;
	}

	for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
		sc->fw_msg_handler[i] = fw_msg_not_handled;
	}

	for (i = 0; i < NCHAN; i++) {
		(void) snprintf(name, sizeof (name), "%s-%d", "reclaim", i);
		sc->tq[i] = ddi_taskq_create(sc->dip, name, 1,
		    TASKQ_DEFAULTPRI, 0);

		if (sc->tq[i] == NULL) {
			cxgb_printf(dip, CE_WARN,
			    "failed to create task queues");
			rc = DDI_FAILURE;
			goto done;
		}
	}

	/*
	 * Prepare the adapter for operation.
	 */
	rc = -t4_prep_adapter(sc, false);
	if (rc != 0) {
		cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
		goto done;
	}

	/*
	 * Enable BAR1 access.
	 */
	sc->doorbells |= DOORBELL_KDB;
	rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to map BAR1 device registers: %d", rc);
		goto done;
	} else {
		if (is_t5(sc->params.chip)) {
			sc->doorbells |= DOORBELL_UDB;
			if (prp->wc) {
				/*
				 * Enable write combining on BAR2.  This is
				 * the userspace doorbell BAR and is split
				 * into 128B (UDBS_SEG_SIZE) doorbell regions,
				 * each associated with an egress queue.  The
				 * first 64B has the doorbell and the second
				 * 64B can be used to submit a tx work request
				 * with an implicit doorbell.
				 */
				sc->doorbells &= ~DOORBELL_UDB;
				sc->doorbells |= (DOORBELL_WCWR |
				    DOORBELL_UDBWC);
				t4_write_reg(sc, A_SGE_STAT_CFG,
				    V_STATSOURCE_T5(7) | V_STATMODE(0));
			}
		}
	}

	/*
	 * Do this really early.  Note that minor number = instance.
	 */
	(void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
	rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_NT_NEXUS, 0);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to create device node: %d", rc);
		rc = DDI_SUCCESS; /* carry on */
	}

	/* Do this early. Memory window is required for loading config file. */
	setup_memwin(sc);

	/* Prepare the firmware for operation */
	rc = prep_firmware(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = adap__pre_init_tweaks(sc);
	if (rc != 0)
		goto done;

	rc = get_params__pre_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	t4_sge_init(sc);

	if (sc->flags & MASTER_PF) {
		/* get basic stuff going */
		rc = -t4_fw_initialize(sc, sc->mbox);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "early init failed: %d.\n", rc);
			goto done;
		}
	}

	rc = get_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	rc = set_params__post_init(sc);
	if (rc != 0)
		goto done; /* error message displayed already */

	/*
	 * TODO: This is the place to call t4_set_filter_mode()
	 */

	/* tweak some settings */
	t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
	    V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
	    V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

	/*
	 * Work-around for bug 2619
	 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
	 * VLAN tag extraction is disabled.
	 */
	t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);

	/* Store filter mode */
	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
	    A_TP_VLAN_PRI_MAP);

	/*
	 * First pass over all the ports - allocate VIs and initialize some
	 * basic parameters like mac address, port type, etc.  We also figure
	 * out whether a port is 10G or 1G and use that information when
	 * calculating how many interrupts to attempt to allocate.
	 */
	for_each_port(sc, i) {
		struct port_info *pi;

		pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
		sc->port[i] = pi;

		/* These must be set before t4_port_init */
		pi->adapter = sc;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->port_id = i;
	}

	/* Allocate the vi and initialize parameters like mac addr */
	rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
	if (rc) {
		cxgb_printf(dip, CE_WARN,
		    "unable to initialize port: %d", rc);
		goto done;
	}

	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
		pi->mtu = ETHERMTU;

		if (is_10XG_port(pi)) {
			nxg++;
			pi->tmr_idx = prp->tmr_idx_10g;
			pi->pktc_idx = prp->pktc_idx_10g;
		} else {
			n1g++;
			pi->tmr_idx = prp->tmr_idx_1g;
			pi->pktc_idx = prp->pktc_idx_1g;
		}

		pi->xact_addr_filt = -1;
		t4_mc_init(pi);

		setbit(&sc->registered_device_map, i);
	}

	(void) remove_extra_props(sc, nxg, n1g);

	if (sc->registered_device_map == 0) {
		cxgb_printf(dip, CE_WARN, "no usable ports");
		rc = DDI_FAILURE;
		goto done;
	}

	rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
	if (rc != 0)
		goto done; /* error message displayed already */

	sc->intr_type = iaq.intr_type;
	sc->intr_count = iaq.nirq;

	if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
		sc->props.multi_rings = 0;
		cxgb_printf(dip, CE_WARN,
		    "Multiple rings disabled as interrupt type is not MSI-X");
	}

	if (sc->props.multi_rings && iaq.intr_fwd) {
		sc->props.multi_rings = 0;
		cxgb_printf(dip, CE_WARN,
		    "Multiple rings disabled as interrupts are forwarded");
	}

	if (!sc->props.multi_rings) {
		iaq.ntxq10g = 1;
		iaq.ntxq1g = 1;
	}
	s = &sc->sge;
	s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
	s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
	s->neq = s->ntxq + s->nrxq;	/* the fl in an rxq is an eq */
#ifdef TCP_OFFLOAD_ENABLE
	/* control queues, 1 per port + 1 mgmtq */
	s->neq += sc->params.nports + 1;
#endif
	s->niq = s->nrxq + 1;		/* 1 extra for firmware event queue */
	if (iaq.intr_fwd != 0)
		sc->flags |= INTR_FWD;
#ifdef TCP_OFFLOAD_ENABLE
	if (is_offload(sc) != 0) {
		s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
		s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
		s->neq += s->nofldtxq + s->nofldrxq;
		s->niq += s->nofldrxq;

		s->ofld_rxq = kmem_zalloc(s->nofldrxq *
		    sizeof (struct sge_ofld_rxq), KM_SLEEP);
		s->ofld_txq = kmem_zalloc(s->nofldtxq *
		    sizeof (struct sge_wrq), KM_SLEEP);
		s->ctrlq = kmem_zalloc(sc->params.nports *
		    sizeof (struct sge_wrq), KM_SLEEP);
	}
#endif
	s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
	s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
	s->iqmap = kmem_zalloc(s->iqmap_sz * sizeof (struct sge_iq *),
	    KM_SLEEP);
	s->eqmap = kmem_zalloc(s->eqmap_sz * sizeof (struct sge_eq *),
	    KM_SLEEP);

	sc->intr_handle = kmem_zalloc(sc->intr_count *
	    sizeof (ddi_intr_handle_t), KM_SLEEP);

	/*
	 * Second pass over the ports.  This time we know the number of rx and
	 * tx queues that each port should get.
	 */
	rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD_ENABLE
	ofld_rqidx = ofld_tqidx = 0;
#endif
	for_each_port(sc, i) {
		struct port_info *pi = sc->port[i];

		if (pi == NULL)
			continue;

		t4_mc_cb_init(pi);
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_rxq = rqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g
		    : iaq.nrxq1g;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->first_txq = tqidx;
		/* LINTED: E_ASSIGN_NARROW_CONV */
		pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g
		    : iaq.ntxq1g;

		rqidx += pi->nrxq;
		tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD_ENABLE
		if (is_offload(sc) != 0) {
			/* LINTED: E_ASSIGN_NARROW_CONV */
			pi->first_ofld_rxq = ofld_rqidx;
			pi->nofldrxq = max(1, pi->nrxq / 4);

			/* LINTED: E_ASSIGN_NARROW_CONV */
			pi->first_ofld_txq = ofld_tqidx;
			pi->nofldtxq = max(1, pi->ntxq / 2);

			ofld_rqidx += pi->nofldrxq;
			ofld_tqidx += pi->nofldtxq;
		}
#endif

		/*
		 * Enable hw checksumming and LSO for all ports by default.
		 * They can be disabled using ndd (hw_csum and hw_lso).
		 */
		pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
	}

#ifdef TCP_OFFLOAD_ENABLE
	sc->l2t = t4_init_l2t(sc);
#endif

	/*
	 * Setup Interrupts.
	 */

	i = 0;
	rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
	    sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(dip, CE_WARN,
		    "failed to allocate %d interrupt(s) of type %d: %d, %d",
		    sc->intr_count, sc->intr_type, rc, i);
		goto done;
	}
	ASSERT(sc->intr_count == i); /* allocation was STRICT */
	(void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
	(void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
	if (sc->intr_count == 1) {
		ASSERT(sc->flags & INTR_FWD);
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
		    &s->fwq);
	} else {
		/* Multiple interrupts.  The first one is always error intr */
		(void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
		    NULL);
		irq++;

		/* The second one is always the firmware event queue */
		(void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
		    &s->fwq);
		irq++;
		/*
		 * Note that if INTR_FWD is set then either the NIC rx
		 * queues or (exclusive or) the TOE rx queues will be taking
		 * direct interrupts.
		 *
		 * There is no need to check for is_offload(sc) as nofldrxq
		 * will be 0 if offload is disabled.
		 */
		for_each_port(sc, i) {
			struct port_info *pi = sc->port[i];
			struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD_ENABLE
			struct sge_ofld_rxq *ofld_rxq;

			/*
			 * Skip over the NIC queues if they aren't taking direct
			 * interrupts.
			 */
			if ((sc->flags & INTR_FWD) &&
			    pi->nofldrxq > pi->nrxq)
				goto ofld_queues;
#endif
			rxq = &s->rxq[pi->first_rxq];
			for (q = 0; q < pi->nrxq; q++, rxq++) {
				(void) ddi_intr_add_handler(
				    sc->intr_handle[irq], t4_intr, sc,
				    &rxq->iq);
				irq++;
			}

#ifdef TCP_OFFLOAD_ENABLE
			/*
			 * Skip over the offload queues if they aren't taking
			 * direct interrupts.
			 */
			if ((sc->flags & INTR_FWD))
				continue;
ofld_queues:
			ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq];
			for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
				(void) ddi_intr_add_handler(
				    sc->intr_handle[irq], t4_intr, sc,
				    &ofld_rxq->iq);
				irq++;
			}
#endif
		}
	}
	sc->flags |= INTR_ALLOCATED;

	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_TEMPERATURE,
	    &t4_temp_ops, sc, "temp", &sc->temp_sensor)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to create temperature "
		    "sensor: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}

	if ((rc = ksensor_create_scalar_pcidev(dip, SENSOR_KIND_VOLTAGE,
	    &t4_volt_ops, sc, "vdd", &sc->volt_sensor)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to create voltage "
		    "sensor: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}

	if ((rc = ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &t4_ufm_ops,
	    &sc->ufm_hdl, sc)) != 0) {
		cxgb_printf(dip, CE_WARN, "failed to enable UFM ops: %d", rc);
		rc = DDI_FAILURE;
		goto done;
	}
	ddi_ufm_update(sc->ufm_hdl);
	ddi_report_dev(dip);

	/*
	 * Hardware/Firmware/etc. Version/Revision IDs.
	 */
	t4_dump_version_info(sc);

	cxgb_printf(dip, CE_NOTE,
	    "(%d rxq, %d txq total) %d %s.",
	    rqidx, tqidx, sc->intr_count,
	    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
	    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
	    "fixed interrupt");

	sc->ksp = setup_kstats(sc);
	sc->ksp_stat = setup_wc_kstats(sc);
	sc->params.drv_memwin = MEMWIN_NIC;

done:
	if (rc != DDI_SUCCESS) {
		(void) t4_devo_detach(dip, DDI_DETACH);

		/* rc may have errno style errors or DDI errors */
		rc = DDI_FAILURE;
	}

	return (rc);
}

static int
t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance, i;
	struct adapter *sc;
	struct port_info *pi;
	struct sge *s;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

	if (sc->flags & FULL_INIT_DONE) {
		t4_intr_disable(sc);
		for_each_port(sc, i) {
			pi = sc->port[i];
			if (pi && pi->flags & PORT_INIT_DONE)
				(void) port_full_uninit(pi);
		}
		(void) adapter_full_uninit(sc);
	}

	/* Safe to call no matter what */
	if (sc->ufm_hdl != NULL) {
		ddi_ufm_fini(sc->ufm_hdl);
		sc->ufm_hdl = NULL;
	}
	(void) ksensor_remove(dip, KSENSOR_ALL_IDS);
	ddi_prop_remove_all(dip);
	ddi_remove_minor_node(dip, NULL);

	for (i = 0; i < NCHAN; i++) {
		if (sc->tq[i]) {
			ddi_taskq_wait(sc->tq[i]);
			ddi_taskq_destroy(sc->tq[i]);
		}
	}

	if (sc->ksp != NULL)
		kstat_delete(sc->ksp);
	if (sc->ksp_stat != NULL)
		kstat_delete(sc->ksp_stat);

	s = &sc->sge;
	if (s->rxq != NULL)
		kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
#ifdef TCP_OFFLOAD_ENABLE
	if (s->ofld_txq != NULL)
		kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
	if (s->ofld_rxq != NULL)
		kmem_free(s->ofld_rxq,
		    s->nofldrxq * sizeof (struct sge_ofld_rxq));
	if (s->ctrlq != NULL)
		kmem_free(s->ctrlq,
		    sc->params.nports * sizeof (struct sge_wrq));
#endif
	if (s->txq != NULL)
		kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
	if (s->iqmap != NULL)
		kmem_free(s->iqmap, s->iqmap_sz * sizeof (struct sge_iq *));
	if (s->eqmap != NULL)
		kmem_free(s->eqmap, s->eqmap_sz * sizeof (struct sge_eq *));

	if (s->rxbuf_cache != NULL)
		rxbuf_cache_destroy(s->rxbuf_cache);

	if (sc->flags & INTR_ALLOCATED) {
		for (i = 0; i < sc->intr_count; i++) {
			(void) ddi_intr_remove_handler(sc->intr_handle[i]);
			(void) ddi_intr_free(sc->intr_handle[i]);
		}
		sc->flags &= ~INTR_ALLOCATED;
	}

	if (sc->intr_handle != NULL) {
		kmem_free(sc->intr_handle,
		    sc->intr_count * sizeof (*sc->intr_handle));
	}

	for_each_port(sc, i) {
		pi = sc->port[i];
		if (pi != NULL) {
			mutex_destroy(&pi->lock);
			kmem_free(pi, sizeof (*pi));
			clrbit(&sc->registered_device_map, i);
		}
	}

	if (sc->flags & FW_OK)
		(void) t4_fw_bye(sc, sc->mbox);

	if (sc->reg1h != NULL)
		ddi_regs_map_free(&sc->reg1h);

	if (sc->regh != NULL)
		ddi_regs_map_free(&sc->regh);

	if (sc->pci_regh != NULL)
		pci_config_teardown(&sc->pci_regh);

	mutex_enter(&t4_adapter_list_lock);
	SLIST_REMOVE(&t4_adapter_list, sc, adapter, link);
	mutex_exit(&t4_adapter_list_lock);

	mutex_destroy(&sc->mbox_lock);
	mutex_destroy(&sc->lock);
	cv_destroy(&sc->cv);
	mutex_destroy(&sc->sfl_lock);

#ifdef DEBUG
	bzero(sc, sizeof (*sc));
#endif
	ddi_soft_state_free(t4_list, instance);

	return (DDI_SUCCESS);
}

static int
t4_devo_quiesce(dev_info_t *dip)
{
	int instance;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (DDI_SUCCESS);

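	/*
	 * Quiesce the device for fast reboot: stop the SGE so no further DMA
	 * reaches host memory, mask interrupts, and reset the chip.
	 */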
	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
	t4_intr_disable(sc);
	t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);

	return (DDI_SUCCESS);
}

static int
t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
    void *result)
{
	char s[4];
	struct port_info *pi;
	dev_info_t *child = (dev_info_t *)arg;

	switch (op) {
	case DDI_CTLOPS_REPORTDEV:
		pi = ddi_get_parent_data(rdip);
		pi->instance = ddi_get_instance(dip);
		pi->child_inst = ddi_get_instance(rdip);
		cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n",
		    ddi_node_name(rdip), ddi_get_instance(rdip),
		    ddi_get_name_addr(rdip), ddi_driver_name(dip),
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		pi = ddi_get_parent_data(child);
		if (pi == NULL)
			return (DDI_NOT_WELL_FORMED);
		(void) snprintf(s, sizeof (s), "%d", pi->port_id);
		ddi_set_name_addr(child, s);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_UNINITCHILD:
		ddi_set_name_addr(child, NULL);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_ATTACH:
	case DDI_CTLOPS_DETACH:
		return (DDI_SUCCESS);

	default:
		return (ddi_ctlops(dip, rdip, op, arg, result));
	}
}

static int
t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
    dev_info_t **cdipp)
{
	int instance, i;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);

	if (op == BUS_CONFIG_ONE) {
		char *c;

		/*
		 * arg is something like "cxgb@0" where 0 is the port_id
		 * hanging off this nexus.
		 */

		c = arg;
		while (*(c + 1))
			c++;

		/* There should be exactly 1 digit after '@' */
		if (*(c - 1) != '@')
			return (NDI_FAILURE);

		i = *c - '0';

		if (add_child_node(sc, i) != 0)
			return (NDI_FAILURE);

		flags |= NDI_ONLINE_ATTACH;
	} else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
		/* Allocate and bind all child device nodes */
		for_each_port(sc, i)
		    (void) add_child_node(sc, i);
		flags |= NDI_ONLINE_ATTACH;
	}

	return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
}

static int
t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
	int instance, i, rc;
	struct adapter *sc;

	instance = ddi_get_instance(dip);
	sc = ddi_get_soft_state(t4_list, instance);

	if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ALL ||
	    op == BUS_UNCONFIG_DRIVER)
		flags |= NDI_UNCONFIG;

	rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
	if (rc != 0)
		return (rc);

	if (op == BUS_UNCONFIG_ONE) {
		char *c;

		c = arg;
		while (*(c + 1))
			c++;

		if (*(c - 1) != '@')
			return (NDI_SUCCESS);

		i = *c - '0';

		rc = remove_child_node(sc, i);
	} else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {
		for_each_port(sc, i)
		    (void) remove_child_node(sc, i);
	}

	return (rc);
}

/* ARGSUSED */
static int
t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	struct adapter *sc;

	if (otyp != OTYP_CHR)
		return (EINVAL);

	sc = ddi_get_soft_state(t4_list, getminor(*devp));
	if (sc == NULL)
		return (ENXIO);

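	/*
	 * Exclusive open: atomically flip sc->open from 0 to EBUSY.
	 * atomic_cas_uint() returns the old value, so the first opener gets
	 * 0 (success) and everyone else gets EBUSY until the device is
	 * closed again.
	 */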
	return (atomic_cas_uint(&sc->open, 0, EBUSY));
}

/* ARGSUSED */
static int
t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	struct adapter *sc;

	sc = ddi_get_soft_state(t4_list, getminor(dev));
	if (sc == NULL)
		return (EINVAL);

	(void) atomic_swap_uint(&sc->open, 0);
	return (0);
}

/* ARGSUSED */
static int
t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
{
	int instance;
	struct adapter *sc;
	void *data = (void *)d;

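	/* The nexus ioctls are privileged; restrict them to root (uid 0). */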
	if (crgetuid(credp) != 0)
		return (EPERM);

	instance = getminor(dev);
	sc = ddi_get_soft_state(t4_list, instance);
	if (sc == NULL)
		return (EINVAL);

	return (t4_ioctl(sc, cmd, data, mode));
}

static unsigned int
getpf(struct adapter *sc)
{
	int rc, *data;
	uint_t n, pf;

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
	    DDI_PROP_DONTPASS, "reg", &data, &n);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to lookup \"reg\" property: %d", rc);
		return (0xff);
	}

	pf = PCI_REG_FUNC_G(data[0]);
	ddi_prop_free(data);

	return (pf);
}

/*
 * Install a compatible firmware (if required), establish contact with it,
 * become the master, and reset the device.
 */
static int
prep_firmware(struct adapter *sc)
{
	int rc;
	size_t fw_size;
	int reset = 1;
	enum dev_state state;
	unsigned char *fw_data;
	struct fw_hdr *card_fw, *hdr;
	const char *fw_file = NULL;
	firmware_handle_t fw_hdl;
	struct fw_info fi, *fw_info = &fi;

	struct driver_properties *p = &sc->props;

	/* Contact firmware, request master */
	rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
	if (rc < 0) {
		rc = -rc;
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to connect to the firmware: %d.", rc);
		return (rc);
	}

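	/*
	 * A non-negative return from t4_fw_hello() is the mailbox of the PF
	 * that the firmware elected master; if it matches ours, this
	 * instance is the master PF.
	 */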
	if (rc == sc->mbox)
		sc->flags |= MASTER_PF;

	/* We may need FW version info for later reporting */
	t4_get_version_info(sc);

	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
	case CHELSIO_T4:
		fw_file = "t4fw.bin";
		break;
	case CHELSIO_T5:
		fw_file = "t5fw.bin";
		break;
	case CHELSIO_T6:
		fw_file = "t6fw.bin";
		break;
	default:
		cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n");
		return (EINVAL);
	}

	if (firmware_open(T4_PORT_NAME, fw_file, &fw_hdl) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", fw_file);
		return (EINVAL);
	}

	fw_size = firmware_get_size(fw_hdl);

	if (fw_size < sizeof (struct fw_hdr)) {
		cxgb_printf(sc->dip, CE_WARN, "%s is too small (%ld bytes)\n",
		    fw_file, fw_size);
		firmware_close(fw_hdl);
		return (EINVAL);
	}

	if (fw_size > FLASH_FW_MAX_SIZE) {
		cxgb_printf(sc->dip, CE_WARN,
		    "%s is too large (%ld bytes, max allowed is %ld)\n",
		    fw_file, fw_size, FLASH_FW_MAX_SIZE);
		firmware_close(fw_hdl);
		return (EFBIG);
	}

	fw_data = kmem_zalloc(fw_size, KM_SLEEP);
	if (firmware_read(fw_hdl, 0, fw_data, fw_size) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
		    fw_file);
		firmware_close(fw_hdl);
		kmem_free(fw_data, fw_size);
		return (EINVAL);
	}
	firmware_close(fw_hdl);

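	/*
	 * Build a fw_info describing the firmware image we just read so that
	 * t4_prep_fw() below can compare it against what is currently on the
	 * card and decide whether the card's copy needs to be replaced.
	 */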
	bzero(fw_info, sizeof (*fw_info));
	fw_info->chip = CHELSIO_CHIP_VERSION(sc->params.chip);

	hdr = (struct fw_hdr *)fw_data;
	fw_info->fw_hdr.fw_ver = hdr->fw_ver;
	fw_info->fw_hdr.chip = hdr->chip;
	fw_info->fw_hdr.intfver_nic = hdr->intfver_nic;
	fw_info->fw_hdr.intfver_vnic = hdr->intfver_vnic;
	fw_info->fw_hdr.intfver_ofld = hdr->intfver_ofld;
	fw_info->fw_hdr.intfver_ri = hdr->intfver_ri;
	fw_info->fw_hdr.intfver_iscsipdu = hdr->intfver_iscsipdu;
	fw_info->fw_hdr.intfver_iscsi = hdr->intfver_iscsi;
	fw_info->fw_hdr.intfver_fcoepdu = hdr->intfver_fcoepdu;
	fw_info->fw_hdr.intfver_fcoe = hdr->intfver_fcoe;

	/* allocate memory to read the header of the firmware on the card */
	card_fw = kmem_zalloc(sizeof (*card_fw), KM_SLEEP);

	rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
	    p->t4_fw_install, state, &reset);

	kmem_free(card_fw, sizeof (*card_fw));
	kmem_free(fw_data, fw_size);

	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to install firmware: %d", rc);
		return (rc);
	} else {
		/* refresh */
		(void) t4_check_fw_version(sc);
	}

	/* Reset device */
	rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "firmware reset failed: %d.", rc);
		if (rc != ETIMEDOUT && rc != EIO)
			(void) t4_fw_bye(sc, sc->mbox);
		return (rc);
	}

	/* Partition adapter resources as specified in the config file. */
	if (sc->flags & MASTER_PF) {
		/* Handle default vs special T4 config file */
		rc = partition_resources(sc);
		if (rc != 0)
			goto err;	/* error message displayed already */
	}

	sc->flags |= FW_OK;
	return (0);
err:
	return (rc);
}

static const struct memwin t4_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE, MEMWIN2_APERTURE }
};

static const struct memwin t5_memwin[] = {
	{ MEMWIN0_BASE, MEMWIN0_APERTURE },
	{ MEMWIN1_BASE, MEMWIN1_APERTURE },
	{ MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

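/*
 * Firmware run-time parameters are identified by a 32-bit value that packs a
 * mnemonic (device-wide vs. per-PF/VF) together with a parameter index.
 * These macros build the identifiers passed to t4_query_params() and
 * t4_set_params().
 */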
#define	FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define	FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address of
 * the start of the range is returned in addr.
 */
int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
    uint32_t *addr)
{
	uint32_t em, addr_len, maddr, mlen;

	/* Memory can only be accessed in naturally aligned 4 byte units */
	if (off & 3 || len & 3 || len == 0)
		return (EINVAL);

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	switch (mtype) {
		case MEM_EDC0:
			if (!(em & F_EDRAM0_ENABLE))
				return (EINVAL);
			addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
			maddr = G_EDRAM0_BASE(addr_len) << 20;
			mlen = G_EDRAM0_SIZE(addr_len) << 20;
			break;
		case MEM_EDC1:
			if (!(em & F_EDRAM1_ENABLE))
				return (EINVAL);
			addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
			maddr = G_EDRAM1_BASE(addr_len) << 20;
			mlen = G_EDRAM1_SIZE(addr_len) << 20;
			break;
		case MEM_MC:
			if (!(em & F_EXT_MEM_ENABLE))
				return (EINVAL);
			addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
			maddr = G_EXT_MEM_BASE(addr_len) << 20;
			mlen = G_EXT_MEM_SIZE(addr_len) << 20;
			break;
		case MEM_MC1:
			if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
				return (EINVAL);
			addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
			maddr = G_EXT_MEM1_BASE(addr_len) << 20;
			mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
			break;
		default:
			return (EINVAL);
	}

	if (mlen > 0 && off < mlen && off + len <= mlen) {
		*addr = maddr + off;    /* global address */
		return (0);
	}

	return (EFAULT);
}

void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
	const struct memwin *mw;

	if (is_t4(sc->params.chip)) {
		mw = &t4_memwin[win];
	} else {
		mw = &t5_memwin[win];
	}

	if (base != NULL)
		*base = mw->base;
	if (aperture != NULL)
		*aperture = mw->aperture;
}

/*
 * Upload configuration file to card's memory.
 */
static int
upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
{
	int rc = 0;
	size_t cflen, cfbaselen;
	u_int i, n;
	uint32_t param, val, addr, mtype, maddr;
	uint32_t off, mw_base, mw_aperture;
	uint32_t *cfdata, *cfbase;
	firmware_handle_t fw_hdl;
	const char *cfg_file = NULL;

	/* Figure out where the firmware wants us to upload it. */
	param = FW_PARAM_DEV(CF);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
	if (rc != 0) {
		/* Firmwares without config file support will fail this way */
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query config file location: %d.\n", rc);
		return (rc);
	}
	*mt = mtype = G_FW_PARAMS_PARAM_Y(val);
	*ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;

	switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
	case CHELSIO_T4:
		cfg_file = "t4fw_cfg.txt";
		break;
	case CHELSIO_T5:
		cfg_file = "t5fw_cfg.txt";
		break;
	case CHELSIO_T6:
		cfg_file = "t6fw_cfg.txt";
		break;
	default:
		cxgb_printf(sc->dip, CE_WARN, "Invalid Adapter detected\n");
		return (EINVAL);
	}

	if (firmware_open(T4_PORT_NAME, cfg_file, &fw_hdl) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Could not open %s\n", cfg_file);
		return (EINVAL);
	}

	cflen = firmware_get_size(fw_hdl);
	/*
	 * Truncate the length to a multiple of uint32_ts. The configuration
	 * text files have trailing comments (and hopefully always will) so
	 * nothing important is lost.
	 */
	cflen &= ~3;

	if (cflen > FLASH_CFG_MAX_SIZE) {
		cxgb_printf(sc->dip, CE_WARN,
		    "config file too long (%d, max allowed is %d).  ",
		    cflen, FLASH_CFG_MAX_SIZE);
		firmware_close(fw_hdl);
		return (EFBIG);
	}

	rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
		    "Will try to use the config on the card, if any.\n",
		    __func__, mtype, maddr, cflen, rc);
		firmware_close(fw_hdl);
		return (EFAULT);
	}

	cfbaselen = cflen;
	cfbase = cfdata = kmem_zalloc(cflen, KM_SLEEP);
	if (firmware_read(fw_hdl, 0, cfdata, cflen) != 0) {
		cxgb_printf(sc->dip, CE_WARN, "Failed to read from %s\n",
		    cfg_file);
		firmware_close(fw_hdl);
		kmem_free(cfbase, cfbaselen);
		return (EINVAL);
	}
	firmware_close(fw_hdl);

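	/*
	 * Copy the file into card memory through memory window 2: position
	 * the window over the destination address, write up to an aperture's
	 * worth of data 4 bytes at a time, then slide the window forward and
	 * repeat until the entire file has been written.
	 */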
	memwin_info(sc, 2, &mw_base, &mw_aperture);
	while (cflen) {
		off = position_memwin(sc, 2, addr);
		n = min(cflen, mw_aperture - off);
		for (i = 0; i < n; i += 4)
			t4_write_reg(sc, mw_base + off + i, *cfdata++);
		cflen -= n;
		addr += n;
	}

	kmem_free(cfbase, cfbaselen);

	return (rc);
}

/*
 * Partition chip resources for use between various PFs, VFs, etc.  This is
 * done by uploading the firmware configuration file to the adapter and
 * instructing the firmware to process it.
 */
static int
partition_resources(struct adapter *sc)
{
	int rc;
	struct fw_caps_config_cmd caps;
	uint32_t mtype, maddr, finicsum, cfcsum;

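	/*
	 * Try to upload the configuration file into card memory.  If that
	 * fails, fall back to the copy (if any) stored in the adapter's
	 * flash.
	 */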
	rc = upload_config_file(sc, &mtype, &maddr);
	if (rc != 0) {
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(sc);
	}

	bzero(&caps, sizeof (caps));
	caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
	    V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
	    V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to pre-process config file: %d.\n", rc);
		return (rc);
	}

	finicsum = ntohl(caps.finicsum);
	cfcsum = ntohl(caps.cfcsum);
	if (finicsum != cfcsum) {
		cxgb_printf(sc->dip, CE_WARN,
		    "WARNING: config file checksum mismatch: %08x %08x\n",
		    finicsum, cfcsum);
	}
	sc->cfcsum = cfcsum;

	/* TODO: Need to configure this correctly */
	caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
	caps.iscsicaps = 0;
	caps.rdmacaps = 0;
	caps.fcoecaps = 0;
	/* TODO: Disable VNIC cap for now */
	caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);

	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to process config file: %d.\n", rc);
		return (rc);
	}

	return (0);
}

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int
adap__pre_init_tweaks(struct adapter *sc)
{
	int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */

	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	(void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE,
	    T5_LAST_REV);

	t4_set_reg_field(sc, A_SGE_CONTROL,
	    V_PKTSHIFT(M_PKTSHIFT), V_PKTSHIFT(rx_dma_offset));

	return (0);
}

/*
 * Retrieve parameters that are needed (or nice to have) prior to calling
 * t4_sge_init and t4_fw_initialize.
 */
static int
get_params__pre_init(struct adapter *sc)
{
	int rc;
	uint32_t param[2], val[2];
	struct fw_devlog_cmd cmd;
	struct devlog_params *dlog = &sc->params.devlog;

	/*
	 * Grab the raw VPD parameters.
	 */
	rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query VPD parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	param[0] = FW_PARAM_DEV(PORTVEC);
	param[1] = FW_PARAM_DEV(CCLK);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (pre_init): %d.\n", rc);
		return (rc);
	}

	sc->params.portvec = val[0];
	sc->params.nports = 0;
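	/*
	 * Count the ports by counting the set bits in the port vector; each
	 * pass of val[0] &= val[0] - 1 clears the lowest set bit.
	 */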
	while (val[0]) {
		sc->params.nports++;
		val[0] &= val[0] - 1;
	}

	sc->params.vpd.cclk = val[1];

	/* Read device log parameters. */
	bzero(&cmd, sizeof (cmd));
	cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	cmd.retval_len16 = htonl(FW_LEN16(cmd));
	rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get devlog parameters: %d.\n", rc);
		bzero(dlog, sizeof (*dlog));
		rc = 0;	/* devlog isn't critical for device operation */
	} else {
		val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
		dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
		dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
		dlog->size = ntohl(cmd.memsize_devlog);
	}

	return (rc);
}

/*
 * Retrieve various parameters that are of interest to the driver.  The device
 * has been initialized by the firmware at this point.
 */
static int
get_params__post_init(struct adapter *sc)
{
	int rc;
	uint32_t param[7], val[7];
	struct fw_caps_config_cmd caps;

	param[0] = FW_PARAM_PFVF(IQFLINT_START);
	param[1] = FW_PARAM_PFVF(EQ_START);
	param[2] = FW_PARAM_PFVF(FILTER_START);
	param[3] = FW_PARAM_PFVF(FILTER_END);
	param[4] = FW_PARAM_PFVF(L2T_START);
	param[5] = FW_PARAM_PFVF(L2T_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query parameters (post_init): %d.\n", rc);
		return (rc);
	}

	/* LINTED: E_ASSIGN_NARROW_CONV */
	sc->sge.iq_start = val[0];
	sc->sge.eq_start = val[1];
	sc->tids.ftid_base = val[2];
	sc->tids.nftids = val[3] - val[2] + 1;
	sc->vres.l2t.start = val[4];
	sc->vres.l2t.size = val[5] - val[4] + 1;

	param[0] = FW_PARAM_PFVF(IQFLINT_END);
	param[1] = FW_PARAM_PFVF(EQ_END);
	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query eq/iq map size parameters (post_init): "
		    "%d.\n", rc);
		return (rc);
	}

	sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
	sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;

	/* get capabilities */
	bzero(&caps, sizeof (caps));
	caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
	    F_FW_CMD_REQUEST | F_FW_CMD_READ);
	caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
	rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to get card capabilities: %d.\n", rc);
		return (rc);
	}

	if (caps.toecaps != 0) {
		/* query offload-related parameters */
		param[0] = FW_PARAM_DEV(NTID);
		param[1] = FW_PARAM_PFVF(SERVER_START);
		param[2] = FW_PARAM_PFVF(SERVER_END);
		param[3] = FW_PARAM_PFVF(TDDP_START);
		param[4] = FW_PARAM_PFVF(TDDP_END);
		param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
		if (rc != 0) {
			cxgb_printf(sc->dip, CE_WARN,
			    "failed to query TOE parameters: %d.\n", rc);
			return (rc);
		}
		sc->tids.ntids = val[0];
		sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
		sc->tids.stid_base = val[1];
		sc->tids.nstids = val[2] - val[1] + 1;
		sc->vres.ddp.start = val[3];
		sc->vres.ddp.size = val[4] - val[3] + 1;
		sc->params.ofldq_wr_cred = val[5];
		sc->params.offload = 1;
	}

	rc = -t4_get_pfres(sc);
	if (rc != 0) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to query PF resource params: %d.\n", rc);
		return (rc);
	}

	/* These are finalized by FW initialization, load their values now */
	val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
	sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
	sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);

	return (rc);
}

static int
set_params__post_init(struct adapter *sc)
{
	uint32_t param, val;

	/* ask for encapsulated CPLs */
	param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val = 1;
	(void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);

	return (0);
}

/* TODO: verify */
static void
setup_memwin(struct adapter *sc)
{
	pci_regspec_t *data;
	int rc;
	uint_t n;
	uintptr_t bar0;
	uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
	uintptr_t mem_win2_aperture;

	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
	    DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
	if (rc != DDI_SUCCESS) {
		cxgb_printf(sc->dip, CE_WARN,
		    "failed to lookup \"assigned-addresses\" property: %d", rc);
		return;
	}
	n /= sizeof (*data);

	bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
	ddi_prop_free(data);

	if (is_t4(sc->params.chip)) {
		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
		mem_win2_aperture = MEMWIN2_APERTURE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE;
		mem_win2_base = MEMWIN2_BASE_T5;
		mem_win2_aperture = MEMWIN2_APERTURE_T5;
	}

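	/*
	 * Program the base address of each memory window.  The WINDOW field
	 * encodes the aperture as log2(size) - 10, so a value of 0 denotes
	 * a 1KB window.
	 */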
	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
	    mem_win0_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
	    mem_win1_base | V_BIR(0) |
	    V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));

	t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
	    mem_win2_base | V_BIR(0) |
	    V_WINDOW(ilog2(mem_win2_aperture) - 10));

	/* flush */
	(void) t4_read_reg(sc,
	    PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
}
1797 
1798 /*
1799  * Positions the memory window such that it can be used to access the specified
1800  * address in the chip's address space.  The return value is the offset of addr
1801  * from the start of the window.
1802  */
1803 uint32_t
1804 position_memwin(struct adapter *sc, int n, uint32_t addr)
1805 {
1806 	uint32_t start, pf;
1807 	uint32_t reg;
1808 
1809 	if (addr & 3) {
1810 		cxgb_printf(sc->dip, CE_WARN,
1811 		    "addr (0x%x) is not at a 4B boundary.", addr);
1812 		return (EFAULT);
1813 	}
1814 
1815 	if (is_t4(sc->params.chip)) {
1816 		pf = 0;
1817 		start = addr & ~0xf;    /* start must be 16B aligned */
1818 	} else {
1819 		pf = V_PFNUM(sc->pf);
1820 		start = addr & ~0x7f;   /* start must be 128B aligned */
1821 	}
1822 	reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1823 
1824 	t4_write_reg(sc, reg, start | pf);
1825 	(void) t4_read_reg(sc, reg);
1826 
1827 	return (addr - start);
1828 }
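
/*
 * A minimal usage sketch (illustrative only, not driver code): to read a
 * 32-bit word at chip address "addr" through memory window 2, position the
 * window and then read at the window's base plus the returned offset.  Here
 * "mw2_base" stands for the BAR0 offset of window 2 (MEMWIN2_BASE or
 * MEMWIN2_BASE_T5, as selected in setup_memwin() above) and is assumed for
 * illustration:
 *
 *	uint32_t off = position_memwin(sc, 2, addr);
 *	uint32_t word = t4_read_reg(sc, mw2_base + off);
 */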
1829 
1830 
1831 /*
1832  * Reads the named property and fills up the "data" array (which has at least
1833  * "count" elements).  We first try and lookup the property for our dev_t and
1834  * then retry with DDI_DEV_T_ANY if it's not found.
1835  *
1836  * Returns non-zero if the property was found and "data" has been updated.
1837  */
1838 static int
1839 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
1840 {
1841 	dev_info_t *dip = sc->dip;
1842 	dev_t dev = sc->dev;
1843 	int rc, *d;
1844 	uint_t i, n;
1845 
1846 	rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
1847 	    name, &d, &n);
1848 	if (rc == DDI_PROP_SUCCESS)
1849 		goto found;
1850 
1851 	if (rc != DDI_PROP_NOT_FOUND) {
1852 		cxgb_printf(dip, CE_WARN,
1853 		    "failed to lookup property %s for minor %d: %d.",
1854 		    name, getminor(dev), rc);
1855 		return (0);
1856 	}
1857 
1858 	rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1859 	    name, &d, &n);
1860 	if (rc == DDI_PROP_SUCCESS)
1861 		goto found;
1862 
1863 	if (rc != DDI_PROP_NOT_FOUND) {
1864 		cxgb_printf(dip, CE_WARN,
1865 		    "failed to lookup property %s: %d.", name, rc);
1866 		return (0);
1867 	}
1868 
1869 	return (0);
1870 
1871 found:
1872 	if (n > count) {
1873 		cxgb_printf(dip, CE_NOTE,
1874 		    "property %s has too many elements (%d), ignoring extras",
1875 		    name, n);
1876 	}
1877 
1878 	for (i = 0; i < n && i < count; i++)
1879 		data[i] = d[i];
1880 	ddi_prop_free(d);
1881 
1882 	return (1);
1883 }
1884 
1885 static int
1886 prop_lookup_int(struct adapter *sc, char *name, int defval)
1887 {
1888 	int rc;
1889 
1890 	rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1891 	if (rc != -1)
1892 		return (rc);
1893 
1894 	return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1895 	    name, defval));
1896 }
1897 
1898 static int
1899 init_driver_props(struct adapter *sc, struct driver_properties *p)
1900 {
1901 	dev_t dev = sc->dev;
1902 	dev_info_t *dip = sc->dip;
1903 	int i, *data;
1904 	uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
1905 	uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
1906 
1907 	/*
1908 	 * Holdoff timer
1909 	 */
1910 	data = &p->timer_val[0];
1911 	for (i = 0; i < SGE_NTIMERS; i++)
1912 		data[i] = tmr[i];
1913 	(void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
1914 	    SGE_NTIMERS);
1915 	for (i = 0; i < SGE_NTIMERS; i++) {
1916 		int limit = 200U;
1917 		if (data[i] > limit) {
1918 			cxgb_printf(dip, CE_WARN,
1919 			    "holdoff timer %d is too high (%d), lowered to %d.",
1920 			    i, data[i], limit);
1921 			data[i] = limit;
1922 		}
1923 	}
1924 	(void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
1925 	    data, SGE_NTIMERS);
1926 
1927 	/*
1928 	 * Holdoff packet counter
1929 	 */
1930 	data = &p->counter_val[0];
1931 	for (i = 0; i < SGE_NCOUNTERS; i++)
1932 		data[i] = cnt[i];
1933 	(void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
1934 	    SGE_NCOUNTERS);
1935 	for (i = 0; i < SGE_NCOUNTERS; i++) {
1936 		int limit = M_THRESHOLD_0;
1937 		if (data[i] > limit) {
1938 			cxgb_printf(dip, CE_WARN,
1939 			    "holdoff pkt-counter %d is too high (%d), "
1940 			    "lowered to %d.", i, data[i], limit);
1941 			data[i] = limit;
1942 		}
1943 	}
1944 	(void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
1945 	    data, SGE_NCOUNTERS);
1946 
1947 	/*
1948 	 * Maximum # of tx and rx queues to use for each
1949 	 * 100G, 40G, 25G, 10G and 1G port.
1950 	 */
1951 	p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
1952 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1953 	    p->max_ntxq_10g);
1954 
1955 	p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
1956 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1957 	    p->max_nrxq_10g);
1958 
1959 	p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
1960 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1961 	    p->max_ntxq_1g);
1962 
1963 	p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
1964 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1965 	    p->max_nrxq_1g);
1966 
1967 #ifdef TCP_OFFLOAD_ENABLE
1968 	p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8);
1969 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1970 	    p->max_nofldtxq_10g);
1971 
1972 	p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2);
1973 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1974 	    p->max_nofldrxq_10g);
1975 
1976 	p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2);
1977 	(void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1978 	    p->max_nofldtxq_1g);
1979 
1980 	p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1);
1981 	(void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1982 	    p->max_nofldrxq_1g);
1983 #endif
1984 
1985 	/*
1986 	 * Holdoff parameters for 10G and 1G ports.
1987 	 */
1988 	p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
1989 	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
1990 	    p->tmr_idx_10g);
1991 
1992 	p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
1993 	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
1994 	    p->pktc_idx_10g);
1995 
1996 	p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
1997 	(void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
1998 	    p->tmr_idx_1g);
1999 
2000 	p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
2001 	(void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
2002 	    p->pktc_idx_1g);
2003 
2004 	/*
2005 	 * Size (number of entries) of each tx and rx queue.
2006 	 */
2007 	i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
2008 	p->qsize_txq = max(i, 128);
2009 	if (p->qsize_txq != i) {
2010 		cxgb_printf(dip, CE_WARN,
2011 		    "using %d instead of %d as the tx queue size",
2012 		    p->qsize_txq, i);
2013 	}
2014 	(void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);
2015 
2016 	i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
2017 	p->qsize_rxq = max(i, 128);
2018 	while (p->qsize_rxq & 7)
2019 		p->qsize_rxq--;
2020 	if (p->qsize_rxq != i) {
2021 		cxgb_printf(dip, CE_WARN,
2022 		    "using %d instead of %d as the rx queue size",
2023 		    p->qsize_rxq, i);
2024 	}
2025 	(void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);
2026 
2027 	/*
2028 	 * Interrupt types allowed.
2029 	 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively.  See sys/ddi_intr.h
2030 	 */
2031 	p->intr_types = prop_lookup_int(sc, "interrupt-types",
2032 	    DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
2033 	(void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);
2034 
2035 	/*
2036 	 * Forwarded interrupt queues.  Create this property to force the driver
2037 	 * to use forwarded interrupt queues.
2038 	 */
2039 	if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS,
2040 	    "interrupt-forwarding") != 0 ||
2041 	    ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2042 	    "interrupt-forwarding") != 0) {
2043 		UNIMPLEMENTED();
2044 		(void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP,
2045 		    "interrupt-forwarding", NULL, 0);
2046 	}
2047 
2048 	/*
2049 	 * Write combining
2050 	 * 0 to disable, 1 to enable
2051 	 */
2052 	p->wc = prop_lookup_int(sc, "write-combine", 1);
2053 	cxgb_printf(dip, CE_WARN, "write-combine: using of %d", p->wc);
2054 	if (p->wc != 0 && p->wc != 1) {
2055 		cxgb_printf(dip, CE_WARN,
2056 		    "write-combine: using 1 instead of %d", p->wc);
2057 		p->wc = 1;
2058 	}
2059 	(void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);
2060 
2061 	p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
2062 	if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
2063 		p->t4_fw_install = 1;
2064 	(void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);
2065 
2066 	/* Multiple Rings */
2067 	p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
2068 	if (p->multi_rings != 0 && p->multi_rings != 1) {
2069 		cxgb_printf(dip, CE_NOTE,
2070 		    "multi-rings: using value 1 instead of %d",
		    p->multi_rings);
2071 		p->multi_rings = 1;
2072 	}
2073 
2074 	(void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);
2075 
2076 	return (0);
2077 }
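
/*
 * All of the properties read above can be tuned from the driver's .conf
 * file.  A hypothetical t4nex.conf fragment (values are illustrative, not
 * recommendations):
 *
 *	holdoff-timer-values=5,10,20,50,100,200;
 *	holdoff-pkt-counter-values=1,8,16,32;
 *	max-ntxq-10G-port=8;
 *	max-nrxq-10G-port=8;
 *	qsize-txq=1024;
 *	qsize-rxq=1024;
 *	interrupt-types=6;	(MSI (0x2) | MSI-X (0x4), INTx disallowed)
 *	write-combine=1;
 */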
2078 
2079 static int
remove_extra_props(struct adapter * sc,int n10g,int n1g)2080 remove_extra_props(struct adapter *sc, int n10g, int n1g)
2081 {
2082 	if (n10g == 0) {
2083 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
2084 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
2085 		(void) ddi_prop_remove(sc->dev, sc->dip,
2086 		    "holdoff-timer-idx-10G");
2087 		(void) ddi_prop_remove(sc->dev, sc->dip,
2088 		    "holdoff-pktc-idx-10G");
2089 	}
2090 
2091 	if (n1g == 0) {
2092 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
2093 		(void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
2094 		(void) ddi_prop_remove(sc->dev, sc->dip,
2095 		    "holdoff-timer-idx-1G");
2096 		(void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
2097 	}
2098 
2099 	return (0);
2100 }
2101 
2102 static int
2103 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
2104     struct intrs_and_queues *iaq)
2105 {
2106 	struct driver_properties *p = &sc->props;
2107 	int rc, itype, itypes, navail, nc, n;
2108 	int pfres_rxq, pfres_txq, pfresq;
2109 
2110 	bzero(iaq, sizeof (*iaq));
2111 	nc = ncpus;	/* our snapshot of the number of CPUs */
2112 	iaq->ntxq10g = min(nc, p->max_ntxq_10g);
2113 	iaq->ntxq1g = min(nc, p->max_ntxq_1g);
2114 	iaq->nrxq10g = min(nc, p->max_nrxq_10g);
2115 	iaq->nrxq1g = min(nc, p->max_nrxq_1g);
2116 #ifdef TCP_OFFLOAD_ENABLE
2117 	iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g);
2118 	iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g);
2119 	iaq->nofldrxq10g = min(nc, p->max_nofldrxq_10g);
2120 	iaq->nofldrxq1g = min(nc, p->max_nofldrxq_1g);
2121 #endif
2122 
2123 	pfres_rxq = iaq->nrxq10g * n10g + iaq->nrxq1g * n1g;
2124 	pfres_txq = iaq->ntxq10g * n10g + iaq->ntxq1g * n1g;
2125 #ifdef TCP_OFFLOAD_ENABLE
2126 	pfres_rxq += iaq->nofldrxq10g * n10g + iaq->nofldrxq1g * n1g;
2127 	pfres_txq += iaq->nofldtxq10g * n10g + iaq->nofldtxq1g * n1g;
2128 #endif
2129 
2130 	/*
2131 	 * If the current configuration of maximum Rxqs and Txqs exceeds
2132 	 * the maximum available for all the ports under this PF, shrink
2133 	 * the queues to the maximum available. Reduce them in a way that
2134 	 * each port under this PF has an equally distributed number of
2135 	 * queues. Must guarantee at least 1 queue for each port for both
2136 	 * NIC and offload queues.
2137 	 *
2138 	 * neq - fixed maximum number of Egress queues on the Tx path and
2139 	 * Free List queues that hold Rx payload data on the Rx path. Half
2140 	 * are reserved for Egress queues and the other half for Free List
2141 	 * queues; hence the division by 2.
2142 	 *
2143 	 * niqflint - maximum number of Ingress queues with interrupts on
2144 	 * the Rx path to receive completions that indicate Rx payload has
2145 	 * been posted in its associated Free List queue. Also handles Tx
2146 	 * completions for packets successfully transmitted on the Tx path.
2147 	 *
2148 	 * nethctrl - maximum number of Egress queues for the Tx path only,
2149 	 * usually half of neq; if firmware resource limits have made it
2150 	 * smaller than that, take the lower value.
2151 	 */
2152 	while (pfres_rxq >
2153 	    min(sc->params.pfres.neq / 2, sc->params.pfres.niqflint)) {
2154 		pfresq = pfres_rxq;
2155 
2156 		if (iaq->nrxq10g > 1) {
2157 			iaq->nrxq10g--;
2158 			pfres_rxq -= n10g;
2159 		}
2160 
2161 		if (iaq->nrxq1g > 1) {
2162 			iaq->nrxq1g--;
2163 			pfres_rxq -= n1g;
2164 		}
2165 
2166 #ifdef TCP_OFFLOAD_ENABLE
2167 		if (iaq->nofldrxq10g > 1) {
2168 			iaq->nofldrxq10g--;
2169 			pfres_rxq -= n10g;
2170 		}
2171 
2172 		if (iaq->nofldrxq1g > 1) {
2173 			iaq->nofldrxq1g--;
2174 			pfres_rxq -= n1g;
2175 		}
2176 #endif
2177 
2178 		/* Break if nothing changed */
2179 		if (pfresq == pfres_rxq)
2180 			break;
2181 	}
2182 
2183 	while (pfres_txq >
2184 	    min(sc->params.pfres.neq / 2, sc->params.pfres.nethctrl)) {
2185 		pfresq = pfres_txq;
2186 
2187 		if (iaq->ntxq10g > 1) {
2188 			iaq->ntxq10g--;
2189 			pfres_txq -= n10g;
2190 		}
2191 
2192 		if (iaq->ntxq1g > 1) {
2193 			iaq->ntxq1g--;
2194 			pfres_txq -= n1g;
2195 		}
2196 
2197 #ifdef TCP_OFFLOAD_ENABLE
2198 		if (iaq->nofldtxq10g > 1) {
2199 			iaq->nofldtxq10g--;
2200 			pfres_txq -= n10g;
2201 		}
2202 
2203 		if (iaq->nofldtxq1g > 1) {
2204 			iaq->nofldtxq1g--;
2205 			pfres_txq -= n1g;
2206 		}
2207 #endif
2208 
2209 		/* Break if nothing changed */
2210 		if (pfresq == pfres_txq)
2211 			break;
2212 	}
2213 
2214 	rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2215 	if (rc != DDI_SUCCESS) {
2216 		cxgb_printf(sc->dip, CE_WARN,
2217 		    "failed to determine supported interrupt types: %d", rc);
2218 		return (rc);
2219 	}
2220 
2221 	for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2222 		ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2223 		    itype == DDI_INTR_TYPE_MSI ||
2224 		    itype == DDI_INTR_TYPE_FIXED);
2225 
2226 		if ((itype & itypes & p->intr_types) == 0)
2227 			continue;	/* not supported or not allowed */
2228 
2229 		navail = 0;
2230 		rc = ddi_intr_get_navail(sc->dip, itype, &navail);
2231 		if (rc != DDI_SUCCESS || navail == 0) {
2232 			cxgb_printf(sc->dip, CE_WARN,
2233 			    "failed to get # of interrupts for type %d: %d",
2234 			    itype, rc);
2235 			continue;	/* carry on */
2236 		}
2237 
2238 		iaq->intr_type = itype;
2239 		if (navail == 0)
2240 			continue;
2241 
2242 		/*
2243 		 * Best option: an interrupt vector for errors, one for the
2244 		 * firmware event queue, and one each for each rxq (NIC as well
2245 		 * as offload).
2246 		 */
2247 		iaq->nirq = T4_EXTRA_INTR;
2248 		iaq->nirq += n10g * iaq->nrxq10g;
2249 		iaq->nirq += n1g * iaq->nrxq1g;
2250 #ifdef TCP_OFFLOAD_ENABLE
2251 		iaq->nirq += n10g * iaq->nofldrxq10g;
2252 		iaq->nirq += n1g * iaq->nofldrxq1g;
2253 #endif
2254 
2255 		if (iaq->nirq <= navail &&
2256 		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2257 			iaq->intr_fwd = 0;
2258 			goto allocate;
2259 		}
2260 
2261 		/*
2262 		 * Second best option: an interrupt vector for errors, one for
2263 		 * the firmware event queue, and one each for either NIC or
2264 		 * offload rxq's.
2265 		 */
2266 		iaq->nirq = T4_EXTRA_INTR;
2267 #ifdef TCP_OFFLOAD_ENABLE
2268 		iaq->nirq += n10g * max(iaq->nrxq10g, iaq->nofldrxq10g);
2269 		iaq->nirq += n1g * max(iaq->nrxq1g, iaq->nofldrxq1g);
2270 #else
2271 		iaq->nirq += n10g * iaq->nrxq10g;
2272 		iaq->nirq += n1g * iaq->nrxq1g;
2273 #endif
2274 		if (iaq->nirq <= navail &&
2275 		    (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2276 			iaq->intr_fwd = 1;
2277 			goto allocate;
2278 		}
2279 
2280 		/*
2281 		 * Next best option: an interrupt vector for errors, one for the
2282 		 * firmware event queue, and at least one per port.  At this
2283 		 * point we know we'll have to downsize nrxq or nofldrxq to fit
2284 		 * what's available to us.
2285 		 */
2286 		iaq->nirq = T4_EXTRA_INTR;
2287 		iaq->nirq += n10g + n1g;
2288 		if (iaq->nirq <= navail) {
2289 			int leftover = navail - iaq->nirq;
2290 
2291 			if (n10g > 0) {
2292 				int target = iaq->nrxq10g;
2293 
2294 #ifdef TCP_OFFLOAD_ENABLE
2295 				target = max(target, iaq->nofldrxq10g);
2296 #endif
2297 				n = 1;
2298 				while (n < target && leftover >= n10g) {
2299 					leftover -= n10g;
2300 					iaq->nirq += n10g;
2301 					n++;
2302 				}
2303 				iaq->nrxq10g = min(n, iaq->nrxq10g);
2304 #ifdef TCP_OFFLOAD_ENABLE
2305 				iaq->nofldrxq10g = min(n, iaq->nofldrxq10g);
2306 #endif
2307 			}
2308 
2309 			if (n1g > 0) {
2310 				int target = iaq->nrxq1g;
2311 
2312 #ifdef TCP_OFFLOAD_ENABLE
2313 				target = max(target, iaq->nofldrxq1g);
2314 #endif
2315 				n = 1;
2316 				while (n < target && leftover >= n1g) {
2317 					leftover -= n1g;
2318 					iaq->nirq += n1g;
2319 					n++;
2320 				}
2321 				iaq->nrxq1g = min(n, iaq->nrxq1g);
2322 #ifdef TCP_OFFLOAD_ENABLE
2323 				iaq->nofldrxq1g = min(n, iaq->nofldrxq1g);
2324 #endif
2325 			}
2326 
2327 			/*
2328 			 * We have arrived at the minimum value required to
2329 			 * enable a per-queue irq (either NIC or offload):
2330 			 * the non-offload case gets a vector per queue, the
2331 			 * offload case one per offload/NIC queue pair, so
2332 			 * enable interrupt forwarding only when offloading.
2333 			 */
2334 #ifdef TCP_OFFLOAD_ENABLE
2335 			if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) {
2336 				iaq->intr_fwd = 1;
2337 #else
2338 			if (itype != DDI_INTR_TYPE_MSI) {
2339 #endif
2340 				goto allocate;
2341 			}
2342 		}
2343 
2344 		/*
2345 		 * Least desirable option: one interrupt vector for everything.
2346 		 */
2347 		iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2348 #ifdef TCP_OFFLOAD_ENABLE
2349 		iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2350 #endif
2351 		iaq->intr_fwd = 1;
2352 
2353 allocate:
2354 		return (0);
2355 	}
2356 
2357 	cxgb_printf(sc->dip, CE_WARN,
2358 	    "failed to find a usable interrupt type.  supported=%d, allowed=%d",
2359 	    itypes, p->intr_types);
2360 	return (DDI_FAILURE);
2361 }
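
/*
 * A worked example of the "best option" arithmetic above: with two 10G
 * ports (n10g = 2, n1g = 0) and nrxq10g capped at 8, the driver asks for
 * T4_EXTRA_INTR + 2 * 8 vectors (T4_EXTRA_INTR covering the error interrupt
 * and the firmware event queue).  If MSI-X can supply that many, every rx
 * queue gets a dedicated vector and intr_fwd stays clear; otherwise the
 * code falls through to the progressively smaller layouts until one fits
 * navail.
 */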
2362 
2363 static int
2364 add_child_node(struct adapter *sc, int idx)
2365 {
2366 	int rc;
2367 	struct port_info *pi;
2368 
2369 	if (idx < 0 || idx >= sc->params.nports)
2370 		return (EINVAL);
2371 
2372 	pi = sc->port[idx];
2373 	if (pi == NULL)
2374 		return (ENODEV);	/* t4_port_init failed earlier */
2375 
2376 	PORT_LOCK(pi);
2377 	if (pi->dip != NULL) {
2378 		rc = 0;		/* EEXIST really, but then bus_config fails */
2379 		goto done;
2380 	}
2381 
2382 	rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2383 	if (rc != DDI_SUCCESS || pi->dip == NULL) {
2384 		rc = ENOMEM;
2385 		goto done;
2386 	}
2387 
2388 	(void) ddi_set_parent_data(pi->dip, pi);
2389 	(void) ndi_devi_bind_driver(pi->dip, 0);
2390 	rc = 0;
2391 done:
2392 	PORT_UNLOCK(pi);
2393 	return (rc);
2394 }
2395 
2396 static int
2397 remove_child_node(struct adapter *sc, int idx)
2398 {
2399 	int rc;
2400 	struct port_info *pi;
2401 
2402 	if (idx < 0 || idx >= sc->params.nports)
2403 		return (EINVAL);
2404 
2405 	pi = sc->port[idx];
2406 	if (pi == NULL)
2407 		return (ENODEV);
2408 
2409 	PORT_LOCK(pi);
2410 	if (pi->dip == NULL) {
2411 		rc = ENODEV;
2412 		goto done;
2413 	}
2414 
2415 	rc = ndi_devi_free(pi->dip);
2416 	if (rc == 0)
2417 		pi->dip = NULL;
2418 done:
2419 	PORT_UNLOCK(pi);
2420 	return (rc);
2421 }
2422 
2423 static char *
2424 print_port_speed(const struct port_info *pi)
2425 {
2426 	if (pi == NULL)
2427 		return ("-");
2428 
2429 	if (is_100G_port(pi))
2430 		return ("100G");
2431 	else if (is_50G_port(pi))
2432 		return ("50G");
2433 	else if (is_40G_port(pi))
2434 		return ("40G");
2435 	else if (is_25G_port(pi))
2436 		return ("25G");
2437 	else if (is_10G_port(pi))
2438 		return ("10G");
2439 	else
2440 		return ("1G");
2441 }
2442 
2443 #define	KS_UINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
2444 #define	KS_CINIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
2445 #define	KS_U64INIT(x)	kstat_named_init(&kstatp->x, #x, KSTAT_DATA_UINT64)
2446 #define	KS_U_SET(x, y)	kstatp->x.value.ul = (y)
2447 #define	KS_C_SET(x, ...)	\
2448 			(void) snprintf(kstatp->x.value.c, 16,  __VA_ARGS__)
2449 
2450 /*
2451  * t4nex:X:config
2452  */
2453 struct t4_kstats {
2454 	kstat_named_t chip_ver;
2455 	kstat_named_t fw_vers;
2456 	kstat_named_t tp_vers;
2457 	kstat_named_t driver_version;
2458 	kstat_named_t serial_number;
2459 	kstat_named_t ec_level;
2460 	kstat_named_t id;
2461 	kstat_named_t bus_type;
2462 	kstat_named_t bus_width;
2463 	kstat_named_t bus_speed;
2464 	kstat_named_t core_clock;
2465 	kstat_named_t port_cnt;
2466 	kstat_named_t port_type;
2467 	kstat_named_t pci_vendor_id;
2468 	kstat_named_t pci_device_id;
2469 };
2470 static kstat_t *
2471 setup_kstats(struct adapter *sc)
2472 {
2473 	kstat_t *ksp;
2474 	struct t4_kstats *kstatp;
2475 	int ndata;
2476 	struct pci_params *p = &sc->params.pci;
2477 	struct vpd_params *v = &sc->params.vpd;
2478 	uint16_t pci_vendor, pci_device;
2479 
2480 	ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
2481 
2482 	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
2483 	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2484 	if (ksp == NULL) {
2485 		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2486 		return (NULL);
2487 	}
2488 
2489 	kstatp = (struct t4_kstats *)ksp->ks_data;
2490 
2491 	KS_UINIT(chip_ver);
2492 	KS_CINIT(fw_vers);
2493 	KS_CINIT(tp_vers);
2494 	KS_CINIT(driver_version);
2495 	KS_CINIT(serial_number);
2496 	KS_CINIT(ec_level);
2497 	KS_CINIT(id);
2498 	KS_CINIT(bus_type);
2499 	KS_CINIT(bus_width);
2500 	KS_CINIT(bus_speed);
2501 	KS_UINIT(core_clock);
2502 	KS_UINIT(port_cnt);
2503 	KS_CINIT(port_type);
2504 	KS_CINIT(pci_vendor_id);
2505 	KS_CINIT(pci_device_id);
2506 
2507 	KS_U_SET(chip_ver, sc->params.chip);
2508 	KS_C_SET(fw_vers, "%d.%d.%d.%d",
2509 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2510 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2511 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2512 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2513 	KS_C_SET(tp_vers, "%d.%d.%d.%d",
2514 	    G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
2515 	    G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
2516 	    G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
2517 	    G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
2518 	KS_C_SET(driver_version, DRV_VERSION);
2519 	KS_C_SET(serial_number, "%s", v->sn);
2520 	KS_C_SET(ec_level, "%s", v->ec);
2521 	KS_C_SET(id, "%s", v->id);
2522 	KS_C_SET(bus_type, "pci-express");
2523 	KS_C_SET(bus_width, "x%d lanes", p->width);
2524 	KS_C_SET(bus_speed, "%d", p->speed);
2525 	KS_U_SET(core_clock, v->cclk);
2526 	KS_U_SET(port_cnt, sc->params.nports);
2527 
2528 	t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
2529 	KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
2530 
2531 	t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
2532 	KS_C_SET(pci_device_id, "0x%x", pci_device);
2533 
2534 	KS_C_SET(port_type, "%s/%s/%s/%s",
2535 	    print_port_speed(sc->port[0]),
2536 	    print_port_speed(sc->port[1]),
2537 	    print_port_speed(sc->port[2]),
2538 	    print_port_speed(sc->port[3]));
2539 
2540 	/* Do NOT set ksp->ks_update.  These kstats do not change. */
2541 
2542 	/* Install the kstat */
2543 	ksp->ks_private = (void *)sc;
2544 	kstat_install(ksp);
2545 
2546 	return (ksp);
2547 }
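
/*
 * The resulting kstats can be inspected from userland with kstat(1M),
 * e.g. for instance 0 (assumed here for illustration):
 *
 *	# kstat -m t4nex -i 0 -n config
 *	# kstat -p t4nex:0:config:fw_vers
 */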
2548 
2549 /*
2550  * t4nex:X:stat
2551  */
2552 struct t4_wc_kstats {
2553 	kstat_named_t write_coal_success;
2554 	kstat_named_t write_coal_failure;
2555 };
2556 static kstat_t *
2557 setup_wc_kstats(struct adapter *sc)
2558 {
2559 	kstat_t *ksp;
2560 	struct t4_wc_kstats *kstatp;
2561 	int ndata;
2562 
2563 	ndata = sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2564 	ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2565 	    "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2566 	if (ksp == NULL) {
2567 		cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2568 		return (NULL);
2569 	}
2570 
2571 	kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2572 
2573 	KS_UINIT(write_coal_success);
2574 	KS_UINIT(write_coal_failure);
2575 
2576 	ksp->ks_update = update_wc_kstats;
2577 	/* Install the kstat */
2578 	ksp->ks_private = (void *)sc;
2579 	kstat_install(ksp);
2580 
2581 	return (ksp);
2582 }
2583 
2584 static int
2585 update_wc_kstats(kstat_t *ksp, int rw)
2586 {
2587 	struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2588 	struct adapter *sc = ksp->ks_private;
2589 	uint32_t wc_total, wc_success, wc_failure;
2590 
2591 	if (rw == KSTAT_WRITE)
2592 		return (0);
2593 
2594 	if (is_t5(sc->params.chip)) {
2595 		wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2596 		wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2597 		wc_success = wc_total - wc_failure;
2598 	} else {
2599 		wc_success = 0;
2600 		wc_failure = 0;
2601 	}
2602 
2603 	KS_U_SET(write_coal_success, wc_success);
2604 	KS_U_SET(write_coal_failure, wc_failure);
2605 
2606 	return (0);
2607 }
2608 
2609 /*
2610  * cxgbe:X:fec
2611  *
2612  * This provides visibility into the errors that have been found by the
2613  * different FEC subsystems. While it's tempting to combine the two different
2614  * FEC types logically, the data that the errors tell us are pretty different
2615  * between the two. Firecode is strictly per-lane, but RS has parts that are
2616  * related to symbol distribution to lanes and also to the overall channel.
2617  */
2618 struct cxgbe_port_fec_kstats {
2619 	kstat_named_t rs_corr;
2620 	kstat_named_t rs_uncorr;
2621 	kstat_named_t rs_sym0_corr;
2622 	kstat_named_t rs_sym1_corr;
2623 	kstat_named_t rs_sym2_corr;
2624 	kstat_named_t rs_sym3_corr;
2625 	kstat_named_t fc_lane0_corr;
2626 	kstat_named_t fc_lane0_uncorr;
2627 	kstat_named_t fc_lane1_corr;
2628 	kstat_named_t fc_lane1_uncorr;
2629 	kstat_named_t fc_lane2_corr;
2630 	kstat_named_t fc_lane2_uncorr;
2631 	kstat_named_t fc_lane3_corr;
2632 	kstat_named_t fc_lane3_uncorr;
2633 };
2634 
2635 static uint32_t
2636 read_fec_pair(struct port_info *pi, uint32_t lo_reg, uint32_t high_reg)
2637 {
2638 	struct adapter *sc = pi->adapter;
2639 	uint8_t port = pi->tx_chan;
2640 	uint32_t low, high, ret;
2641 
2642 	low = t4_read_reg32(sc, T5_PORT_REG(port, lo_reg));
2643 	high = t4_read_reg32(sc, T5_PORT_REG(port, high_reg));
2644 	ret = low & 0xffff;
2645 	ret |= (high & 0xffff) << 16;
2646 	return (ret);
2647 }
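
/*
 * For example, a low register reading of 0x1234 combined with a high
 * register reading of 0x2 yields 0x00021234: the two 16-bit hardware
 * counters are spliced into a single 32-bit value.
 */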
2648 
2649 static int
2650 update_port_fec_kstats(kstat_t *ksp, int rw)
2651 {
2652 	struct cxgbe_port_fec_kstats *fec = ksp->ks_data;
2653 	struct port_info *pi = ksp->ks_private;
2654 
2655 	if (rw == KSTAT_WRITE) {
2656 		return (EACCES);
2657 	}
2658 
2659 	/*
2660 	 * First go ahead and gather RS related stats.
2661 	 */
2662 	fec->rs_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_CCW_LO,
2663 	    T6_RS_FEC_CCW_HI);
2664 	fec->rs_uncorr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_NCCW_LO,
2665 	    T6_RS_FEC_NCCW_HI);
2666 	fec->rs_sym0_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR0_LO,
2667 	    T6_RS_FEC_SYMERR0_HI);
2668 	fec->rs_sym1_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR1_LO,
2669 	    T6_RS_FEC_SYMERR1_HI);
2670 	fec->rs_sym2_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR2_LO,
2671 	    T6_RS_FEC_SYMERR2_HI);
2672 	fec->rs_sym3_corr.value.ui64 += read_fec_pair(pi, T6_RS_FEC_SYMERR3_LO,
2673 	    T6_RS_FEC_SYMERR3_HI);
2674 
2675 	/*
2676 	 * Now go through and try to grab Firecode/BASE-R stats.
2677 	 */
2678 	fec->fc_lane0_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L0_CERR_LO,
2679 	    T6_FC_FEC_L0_CERR_HI);
2680 	fec->fc_lane0_uncorr.value.ui64 += read_fec_pair(pi,
2681 	    T6_FC_FEC_L0_NCERR_LO, T6_FC_FEC_L0_NCERR_HI);
2682 	fec->fc_lane1_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L1_CERR_LO,
2683 	    T6_FC_FEC_L1_CERR_HI);
2684 	fec->fc_lane1_uncorr.value.ui64 += read_fec_pair(pi,
2685 	    T6_FC_FEC_L1_NCERR_LO, T6_FC_FEC_L1_NCERR_HI);
2686 	fec->fc_lane2_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L2_CERR_LO,
2687 	    T6_FC_FEC_L2_CERR_HI);
2688 	fec->fc_lane2_uncorr.value.ui64 += read_fec_pair(pi,
2689 	    T6_FC_FEC_L2_NCERR_LO, T6_FC_FEC_L2_NCERR_HI);
2690 	fec->fc_lane3_corr.value.ui64 += read_fec_pair(pi, T6_FC_FEC_L3_CERR_LO,
2691 	    T6_FC_FEC_L3_CERR_HI);
2692 	fec->fc_lane3_uncorr.value.ui64 += read_fec_pair(pi,
2693 	    T6_FC_FEC_L3_NCERR_LO, T6_FC_FEC_L3_NCERR_HI);
2694 
2695 	return (0);
2696 }
2697 
2698 static kstat_t *
2699 setup_port_fec_kstats(struct port_info *pi)
2700 {
2701 	kstat_t *ksp;
2702 	struct cxgbe_port_fec_kstats *kstatp;
2703 
2704 	if (!is_t6(pi->adapter->params.chip)) {
2705 		return (NULL);
2706 	}
2707 
2708 	ksp = kstat_create(T4_PORT_NAME, ddi_get_instance(pi->dip), "fec",
2709 	    "net", KSTAT_TYPE_NAMED, sizeof (struct cxgbe_port_fec_kstats) /
2710 	    sizeof (kstat_named_t), 0);
2711 	if (ksp == NULL) {
2712 		cxgb_printf(pi->dip, CE_WARN, "failed to initialize fec "
2713 		    "kstats.");
2714 		return (NULL);
2715 	}
2716 
2717 	kstatp = ksp->ks_data;
2718 	KS_U64INIT(rs_corr);
2719 	KS_U64INIT(rs_uncorr);
2720 	KS_U64INIT(rs_sym0_corr);
2721 	KS_U64INIT(rs_sym1_corr);
2722 	KS_U64INIT(rs_sym2_corr);
2723 	KS_U64INIT(rs_sym3_corr);
2724 	KS_U64INIT(fc_lane0_corr);
2725 	KS_U64INIT(fc_lane0_uncorr);
2726 	KS_U64INIT(fc_lane1_corr);
2727 	KS_U64INIT(fc_lane1_uncorr);
2728 	KS_U64INIT(fc_lane2_corr);
2729 	KS_U64INIT(fc_lane2_uncorr);
2730 	KS_U64INIT(fc_lane3_corr);
2731 	KS_U64INIT(fc_lane3_uncorr);
2732 
2733 	ksp->ks_update = update_port_fec_kstats;
2734 	ksp->ks_private = pi;
2735 	kstat_install(ksp);
2736 
2737 	return (ksp);
2738 }
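
/*
 * Once installed, the FEC counters can be read with, e.g. (instance 0
 * assumed for illustration):
 *
 *	# kstat -m cxgbe -i 0 -n fec
 */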
2739 
2740 int
2741 adapter_full_init(struct adapter *sc)
2742 {
2743 	int i, rc = 0;
2744 
2745 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2746 
2747 	rc = t4_setup_adapter_queues(sc);
2748 	if (rc != 0)
2749 		goto done;
2750 
2751 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2752 		(void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
2753 	else {
2754 		for (i = 0; i < sc->intr_count; i++)
2755 			(void) ddi_intr_enable(sc->intr_handle[i]);
2756 	}
2757 	t4_intr_enable(sc);
2758 	sc->flags |= FULL_INIT_DONE;
2759 
2760 #ifdef TCP_OFFLOAD_ENABLE
2761 	/* TODO: wrong place to enable TOE capability */
2762 	if (is_offload(sc) != 0) {
2763 		for_each_port(sc, i) {
2764 			struct port_info *pi = sc->port[i];
2765 			rc = toe_capability(pi, 1);
2766 			if (rc != 0) {
2767 				cxgb_printf(pi->dip, CE_WARN,
2768 				    "Failed to activate toe capability: %d",
2769 				    rc);
2770 				rc = 0;		/* not a fatal error */
2771 			}
2772 		}
2773 	}
2774 #endif
2775 
2776 done:
2777 	if (rc != 0)
2778 		(void) adapter_full_uninit(sc);
2779 
2780 	return (rc);
2781 }
2782 
2783 int
2784 adapter_full_uninit(struct adapter *sc)
2785 {
2786 	int i, rc = 0;
2787 
2788 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2789 
2790 	if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2791 		(void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2792 	else {
2793 		for (i = 0; i < sc->intr_count; i++)
2794 			(void) ddi_intr_disable(sc->intr_handle[i]);
2795 	}
2796 
2797 	rc = t4_teardown_adapter_queues(sc);
2798 	if (rc != 0)
2799 		return (rc);
2800 
2801 	sc->flags &= ~FULL_INIT_DONE;
2802 
2803 	return (0);
2804 }
2805 
2806 int
2807 port_full_init(struct port_info *pi)
2808 {
2809 	struct adapter *sc = pi->adapter;
2810 	uint16_t *rss;
2811 	struct sge_rxq *rxq;
2812 	int rc, i;
2813 
2814 	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2815 	ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2816 
2817 	/*
2818 	 * Allocate tx/rx/fl queues for this port.
2819 	 */
2820 	rc = t4_setup_port_queues(pi);
2821 	if (rc != 0)
2822 		goto done;	/* error message displayed already */
2823 
2824 	/*
2825 	 * Setup RSS for this port.
2826 	 */
2827 	rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2828 	for_each_rxq(pi, i, rxq) {
2829 		rss[i] = rxq->iq.abs_id;
2830 	}
2831 	rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2832 	    pi->rss_size, rss, pi->nrxq);
2833 	kmem_free(rss, pi->nrxq * sizeof (*rss));
2834 	if (rc != 0) {
2835 		cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2836 		goto done;
2837 	}
2838 
2839 	/*
2840 	 * Initialize our per-port FEC kstats.
2841 	 */
2842 	pi->ksp_fec = setup_port_fec_kstats(pi);
2843 
2844 	pi->flags |= PORT_INIT_DONE;
2845 done:
2846 	if (rc != 0)
2847 		(void) port_full_uninit(pi);
2848 
2849 	return (rc);
2850 }
2851 
2852 /*
2853  * Idempotent.
2854  */
2855 int
2856 port_full_uninit(struct port_info *pi)
2857 {
2859 	ASSERT(pi->flags & PORT_INIT_DONE);
2860 
2861 	if (pi->ksp_fec != NULL) {
2862 		kstat_delete(pi->ksp_fec);
2863 		pi->ksp_fec = NULL;
2864 	}
2865 	(void) t4_teardown_port_queues(pi);
2866 	pi->flags &= ~PORT_INIT_DONE;
2867 
2868 	return (0);
2869 }
2870 
2871 void
2872 enable_port_queues(struct port_info *pi)
2873 {
2874 	struct adapter *sc = pi->adapter;
2875 	int i;
2876 	struct sge_iq *iq;
2877 	struct sge_rxq *rxq;
2878 #ifdef TCP_OFFLOAD_ENABLE
2879 	struct sge_ofld_rxq *ofld_rxq;
2880 #endif
2881 
2882 	ASSERT(pi->flags & PORT_INIT_DONE);
2883 
2884 	/*
2885 	 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2886 	 * back in disable_port_queues will be processed now, after an unbounded
2887 	 * delay.  This can't be good.
2888 	 */
2889 
2890 #ifdef TCP_OFFLOAD_ENABLE
2891 	for_each_ofld_rxq(pi, i, ofld_rxq) {
2892 		iq = &ofld_rxq->iq;
2893 		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2894 		    IQS_DISABLED)
2895 			panic("%s: iq %p wasn't disabled", __func__,
2896 			    (void *)iq);
2897 		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2898 		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2899 	}
2900 #endif
2901 
2902 	for_each_rxq(pi, i, rxq) {
2903 		iq = &rxq->iq;
2904 		if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2905 		    IQS_DISABLED)
2906 			panic("%s: iq %p wasn't disabled", __func__,
2907 			    (void *) iq);
2908 		t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2909 		    V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2910 	}
2911 }
2912 
2913 void
2914 disable_port_queues(struct port_info *pi)
2915 {
2916 	int i;
2917 	struct adapter *sc = pi->adapter;
2918 	struct sge_rxq *rxq;
2919 #ifdef TCP_OFFLOAD_ENABLE
2920 	struct sge_ofld_rxq *ofld_rxq;
2921 #endif
2922 
2923 	ASSERT(pi->flags & PORT_INIT_DONE);
2924 
2925 	/*
2926 	 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2927 	 */
2928 
2929 #ifdef TCP_OFFLOAD_ENABLE
2930 	for_each_ofld_rxq(pi, i, ofld_rxq) {
2931 		while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
2932 		    IQS_DISABLED) != IQS_IDLE)
2933 			msleep(1);
2934 	}
2935 #endif
2936 
2937 	for_each_rxq(pi, i, rxq) {
2938 		while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2939 		    IQS_DISABLED) != IQS_IDLE)
2940 			msleep(1);
2941 	}
2942 
2943 	mutex_enter(&sc->sfl_lock);
2944 #ifdef TCP_OFFLOAD_ENABLE
2945 	for_each_ofld_rxq(pi, i, ofld_rxq)
2946 		ofld_rxq->fl.flags |= FL_DOOMED;
2947 #endif
2948 	for_each_rxq(pi, i, rxq)
2949 		rxq->fl.flags |= FL_DOOMED;
2950 	mutex_exit(&sc->sfl_lock);
2951 	/* TODO: need to wait for all fl's to be removed from sc->sfl */
2952 }
2953 
2954 void
2955 t4_fatal_err(struct adapter *sc)
2956 {
2957 	t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2958 	t4_intr_disable(sc);
2959 	cxgb_printf(sc->dip, CE_WARN,
2960 	    "encountered fatal error, adapter stopped.");
2961 }
2962 
2963 int
2964 t4_os_find_pci_capability(struct adapter *sc, int cap)
2965 {
2966 	uint16_t stat;
2967 	uint8_t cap_ptr, cap_id;
2968 
2969 	t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2970 	if ((stat & PCI_STAT_CAP) == 0)
2971 		return (0); /* does not implement capabilities */
2972 
2973 	t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2974 	while (cap_ptr) {
2975 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2976 		if (cap_id == cap)
2977 			return (cap_ptr); /* found */
2978 		t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2979 	}
2980 
2981 	return (0); /* not found */
2982 }
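
/*
 * For example, a caller that wants the PCI Express capability would pass
 * the generic capability id (PCI_CAP_ID_PCI_E, defined alongside the other
 * PCI_CAP_ID_* constants):
 *
 *	int ptr = t4_os_find_pci_capability(sc, PCI_CAP_ID_PCI_E);
 *	if (ptr != 0)
 *		... the capability structure starts at config offset ptr.
 */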
2983 
2984 void
2985 t4_os_portmod_changed(struct adapter *sc, int idx)
2986 {
2987 	static const char *mod_str[] = {
2988 		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2989 	};
2990 	struct port_info *pi = sc->port[idx];
2991 
2992 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2993 		cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2994 	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2995 		cxgb_printf(pi->dip, CE_NOTE,
2996 		    "unknown transceiver inserted.");
2997 	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2998 		cxgb_printf(pi->dip, CE_NOTE,
2999 		    "unsupported transceiver inserted.");
3000 	else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
3001 		cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.",
3002 		    mod_str[pi->mod_type]);
3003 	else
3004 		cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
3005 		    pi->mod_type);
3006 
3007 	if ((isset(&sc->open_device_map, pi->port_id) != 0) &&
3008 	    pi->link_cfg.new_module)
3009 		pi->link_cfg.redo_l1cfg = true;
3010 }
3011 
3012 /* ARGSUSED */
3013 static int
3014 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
3015 {
3016 	if (m != NULL)
3017 		freemsg(m);
3018 	return (0);
3019 }
3020 
3021 int
3022 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
3023 {
3024 	cpl_handler_t *loc, new;
3025 
3026 	if (opcode >= ARRAY_SIZE(sc->cpl_handler))
3027 		return (EINVAL);
3028 
3029 	/* Swap the handler in atomically; a pointer-sized swap is 64-bit safe. */
3030 	new = h ? h : cpl_not_handled;
3031 	loc = &sc->cpl_handler[opcode];
3032 	(void) atomic_swap_ptr(loc, (void *)new);
3032 
3033 	return (0);
3034 }
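
/*
 * An upper layer driver installs its handlers with, for example (a sketch;
 * do_act_establish is a hypothetical handler with the cpl_handler_t
 * signature):
 *
 *	(void) t4_register_cpl_handler(sc, CPL_ACT_ESTABLISH,
 *	    do_act_establish);
 *
 * Passing NULL for the handler restores cpl_not_handled.
 */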
3035 
3036 static int
3037 fw_msg_not_handled(struct adapter *sc, const __be64 *data)
3038 {
3039 	struct cpl_fw6_msg *cpl;
3040 
3041 	cpl = __containerof((void *)data, struct cpl_fw6_msg, data);
3042 
3043 	cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type);
3044 	return (0);
3045 }
3046 
3047 int
3048 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
3049 {
3050 	fw_msg_handler_t *loc, new;
3051 
3052 	if (type >= ARRAY_SIZE(sc->fw_msg_handler))
3053 		return (EINVAL);
3054 
3055 	/*
3056 	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
3057 	 * handler dispatch table.  Reject any attempt to install a handler for
3058 	 * this subtype.
3059 	 */
3060 	if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
3061 		return (EINVAL);
3062 
3063 	new = h ? h : fw_msg_not_handled;
3064 	loc = &sc->fw_msg_handler[type];
3065 	(void) atomic_swap_ptr(loc, (void *)new);
3066 
3067 	return (0);
3068 }
3069 
3070 #ifdef TCP_OFFLOAD_ENABLE
3071 static int
3072 toe_capability(struct port_info *pi, int enable)
3073 {
3074 	int rc;
3075 	struct adapter *sc = pi->adapter;
3076 
3077 	if (!is_offload(sc))
3078 		return (ENODEV);
3079 
3080 	if (enable != 0) {
3081 		if (isset(&sc->offload_map, pi->port_id) != 0)
3082 			return (0);
3083 
3084 		if (sc->offload_map == 0) {
3085 			rc = activate_uld(sc, ULD_TOM, &sc->tom);
3086 			if (rc != 0)
3087 				return (rc);
3088 		}
3089 
3090 		setbit(&sc->offload_map, pi->port_id);
3091 	} else {
3092 		if (!isset(&sc->offload_map, pi->port_id))
3093 			return (0);
3094 
3095 		clrbit(&sc->offload_map, pi->port_id);
3096 
3097 		if (sc->offload_map == 0) {
3098 			rc = deactivate_uld(&sc->tom);
3099 			if (rc != 0) {
3100 				setbit(&sc->offload_map, pi->port_id);
3101 				return (rc);
3102 			}
3103 		}
3104 	}
3105 
3106 	return (0);
3107 }
3108 
3109 /*
3110  * Add an upper layer driver to the global list.
3111  */
3112 int
3113 t4_register_uld(struct uld_info *ui)
3114 {
3115 	int rc = 0;
3116 	struct uld_info *u;
3117 
3118 	mutex_enter(&t4_uld_list_lock);
3119 	SLIST_FOREACH(u, &t4_uld_list, link) {
3120 		if (u->uld_id == ui->uld_id) {
3121 			rc = EEXIST;
3122 			goto done;
3123 		}
3124 	}
3125 
3126 	SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
3127 	ui->refcount = 0;
3128 done:
3129 	mutex_exit(&t4_uld_list_lock);
3130 	return (rc);
3131 }
3132 
3133 int
3134 t4_unregister_uld(struct uld_info *ui)
3135 {
3136 	int rc = EINVAL;
3137 	struct uld_info *u;
3138 
3139 	mutex_enter(&t4_uld_list_lock);
3140 
3141 	SLIST_FOREACH(u, &t4_uld_list, link) {
3142 		if (u == ui) {
3143 			if (ui->refcount > 0) {
3144 				rc = EBUSY;
3145 				goto done;
3146 			}
3147 
3148 			SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
3149 			rc = 0;
3150 			goto done;
3151 		}
3152 	}
3153 done:
3154 	mutex_exit(&t4_uld_list_lock);
3155 	return (rc);
3156 }
3157 
3158 static int
3159 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
3160 {
3161 	int rc = EAGAIN;
3162 	struct uld_info *ui;
3163 
3164 	mutex_enter(&t4_uld_list_lock);
3165 
3166 	SLIST_FOREACH(ui, &t4_uld_list, link) {
3167 		if (ui->uld_id == id) {
3168 			rc = ui->attach(sc, &usc->softc);
3169 			if (rc == 0) {
3170 				ASSERT(usc->softc != NULL);
3171 				ui->refcount++;
3172 				usc->uld = ui;
3173 			}
3174 			goto done;
3175 		}
3176 	}
3177 done:
3178 	mutex_exit(&t4_uld_list_lock);
3179 
3180 	return (rc);
3181 }
3182 
3183 static int
3184 deactivate_uld(struct uld_softc *usc)
3185 {
3186 	int rc;
3187 
3188 	mutex_enter(&t4_uld_list_lock);
3189 
3190 	if (usc->uld == NULL || usc->softc == NULL) {
3191 		rc = EINVAL;
3192 		goto done;
3193 	}
3194 
3195 	rc = usc->uld->detach(usc->softc);
3196 	if (rc == 0) {
3197 		ASSERT(usc->uld->refcount > 0);
3198 		usc->uld->refcount--;
3199 		usc->uld = NULL;
3200 		usc->softc = NULL;
3201 	}
3202 done:
3203 	mutex_exit(&t4_uld_list_lock);
3204 
3205 	return (rc);
3206 }
3207 
3208 void
3209 t4_iterate(void (*func)(int, void *), void *arg)
3210 {
3211 	struct adapter *sc;
3212 
3213 	mutex_enter(&t4_adapter_list_lock);
3214 	SLIST_FOREACH(sc, &t4_adapter_list, link) {
3215 		/*
3216 		 * func should not make any assumptions about what state sc is
3217 		 * in - the only guarantee is that sc->sc_lock is a valid lock.
3218 		 */
3219 		func(ddi_get_instance(sc->dip), arg);
3220 	}
3221 	mutex_exit(&t4_adapter_list_lock);
3222 }
3223 
3224 #endif
3225 
3226 static int
3227 t4_sensor_read(struct adapter *sc, uint32_t diag, uint32_t *valp)
3228 {
3229 	int rc;
3230 	struct port_info *pi = sc->port[0];
3231 	uint32_t param, val;
3232 
3233 	rc = begin_synchronized_op(pi, 1, 1);
3234 	if (rc != 0) {
3235 		return (rc);
3236 	}
3237 	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3238 	    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_DIAG) |
3239 	    V_FW_PARAMS_PARAM_Y(diag);
3240 	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
3241 	end_synchronized_op(pi, 1);
3242 
3243 	if (rc != 0) {
3244 		return (rc);
3245 	}
3246 
3247 	if (val == 0) {
3248 		return (EIO);
3249 	}
3250 
3251 	*valp = val;
3252 	return (0);
3253 }
3254 
3255 static int
3256 t4_temperature_read(void *arg, sensor_ioctl_scalar_t *scalar)
3257 {
3258 	int ret;
3259 	struct adapter *sc = arg;
3260 	uint32_t val;
3261 
3262 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_TMP, &val);
3263 	if (ret != 0) {
3264 		return (ret);
3265 	}
3266 
3267 	/*
3268 	 * The device measures temperature in units of 1 degree Celsius. We
3269 	 * don't know its precision.
3270 	 */
3271 	scalar->sis_unit = SENSOR_UNIT_CELSIUS;
3272 	scalar->sis_gran = 1;
3273 	scalar->sis_prec = 0;
3274 	scalar->sis_value = val;
3275 
3276 	return (0);
3277 }
3278 
3279 static int
3280 t4_voltage_read(void *arg, sensor_ioctl_scalar_t *scalar)
3281 {
3282 	int ret;
3283 	struct adapter *sc = arg;
3284 	uint32_t val;
3285 
3286 	ret = t4_sensor_read(sc, FW_PARAM_DEV_DIAG_VDD, &val);
3287 	if (ret != 0) {
3288 		return (ret);
3289 	}
3290 
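	/*
	 * The firmware reports VDD in millivolts; with sis_gran set to
	 * 1000 below, a raw reading of 3300 is presented as 3.300 V.
	 */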
3291 	scalar->sis_unit = SENSOR_UNIT_VOLTS;
3292 	scalar->sis_gran = 1000;
3293 	scalar->sis_prec = 0;
3294 	scalar->sis_value = val;
3295 
3296 	return (0);
3297 }
3298 
3299 /*
3300  * While the hardware supports the ability to read and write the flash image,
3301  * this is not currently wired up.
3302  */
3303 static int
3304 t4_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
3305 {
3306 	*caps = DDI_UFM_CAP_REPORT;
3307 	return (0);
3308 }
3309 
3310 static int
3311 t4_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3312     ddi_ufm_image_t *imgp)
3313 {
3314 	if (imgno != 0) {
3315 		return (EINVAL);
3316 	}
3317 
3318 	ddi_ufm_image_set_desc(imgp, "Firmware");
3319 	ddi_ufm_image_set_nslots(imgp, 1);
3320 
3321 	return (0);
3322 }
3323 
3324 static int
3325 t4_ufm_fill_slot_version(nvlist_t *nvl, const char *key, uint32_t vers)
3326 {
3327 	char buf[128];
3328 
3329 	if (vers == 0) {
3330 		return (0);
3331 	}
3332 
3333 	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
3334 	    G_FW_HDR_FW_VER_MAJOR(vers), G_FW_HDR_FW_VER_MINOR(vers),
3335 	    G_FW_HDR_FW_VER_MICRO(vers), G_FW_HDR_FW_VER_BUILD(vers)) >=
3336 	    sizeof (buf)) {
3337 		return (EOVERFLOW);
3338 	}
3339 
3340 	return (nvlist_add_string(nvl, key, buf));
3341 }
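
/*
 * For example, a packed version word of 0x10190b00 decodes to major 16,
 * minor 25, micro 11, build 0, and is rendered as "16.25.11.0".
 */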
3342 
3343 static int
3344 t4_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, uint_t slotno,
3345     ddi_ufm_slot_t *slotp)
3346 {
3347 	int ret;
3348 	struct adapter *sc = arg;
3349 	nvlist_t *misc = NULL;
3350 	char buf[128];
3351 
3352 	if (imgno != 0 || slotno != 0) {
3353 		return (EINVAL);
3354 	}
3355 
3356 	if (snprintf(buf, sizeof (buf), "%u.%u.%u.%u",
3357 	    G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
3358 	    G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
3359 	    G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
3360 	    G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)) >= sizeof (buf)) {
3361 		return (EOVERFLOW);
3362 	}
3363 
3364 	ddi_ufm_slot_set_version(slotp, buf);
3365 
3366 	(void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
3367 	if ((ret = t4_ufm_fill_slot_version(misc, "TP Microcode",
3368 	    sc->params.tp_vers)) != 0) {
3369 		goto err;
3370 	}
3371 
3372 	if ((ret = t4_ufm_fill_slot_version(misc, "Bootstrap",
3373 	    sc->params.bs_vers)) != 0) {
3374 		goto err;
3375 	}
3376 
3377 	if ((ret = t4_ufm_fill_slot_version(misc, "Expansion ROM",
3378 	    sc->params.er_vers)) != 0) {
3379 		goto err;
3380 	}
3381 
3382 	if ((ret = nvlist_add_uint32(misc, "Serial Configuration",
3383 	    sc->params.scfg_vers)) != 0) {
3384 		goto err;
3385 	}
3386 
3387 	if ((ret = nvlist_add_uint32(misc, "VPD Version",
3388 	    sc->params.vpd_vers)) != 0) {
3389 		goto err;
3390 	}
3391 
3392 	ddi_ufm_slot_set_misc(slotp, misc);
3393 	ddi_ufm_slot_set_attrs(slotp, DDI_UFM_ATTR_ACTIVE |
3394 	    DDI_UFM_ATTR_WRITEABLE | DDI_UFM_ATTR_READABLE);
3395 	return (0);
3396 
3397 err:
3398 	nvlist_free(misc);
3399 	return (ret);
3401 }