/* drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
 * (revision fcc8487d477a3452a1d0ccbdd4c5e0e1e3cb8bed)
 */
/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"

/*
 * Generic information about the driver.
 */
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX	2
#define MSI_MSI		1
#define MSI_DEFAULT	MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
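
/*
 * For example, to restrict the driver to plain MSI one could load the
 * module with (hypothetical invocation):
 *
 *	modprobe cxgb4vf msi=1
 */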

/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES		= 16384,
	MAX_RSPQ_ENTRIES	= 16384,
	MAX_RX_BUFFERS		= 16384,

	MIN_TXQ_ENTRIES		= 32,
	MIN_RSPQ_ENTRIES	= 128,
	MIN_FL_ENTRIES		= 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queue
	 * indices are all in units of Egress Context Units (bytes), and free
	 * list entries are 64-bit PCI DMA addresses.  And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused.  See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};
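
/*
 * To make the arithmetic above concrete (a sketch, assuming the usual
 * 64-byte Egress Queue Unit, i.e. SGE_EQ_IDXSIZE == 64): Free List
 * entries are 8-byte DMA addresses, so FL_PER_EQ_UNIT = 64 / 8 = 8 and
 * MIN_FL_RESID reserves 8 entries, which keeps a completely full Free
 * List from ever looking empty (Producer Index == Consumer Index).
 */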

/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;

/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const char *s;
		const char *fc;
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;

		default:
			s = "unknown";
			break;
		}

		switch (pi->link_cfg.fc) {
		case PAUSE_RX:
			fc = "RX";
			break;

		case PAUSE_TX:
			fc = "TX";
			break;

		case PAUSE_RX|PAUSE_TX:
			fc = "RX/TX";
			break;

		default:
			fc = "no";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}

/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};
	const struct net_device *dev = adapter->port[pidx];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
			 dev->name);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
			 dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
			 "module inserted\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
			 "forcing TWINAX\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
			 "inserted\n", dev->name, pi->mod_type);
}

/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly. Enable vlan accel.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0) {
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}

/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
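	/*
	 * Unwind in reverse order.  request_irq() failed for Queue Set "rxq"
	 * before "msi" was advanced, so pre-decrement both indices to free
	 * only the vectors which were successfully requested.
	 */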
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}

/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}

/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC_V(0) |
		     SEINTARM_V(rspq->intr_params) |
		     INGRESSQID_V(rspq->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC_V(0) |
			     SEINTARM_V(s->intrq.intr_params) |
			     INGRESSQID_V(s->intrq.cntxt_id));

}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}

/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_FW4_MSG: {
		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 */
		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev,
				"unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			break;
		}
		cpl = (void *)p;
		/*FALLTHROUGH*/
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message.  We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}

/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initializes them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
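	/*
	 * The maps below are keyed by Absolute Queue ID; the bases computed
	 * above record the constant offset between an Absolute Queue ID and
	 * its VF-relative Context ID, which is what lets fwevtq_handler()
	 * translate the QIDs reported by hardware back into queue structures.
	 */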
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}

/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;
				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}

/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);

	/* Initialize hash MAC addr list */
	INIT_LIST_HEAD(&adapter->mac_hlist);
	return 0;
}

/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) is closed.  (Note that this routine is called "cxgb_down" in
 * the PF Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}

/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}

/*
 * Shut down a net device.  This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}

/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}

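/*
 * Program the MPS hash filter with the 64-bit vector derived from our list
 * of hash-filtered MAC addresses.  Note that this is a best-effort filter:
 * each address contributes a single bit (1ULL << hash_mac_addr()), so hash
 * collisions can let frames through for addresses we never added; exact-match
 * entries are managed separately via t4vf_alloc_mac_filt() below.
 */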
static inline int cxgb4vf_set_addr_hash(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adapter->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}

static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
				  NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* If hash != 0, add the address to the hash addr list so
	 * that at the end we can calculate the hash for the whole
	 * list and program it.
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
		ret = cxgb4vf_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4vf_set_addr_hash(pi);
		}
	}

	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);

	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
			       (dev->flags & IFF_PROMISC) != 0,
			       (dev->flags & IFF_ALLMULTI) != 0,
			       1, -1, sleep_ok);
}

/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adapter: the adapter
 *	@rspq: the RX response queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an RX response queue's interrupt hold-off time and packet count.
 *	At least one of the two needs to be enabled for the queue to generate
 *	interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     QINTR_CNT_EN_V(cnt > 0));
	return 0;
}
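
/*
 * These holdoff parameters are reached through the standard ethtool
 * coalescing interface (see cxgb4vf_set_coalesce() below).  For example,
 * on a hypothetical interface eth0, "ethtool -C eth0 rx-usecs 5 rx-frames 8"
 * requests a 5us holdoff timer and an 8-packet holdoff counter, each mapped
 * onto the closest value the hardware actually supports.
 */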

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).
	 */
	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
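
/*
 * For example, assuming CHELSIO_CHIP_VERSION() yields 4 for a T4 part, the
 * resulting register-dump version would be 4 | (0x3f << 10) = 0xfc04.
 */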

/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int ret = 0;

	switch (cmd) {
	    /*
	     * The VF Driver doesn't have access to any of the other
	     * common Ethernet device ioctl()'s (like reading/writing
	     * PHY registers, etc.).
	     */

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb4vf_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
				features & NETIF_F_HW_VLAN_CTAG_TX, 0);

	return 0;
}

/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
			      addr->sa_data, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues.  This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->flags & USING_MSIX) {
		struct sge_eth_rxq *rxq;
		int nqsets;

		rxq = &adapter->sge.ethrxq[pi->first_qset];
		for (nqsets = pi->nqsets; nqsets; nqsets--) {
			t4vf_sge_intr_msix(0, &rxq->rspq);
			rxq++;
		}
	} else
		t4vf_intr_handler(adapter)(0, adapter);
}
#endif

/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */

/**
 *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
 *	@port_type: Firmware Port Type
 *	@mod_type: Firmware Module Type
 *
 *	Translate Firmware Port/Module type to Ethtool Port Type.
 */
static int from_fw_port_mod_type(enum fw_port_type port_type,
				 enum fw_port_module_type mod_type)
{
	if (port_type == FW_PORT_TYPE_BT_SGMII ||
	    port_type == FW_PORT_TYPE_BT_XFI ||
	    port_type == FW_PORT_TYPE_BT_XAUI) {
		return PORT_TP;
	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
		return PORT_FIBRE;
	} else if (port_type == FW_PORT_TYPE_SFP ||
		   port_type == FW_PORT_TYPE_QSFP_10G ||
		   port_type == FW_PORT_TYPE_QSA ||
		   port_type == FW_PORT_TYPE_QSFP) {
		if (mod_type == FW_PORT_MOD_TYPE_LR ||
		    mod_type == FW_PORT_MOD_TYPE_SR ||
		    mod_type == FW_PORT_MOD_TYPE_ER ||
		    mod_type == FW_PORT_MOD_TYPE_LRM)
			return PORT_FIBRE;
		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			return PORT_DA;
		else
			return PORT_OTHER;
	}

	return PORT_OTHER;
}

/**
 *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
 *	@port_type: Firmware Port Type
 *	@fw_caps: Firmware Port Capabilities
 *	@link_mode_mask: ethtool Link Mode Mask
 *
 *	Translate a Firmware Port Capabilities specification to an ethtool
 *	Link Mode Mask.
 */
static void fw_caps_to_lmm(enum fw_port_type port_type,
			   unsigned int fw_caps,
			   unsigned long *link_mode_mask)
{
	#define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\
			 ## _BIT, link_mode_mask)

	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
		do { \
			if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
				SET_LMM(__lmm_name); \
		} while (0)
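
	/*
	 * For example, SET_LMM(TP) expands to
	 * __set_bit(ETHTOOL_LINK_MODE_TP_BIT, link_mode_mask), while
	 * FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full) sets
	 * ETHTOOL_LINK_MODE_10000baseT_Full_BIT only when fw_caps contains
	 * FW_PORT_CAP_SPEED_10G.
	 */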

	switch (port_type) {
	case FW_PORT_TYPE_BT_SGMII:
	case FW_PORT_TYPE_BT_XFI:
	case FW_PORT_TYPE_BT_XAUI:
		SET_LMM(TP);
		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_KX4:
	case FW_PORT_TYPE_KX:
		SET_LMM(Backplane);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_KR:
		SET_LMM(Backplane);
		SET_LMM(10000baseKR_Full);
		break;

	case FW_PORT_TYPE_BP_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		break;

	case FW_PORT_TYPE_BP4_AP:
		SET_LMM(Backplane);
		SET_LMM(10000baseR_FEC);
		SET_LMM(10000baseKR_Full);
		SET_LMM(1000baseKX_Full);
		SET_LMM(10000baseKX4_Full);
		break;

	case FW_PORT_TYPE_FIBER_XFI:
	case FW_PORT_TYPE_FIBER_XAUI:
	case FW_PORT_TYPE_SFP:
	case FW_PORT_TYPE_QSFP_10G:
	case FW_PORT_TYPE_QSA:
		SET_LMM(FIBRE);
		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
		break;

	case FW_PORT_TYPE_BP40_BA:
	case FW_PORT_TYPE_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(40000baseSR4_Full);
		break;

	case FW_PORT_TYPE_CR_QSFP:
	case FW_PORT_TYPE_SFP28:
		SET_LMM(FIBRE);
		SET_LMM(25000baseCR_Full);
		break;

	case FW_PORT_TYPE_KR4_100G:
	case FW_PORT_TYPE_CR4_QSFP:
		SET_LMM(FIBRE);
		SET_LMM(100000baseCR4_Full);
		break;

	default:
		break;
	}

	FW_CAPS_TO_LMM(ANEG, Autoneg);
	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

	#undef FW_CAPS_TO_LMM
	#undef SET_LMM
}

static int cxgb4vf_get_link_ksettings(struct net_device *dev,
				      struct ethtool_link_ksettings
							*link_ksettings)
{
	const struct port_info *pi = netdev_priv(dev);
	struct ethtool_link_settings *base = &link_ksettings->base;

	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

	if (pi->mdio_addr >= 0) {
		base->phy_address = pi->mdio_addr;
		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
				      ? ETH_MDIO_SUPPORTS_C22
				      : ETH_MDIO_SUPPORTS_C45);
	} else {
		base->phy_address = 255;
		base->mdio_support = 0;
	}

	fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported,
		       link_ksettings->link_modes.supported);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising,
		       link_ksettings->link_modes.advertising);
	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising,
		       link_ksettings->link_modes.lp_advertising);

	if (netif_carrier_ok(dev)) {
		base->speed = pi->link_cfg.speed;
		base->duplex = DUPLEX_FULL;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	base->autoneg = pi->link_cfg.autoneg;
	if (pi->link_cfg.supported & FW_PORT_CAP_ANEG)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
	if (pi->link_cfg.autoneg)
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);

	return 0;
}

/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
		sizeof(drvinfo->bus_info));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}

/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}

/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values.  Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	rp->rx_max_pending = MAX_RX_BUFFERS;
	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = MAX_TXQ_ENTRIES;

	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

/*
 * Set the Queue Set ring size parameters for the device.  Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	int qs;

	if (rp->rx_pending > MAX_RX_BUFFERS ||
	    rp->rx_jumbo_pending ||
	    rp->tx_pending > MAX_TXQ_ENTRIES ||
	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    rp->rx_pending < MIN_FL_ENTRIES ||
	    rp->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
		s->ethtxq[qs].q.size = rp->tx_pending;
	}
	return 0;
}
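
/*
 * Note the MIN_FL_RESID round trip between the get/set ringparam methods
 * above: with the assumed 64-byte Egress Queue Unit (MIN_FL_RESID == 8),
 * a request for rx_pending == 1024 programs fl.size = 1032, and a later
 * cxgb4vf_get_ringparam() reports 1032 - 8 == 1024 back to the user.
 */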

/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device.  Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adapter = pi->adapter;
	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
	coalesce->rx_max_coalesced_frames =
		((rspq->intr_params & QINTR_CNT_EN_F)
		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
		 : 0);
	return 0;
}

/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return set_rxq_intr_params(adapter,
				   &adapter->sge.ethrxq[pi->first_qset].rspq,
				   coalesce->rx_coalesce_usecs,
				   coalesce->rx_max_coalesced_frames);
}

/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pauseparam)
{
	struct port_info *pi = netdev_priv(dev);

	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
}

/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct port_info *pi = netdev_priv(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4vf_identify_port(pi->adapter, pi->viid, val);
}

/*
 * Port stats maintained per queue of the port.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 lro_pkts;
	u64 lro_merged;
};

/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes  ",
	"TxBroadcastFrames ",
	"TxMulticastBytes  ",
	"TxMulticastFrames ",
	"TxUnicastBytes    ",
	"TxUnicastFrames   ",
	"TxDroppedFrames   ",
	"TxOffloadBytes    ",
	"TxOffloadFrames   ",
	"RxBroadcastBytes  ",
	"RxBroadcastFrames ",
	"RxMulticastBytes  ",
	"RxMulticastFrames ",
	"RxUnicastBytes    ",
	"RxUnicastFrames   ",
	"RxErrorFrames     ",

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	"TSO               ",
	"TxCsumOffload     ",
	"RxCsumGood        ",
	"VLANextractions   ",
	"VLANinsertions    ",
	"GROPackets        ",
	"GROMerged         ",
};

/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
	/*NOTREACHED*/
}

/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}

/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port".
 */
static void collect_sge_port_stats(const struct adapter *adapter,
				   const struct port_info *pi,
				   struct queue_port_stats *stats)
{
	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
	int qs;

	memset(stats, 0, sizeof(*stats));
	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
		stats->tso += txq->tso;
		stats->tx_csum += txq->tx_cso;
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
		stats->lro_pkts += rxq->stats.lro_pkts;
		stats->lro_merged += rxq->stats.lro_merged;
	}
}

/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);
	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}

/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0xffff;
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}

/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);

	/* T5 adds new registers in the PL Register map.
	 */
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}

/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

static const struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
	.get_drvinfo		= cxgb4vf_get_drvinfo,
	.get_msglevel		= cxgb4vf_get_msglevel,
	.set_msglevel		= cxgb4vf_set_msglevel,
	.get_ringparam		= cxgb4vf_get_ringparam,
	.set_ringparam		= cxgb4vf_set_ringparam,
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
	.set_phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
};

/*
 * /sys/kernel/debug/cxgb4vf support code and data.
 * ================================================
 */

/*
 * Show Firmware Mailbox Command/Reply Log
 *
 * Note that we don't do any locking when dumping the Firmware Mailbox Log so
 * it's possible that we can catch things during a log update and therefore
 * see partially corrupted log entries.  But it's probably Good Enough(tm).
 * If we ever decide that we want to make sure that we're dumping a coherent
 * log, we'd need to perform locking in the mailbox logging and in
 * mboxlog_open() where we'd need to grab the entire mailbox log in one go
 * like we do for the Firmware Device Log.  But as stated above, meh ...
 */
static int mboxlog_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct mbox_cmd_log *log = adapter->mbox_log;
	struct mbox_cmd *entry;
	int entry_idx, i;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%10s  %15s  %5s  %5s  %s\n",
			   "Seq#", "Tstamp", "Atime", "Etime",
			   "Command/Reply");
		return 0;
	}

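	/*
	 * Positions are handed to us as (void *)(pos + 1) by
	 * mboxlog_get_idx() below, with SEQ_START_TOKEN standing in for the
	 * header at pos 0, so "v - 2" is the zero-based offset of this entry
	 * from the log's cursor (the oldest entry in the ring).
	 */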
1802 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1803 	if (entry_idx >= log->size)
1804 		entry_idx -= log->size;
1805 	entry = mbox_cmd_log_entry(log, entry_idx);
1806 
1807 	/* skip over unused entries */
1808 	if (entry->timestamp == 0)
1809 		return 0;
1810 
1811 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1812 		   entry->seqno, entry->timestamp,
1813 		   entry->access, entry->execute);
1814 	for (i = 0; i < MBOX_LEN / 8; i++) {
1815 		u64 flit = entry->cmd[i];
1816 		u32 hi = (u32)(flit >> 32);
1817 		u32 lo = (u32)flit;
1818 
1819 		seq_printf(seq, "  %08x %08x", hi, lo);
1820 	}
1821 	seq_puts(seq, "\n");
1822 	return 0;
1823 }
1824 
1825 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1826 {
1827 	struct adapter *adapter = seq->private;
1828 	struct mbox_cmd_log *log = adapter->mbox_log;
1829 
1830 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
1831 }
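
/*
 * A minimal sketch (not used by the driver itself) of the index math in
 * mboxlog_get_idx() and mboxlog_show() above: seq_file position "pos" is
 * carried around as the opaque cookie (void *)(pos + 1) so that a valid
 * position can never collide with NULL or SEQ_START_TOKEN, and logical
 * entry i (0 == oldest) then lives at physical ring slot
 * (cursor + i) % size:
 */
static inline unsigned int mboxlog_ring_slot(const struct mbox_cmd_log *log,
					     unsigned int i)
{
	unsigned int entry_idx = log->cursor + i;

	/* one conditional subtraction suffices because i < log->size */
	if (entry_idx >= log->size)
		entry_idx -= log->size;
	return entry_idx;
}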
1832 
1833 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
1834 {
1835 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
1836 }
1837 
1838 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
1839 {
1840 	++*pos;
1841 	return mboxlog_get_idx(seq, *pos);
1842 }
1843 
1844 static void mboxlog_stop(struct seq_file *seq, void *v)
1845 {
1846 }
1847 
1848 static const struct seq_operations mboxlog_seq_ops = {
1849 	.start = mboxlog_start,
1850 	.next  = mboxlog_next,
1851 	.stop  = mboxlog_stop,
1852 	.show  = mboxlog_show
1853 };
1854 
1855 static int mboxlog_open(struct inode *inode, struct file *file)
1856 {
1857 	int res = seq_open(file, &mboxlog_seq_ops);
1858 
1859 	if (!res) {
1860 		struct seq_file *seq = file->private_data;
1861 
1862 		seq->private = inode->i_private;
1863 	}
1864 	return res;
1865 }
1866 
1867 static const struct file_operations mboxlog_fops = {
1868 	.owner   = THIS_MODULE,
1869 	.open    = mboxlog_open,
1870 	.read    = seq_read,
1871 	.llseek  = seq_lseek,
1872 	.release = seq_release,
1873 };
1874 
1875 /*
1876  * Show SGE Queue Set information.  We display QPL Queue Sets per line.
1877  */
1878 #define QPL	4
1879 
1880 static int sge_qinfo_show(struct seq_file *seq, void *v)
1881 {
1882 	struct adapter *adapter = seq->private;
1883 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
1884 	int qs, r = (uintptr_t)v - 1;
1885 
1886 	if (r)
1887 		seq_putc(seq, '\n');
1888 
1889 	#define S3(fmt_spec, s, v) \
1890 		do { \
1891 			seq_printf(seq, "%-12s", s); \
1892 			for (qs = 0; qs < n; ++qs) \
1893 				seq_printf(seq, " %16" fmt_spec, v); \
1894 			seq_putc(seq, '\n'); \
1895 		} while (0)
1896 	#define S(s, v)		S3("s", s, v)
1897 	#define T(s, v)		S3("u", s, txq[qs].v)
1898 	#define R(s, v)		S3("u", s, rxq[qs].v)
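	/* For reference, the row T("TxQ ID:", q.abs_id) below expands via
	 * S3() into a printer that emits the "TxQ ID:" label and then
	 * " %16u"-formats txq[qs].q.abs_id for each of the n Queue Sets
	 * shown on this line.
	 */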
1899 
1900 	if (r < eth_entries) {
1901 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
1902 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
1903 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
1904 
1905 		S("QType:", "Ethernet");
1906 		S("Interface:",
1907 		  (rxq[qs].rspq.netdev
1908 		   ? rxq[qs].rspq.netdev->name
1909 		   : "N/A"));
1910 		S3("d", "Port:",
1911 		   (rxq[qs].rspq.netdev
1912 		    ? ((struct port_info *)
1913 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
1914 		    : -1));
1915 		T("TxQ ID:", q.abs_id);
1916 		T("TxQ size:", q.size);
1917 		T("TxQ inuse:", q.in_use);
1918 		T("TxQ PIdx:", q.pidx);
1919 		T("TxQ CIdx:", q.cidx);
1920 		R("RspQ ID:", rspq.abs_id);
1921 		R("RspQ size:", rspq.size);
1922 		R("RspQE size:", rspq.iqe_len);
1923 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
1924 		S3("u", "Intr pktcnt:",
1925 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
1926 		R("RspQ CIdx:", rspq.cidx);
1927 		R("RspQ Gen:", rspq.gen);
1928 		R("FL ID:", fl.abs_id);
1929 		R("FL size:", fl.size - MIN_FL_RESID);
1930 		R("FL avail:", fl.avail);
1931 		R("FL PIdx:", fl.pidx);
1932 		R("FL CIdx:", fl.cidx);
1933 		return 0;
1934 	}
1935 
1936 	r -= eth_entries;
1937 	if (r == 0) {
1938 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
1939 
1940 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
1941 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
1942 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1943 			   qtimer_val(adapter, evtq));
1944 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1945 			   adapter->sge.counter_val[evtq->pktcnt_idx]);
1946 	seq_printf(seq, "%-12s %16u\n", "RspQ CIdx:", evtq->cidx);
1947 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
1948 	} else if (r == 1) {
1949 		const struct sge_rspq *intrq = &adapter->sge.intrq;
1950 
1951 		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
1952 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
1953 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
1954 			   qtimer_val(adapter, intrq));
1955 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
1956 			   adapter->sge.counter_val[intrq->pktcnt_idx]);
1957 	seq_printf(seq, "%-12s %16u\n", "RspQ CIdx:", intrq->cidx);
1958 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
1959 	}
1960 
1961 	#undef R
1962 	#undef T
1963 	#undef S
1964 	#undef S3
1965 
1966 	return 0;
1967 }
1968 
1969 /*
1970  * Return the number of "entries" in our "file".  We group the multi-Queue
1971  * sections with QPL Queue Sets per "entry".  The sections of the output are:
1972  *
1973  *     Ethernet RX/TX Queue Sets
1974  *     Firmware Event Queue
1975  *     Forwarded Interrupt Queue (if in MSI mode)
1976  */
1977 static int sge_queue_entries(const struct adapter *adapter)
1978 {
1979 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
1980 		((adapter->flags & USING_MSI) != 0);
1981 }
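
/*
 * For example (hypothetical numbers): with 10 Ethernet Queue Sets and
 * QPL == 4 this comes to DIV_ROUND_UP(10, 4) == 3 Ethernet "entries",
 * plus 1 for the Firmware Event Queue, plus 1 more for the Forwarded
 * Interrupt Queue if we're in MSI mode -- 4 or 5 entries in total.
 */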
1982 
1983 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
1984 {
1985 	int entries = sge_queue_entries(seq->private);
1986 
1987 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
1988 }
1989 
1990 static void sge_queue_stop(struct seq_file *seq, void *v)
1991 {
1992 }
1993 
1994 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
1995 {
1996 	int entries = sge_queue_entries(seq->private);
1997 
1998 	++*pos;
1999 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2000 }
2001 
2002 static const struct seq_operations sge_qinfo_seq_ops = {
2003 	.start = sge_queue_start,
2004 	.next  = sge_queue_next,
2005 	.stop  = sge_queue_stop,
2006 	.show  = sge_qinfo_show
2007 };
2008 
2009 static int sge_qinfo_open(struct inode *inode, struct file *file)
2010 {
2011 	int res = seq_open(file, &sge_qinfo_seq_ops);
2012 
2013 	if (!res) {
2014 		struct seq_file *seq = file->private_data;
2015 		seq->private = inode->i_private;
2016 	}
2017 	return res;
2018 }
2019 
2020 static const struct file_operations sge_qinfo_debugfs_fops = {
2021 	.owner   = THIS_MODULE,
2022 	.open    = sge_qinfo_open,
2023 	.read    = seq_read,
2024 	.llseek  = seq_lseek,
2025 	.release = seq_release,
2026 };
2027 
2028 /*
2029  * Show SGE Queue Set statistics.  We again display QPL Queue Sets per
2030  * line, reusing the QPL constant defined above.
2031  */
2032 
2033 static int sge_qstats_show(struct seq_file *seq, void *v)
2034 {
2035 	struct adapter *adapter = seq->private;
2036 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2037 	int qs, r = (uintptr_t)v - 1;
2038 
2039 	if (r)
2040 		seq_putc(seq, '\n');
2041 
2042 	#define S3(fmt, s, v) \
2043 		do { \
2044 			seq_printf(seq, "%-16s", s); \
2045 			for (qs = 0; qs < n; ++qs) \
2046 				seq_printf(seq, " %8" fmt, v); \
2047 			seq_putc(seq, '\n'); \
2048 		} while (0)
2049 	#define S(s, v)		S3("s", s, v)
2050 
2051 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
2052 	#define T(s, v)		T3("lu", s, v)
2053 
2054 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
2055 	#define R(s, v)		R3("lu", s, v)
2056 
2057 	if (r < eth_entries) {
2058 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2059 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2060 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2061 
2062 		S("QType:", "Ethernet");
2063 		S("Interface:",
2064 		  (rxq[qs].rspq.netdev
2065 		   ? rxq[qs].rspq.netdev->name
2066 		   : "N/A"));
2067 		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2068 		R("RxPackets:", stats.pkts);
2069 		R("RxCSO:", stats.rx_cso);
2070 		R("VLANxtract:", stats.vlan_ex);
2071 		R("LROmerged:", stats.lro_merged);
2072 		R("LROpackets:", stats.lro_pkts);
2073 		R("RxDrops:", stats.rx_drops);
2074 		T("TSO:", tso);
2075 		T("TxCSO:", tx_cso);
2076 		T("VLANins:", vlan_ins);
2077 		T("TxQFull:", q.stops);
2078 		T("TxQRestarts:", q.restarts);
2079 		T("TxMapErr:", mapping_err);
2080 		R("FLAllocErr:", fl.alloc_failed);
2081 		R("FLLrgAlcErr:", fl.large_alloc_failed);
2082 		R("FLStarving:", fl.starving);
2083 		return 0;
2084 	}
2085 
2086 	r -= eth_entries;
2087 	if (r == 0) {
2088 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2089 
2090 		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2091 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2092 			   evtq->unhandled_irqs);
2093 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2094 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2095 	} else if (r == 1) {
2096 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2097 
2098 		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2099 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2100 			   intrq->unhandled_irqs);
2101 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2102 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2103 	}
2104 
2105 	#undef R
2106 	#undef T
2107 	#undef S
2108 	#undef R3
2109 	#undef T3
2110 	#undef S3
2111 
2112 	return 0;
2113 }
2114 
2115 /*
2116  * Return the number of "entries" in our "file".  We group the multi-Queue
2117  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2118  *
2119  *     Ethernet RX/TX Queue Sets
2120  *     Firmware Event Queue
2121  *     Forwarded Interrupt Queue (if in MSI mode)
2122  */
2123 static int sge_qstats_entries(const struct adapter *adapter)
2124 {
2125 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2126 		((adapter->flags & USING_MSI) != 0);
2127 }
2128 
2129 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2130 {
2131 	int entries = sge_qstats_entries(seq->private);
2132 
2133 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2134 }
2135 
2136 static void sge_qstats_stop(struct seq_file *seq, void *v)
2137 {
2138 }
2139 
2140 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2141 {
2142 	int entries = sge_qstats_entries(seq->private);
2143 
2144 	(*pos)++;
2145 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2146 }
2147 
2148 static const struct seq_operations sge_qstats_seq_ops = {
2149 	.start = sge_qstats_start,
2150 	.next  = sge_qstats_next,
2151 	.stop  = sge_qstats_stop,
2152 	.show  = sge_qstats_show
2153 };
2154 
2155 static int sge_qstats_open(struct inode *inode, struct file *file)
2156 {
2157 	int res = seq_open(file, &sge_qstats_seq_ops);
2158 
2159 	if (res == 0) {
2160 		struct seq_file *seq = file->private_data;
2161 		seq->private = inode->i_private;
2162 	}
2163 	return res;
2164 }
2165 
2166 static const struct file_operations sge_qstats_proc_fops = {
2167 	.owner   = THIS_MODULE,
2168 	.open    = sge_qstats_open,
2169 	.read    = seq_read,
2170 	.llseek  = seq_lseek,
2171 	.release = seq_release,
2172 };
2173 
2174 /*
2175  * Show PCI-E SR-IOV Virtual Function Resource Limits.
2176  */
2177 static int resources_show(struct seq_file *seq, void *v)
2178 {
2179 	struct adapter *adapter = seq->private;
2180 	struct vf_resources *vfres = &adapter->params.vfres;
2181 
2182 	#define S(desc, fmt, var) \
2183 		seq_printf(seq, "%-60s " fmt "\n", \
2184 			   desc " (" #var "):", vfres->var)
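
	/* As a sketch of the stringification above, S("Virtual Interfaces",
	 * "%d", nvi) prints a line of the form
	 *
	 *	Virtual Interfaces (nvi):                    <value>
	 *
	 * with the label left-justified in a 60-character field.
	 */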
2185 
2186 	S("Virtual Interfaces", "%d", nvi);
2187 	S("Egress Queues", "%d", neq);
2188 	S("Ethernet Control", "%d", nethctrl);
2189 	S("Ingress Queues w/Free Lists/Interrupts", "%d", niqflint);
2190 	S("Ingress Queues", "%d", niq);
2191 	S("Traffic Class", "%d", tc);
2192 	S("Port Access Rights Mask", "%#x", pmask);
2193 	S("MAC Address Filters", "%d", nexactf);
2194 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2195 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2196 
2197 	#undef S
2198 
2199 	return 0;
2200 }
2201 
2202 static int resources_open(struct inode *inode, struct file *file)
2203 {
2204 	return single_open(file, resources_show, inode->i_private);
2205 }
2206 
2207 static const struct file_operations resources_proc_fops = {
2208 	.owner   = THIS_MODULE,
2209 	.open    = resources_open,
2210 	.read    = seq_read,
2211 	.llseek  = seq_lseek,
2212 	.release = single_release,
2213 };
2214 
2215 /*
2216  * Show Virtual Interfaces.
2217  */
2218 static int interfaces_show(struct seq_file *seq, void *v)
2219 {
2220 	if (v == SEQ_START_TOKEN) {
2221 		seq_puts(seq, "Interface  Port   VIID\n");
2222 	} else {
2223 		struct adapter *adapter = seq->private;
2224 		int pidx = (uintptr_t)v - 2;
2225 		struct net_device *dev = adapter->port[pidx];
2226 		struct port_info *pi = netdev_priv(dev);
2227 
2228 		seq_printf(seq, "%9s  %4d  %#5x\n",
2229 			   dev->name, pi->port_id, pi->viid);
2230 	}
2231 	return 0;
2232 }
2233 
2234 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2235 {
2236 	return pos <= adapter->params.nports
2237 		? (void *)(uintptr_t)(pos + 1)
2238 		: NULL;
2239 }
2240 
2241 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2242 {
2243 	return *pos
2244 		? interfaces_get_idx(seq->private, *pos)
2245 		: SEQ_START_TOKEN;
2246 }
2247 
2248 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2249 {
2250 	(*pos)++;
2251 	return interfaces_get_idx(seq->private, *pos);
2252 }
2253 
2254 static void interfaces_stop(struct seq_file *seq, void *v)
2255 {
2256 }
2257 
2258 static const struct seq_operations interfaces_seq_ops = {
2259 	.start = interfaces_start,
2260 	.next  = interfaces_next,
2261 	.stop  = interfaces_stop,
2262 	.show  = interfaces_show
2263 };
2264 
2265 static int interfaces_open(struct inode *inode, struct file *file)
2266 {
2267 	int res = seq_open(file, &interfaces_seq_ops);
2268 
2269 	if (res == 0) {
2270 		struct seq_file *seq = file->private_data;
2271 		seq->private = inode->i_private;
2272 	}
2273 	return res;
2274 }
2275 
2276 static const struct file_operations interfaces_proc_fops = {
2277 	.owner   = THIS_MODULE,
2278 	.open    = interfaces_open,
2279 	.read    = seq_read,
2280 	.llseek  = seq_lseek,
2281 	.release = seq_release,
2282 };
2283 
2284 /*
2285  * /sys/kernel/debug/cxgb4vf/ files list.
2286  */
2287 struct cxgb4vf_debugfs_entry {
2288 	const char *name;		/* name of debugfs node */
2289 	umode_t mode;			/* file system mode */
2290 	const struct file_operations *fops;
2291 };
2292 
2293 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2294 	{ "mboxlog",    S_IRUGO, &mboxlog_fops },
2295 	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
2296 	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
2297 	{ "resources",  S_IRUGO, &resources_proc_fops },
2298 	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
2299 };
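
/*
 * These files appear under a per-device directory named after the PCI
 * device (created in cxgb4vf_pci_probe() below), so -- using a purely
 * hypothetical device address -- the VF resource limits could be read
 * with, e.g.:
 *
 *	cat /sys/kernel/debug/cxgb4vf/0000:04:00.1/resources
 */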
2300 
2301 /*
2302  * Module and device initialization and cleanup code.
2303  * ==================================================
2304  */
2305 
2306 /*
2307  * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
2308  * directory (debugfs_root) has already been set up.
2309  */
2310 static int setup_debugfs(struct adapter *adapter)
2311 {
2312 	int i;
2313 
2314 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2315 
2316 	/*
2317 	 * Debugfs support is best effort.
2318 	 */
2319 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2320 		(void)debugfs_create_file(debugfs_files[i].name,
2321 				  debugfs_files[i].mode,
2322 				  adapter->debugfs_root,
2323 				  (void *)adapter,
2324 				  debugfs_files[i].fops);
2325 
2326 	return 0;
2327 }
2328 
2329 /*
2330  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
2331  * it to our caller to tear down the directory (debugfs_root).
2332  */
2333 static void cleanup_debugfs(struct adapter *adapter)
2334 {
2335 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2336 
2337 	/*
2338 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
2339 	 * individual entries because a call will be made to
2340 	 * debugfs_remove_recursive().  We just need to clean up any ancillary
2341 	 * persistent state.
2342 	 */
2343 	/* nothing to do */
2344 }
2345 
2346 /* Figure out how many Ports and Queue Sets we can support.  This depends on
2347  * knowing our Virtual Function Resources and may be called a second time if
2348  * we fall back from MSI-X to MSI Interrupt Mode.
2349  */
2350 static void size_nports_qsets(struct adapter *adapter)
2351 {
2352 	struct vf_resources *vfres = &adapter->params.vfres;
2353 	unsigned int ethqsets, pmask_nports;
2354 
2355 	/* The number of "ports" which we support is equal to the number of
2356 	 * Virtual Interfaces with which we've been provisioned.
2357 	 */
2358 	adapter->params.nports = vfres->nvi;
2359 	if (adapter->params.nports > MAX_NPORTS) {
2360 		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2361 			 " allowed virtual interfaces\n", MAX_NPORTS,
2362 			 adapter->params.nports);
2363 		adapter->params.nports = MAX_NPORTS;
2364 	}
2365 
2366 	/* We may have been provisioned with more VIs than the number of
2367 	 * ports we're allowed to access (our Port Access Rights Mask).
2368 	 * This is obviously a configuration conflict but we don't want to
2369 	 * crash the kernel or anything silly just because of that.
2370 	 */
2371 	pmask_nports = hweight32(adapter->params.vfres.pmask);
2372 	if (pmask_nports < adapter->params.nports) {
2373 		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2374 			 " virtual interfaces; limited by Port Access Rights"
2375 			 " mask %#x\n", pmask_nports, adapter->params.nports,
2376 			 adapter->params.vfres.pmask);
2377 		adapter->params.nports = pmask_nports;
2378 	}
2379 
2380 	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
2381 	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
2382 	 * reserve an Ingress Queue for Forwarded Interrupts.
2383 	 *
2384 	 * The rest of the FL/Intr-capable ingress queues will be matched up
2385 	 * one-for-one with Ethernet/Control egress queues in order to form
2386 	 * "Queue Sets" which will be apportioned between the "ports".  For
2387 	 * each Queue Set, we'll need the ability to allocate two Egress
2388 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
2389 	 * Ethernet Queue.
2390 	 *
2391 	 * Note that even if we're currently configured to use MSI-X
2392 	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2393 	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
2394 	 * happens we'll need to adjust things later.
2395 	 */
2396 	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2397 	if (vfres->nethctrl != ethqsets)
2398 		ethqsets = min(vfres->nethctrl, ethqsets);
2399 	if (vfres->neq < ethqsets * 2)
2400 		ethqsets = vfres->neq / 2;
2401 	if (ethqsets > MAX_ETH_QSETS)
2402 		ethqsets = MAX_ETH_QSETS;
2403 	adapter->sge.max_ethqsets = ethqsets;
2404 
2405 	if (adapter->sge.max_ethqsets < adapter->params.nports) {
2406 		dev_warn(adapter->pdev_dev, "only using %d of %d available"
2407 			 " virtual interfaces (too few Queue Sets)\n",
2408 			 adapter->sge.max_ethqsets, adapter->params.nports);
2409 		adapter->params.nports = adapter->sge.max_ethqsets;
2410 	}
2411 }
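
/*
 * A worked example of the sizing above with hypothetical provisioning:
 * given niqflint = 8, nethctrl = 6, neq = 10 and MSI-X mode, we start
 * from 8 - 1 = 7 Queue Sets, clamp that to the 6 Ethernet Control
 * capabilities, and then to neq / 2 = 5 so that every Queue Set can
 * allocate both of its Egress Contexts (Free List plus TX Queue);
 * max_ethqsets therefore ends up as 5.
 */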
2412 
2413 /*
2414  * Perform early "adapter" initialization.  This is where we discover what
2415  * adapter parameters we're going to be using and initialize basic adapter
2416  * hardware support.
2417  */
2418 static int adap_init0(struct adapter *adapter)
2419 {
2420 	struct sge_params *sge_params = &adapter->params.sge;
2421 	struct sge *s = &adapter->sge;
2422 	int err;
2423 	u32 param, val = 0;
2424 
2425 	/*
2426 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2427 	 * 2.6.31 and later we can't call pci_reset_function() in order to
2428 	 * issue an FLR because of a self-deadlock on the device semaphore.
2429 	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2430 	 * cases where they're needed -- for instance, some versions of KVM
2431 	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
2432 	 * use the firmware based reset in order to reset any per function
2433 	 * state.
2434 	 */
2435 	err = t4vf_fw_reset(adapter);
2436 	if (err < 0) {
2437 		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2438 		return err;
2439 	}
2440 
2441 	/*
2442 	 * Grab basic operational parameters.  These will predominantly have
2443 	 * been set up by the Physical Function Driver or will be hard coded
2444 	 * into the adapter.  We just have to live with them ...  Note that
2445 	 * we _must_ get our VPD parameters before our SGE parameters because
2446 	 * we need to know the adapter's core clock from the VPD in order to
2447 	 * properly decode the SGE Timer Values.
2448 	 */
2449 	err = t4vf_get_dev_params(adapter);
2450 	if (err) {
2451 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2452 			" device parameters: err=%d\n", err);
2453 		return err;
2454 	}
2455 	err = t4vf_get_vpd_params(adapter);
2456 	if (err) {
2457 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2458 			" VPD parameters: err=%d\n", err);
2459 		return err;
2460 	}
2461 	err = t4vf_get_sge_params(adapter);
2462 	if (err) {
2463 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2464 			" SGE parameters: err=%d\n", err);
2465 		return err;
2466 	}
2467 	err = t4vf_get_rss_glb_config(adapter);
2468 	if (err) {
2469 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2470 			" RSS parameters: err=%d\n", err);
2471 		return err;
2472 	}
2473 	if (adapter->params.rss.mode !=
2474 	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2475 		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2476 			" mode %d\n", adapter->params.rss.mode);
2477 		return -EINVAL;
2478 	}
2479 	err = t4vf_sge_init(adapter);
2480 	if (err) {
2481 		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2482 			" err=%d\n", err);
2483 		return err;
2484 	}
2485 
2486 	/* If we're running on newer firmware, let it know that we're
2487 	 * prepared to deal with encapsulated CPL messages.  Older
2488 	 * firmware won't understand this and we'll just get
2489 	 * unencapsulated messages ...
2490 	 */
2491 	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2492 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2493 	val = 1;
2494 	(void) t4vf_set_params(adapter, 1, &param, &val);
2495 
2496 	/*
2497 	 * Retrieve our RX interrupt holdoff timer values and counter
2498 	 * threshold values from the SGE parameters.
2499 	 */
2500 	s->timer_val[0] = core_ticks_to_us(adapter,
2501 		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2502 	s->timer_val[1] = core_ticks_to_us(adapter,
2503 		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2504 	s->timer_val[2] = core_ticks_to_us(adapter,
2505 		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2506 	s->timer_val[3] = core_ticks_to_us(adapter,
2507 		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2508 	s->timer_val[4] = core_ticks_to_us(adapter,
2509 		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2510 	s->timer_val[5] = core_ticks_to_us(adapter,
2511 		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2512 
2513 	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2514 	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2515 	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2516 	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2517 
2518 	/*
2519 	 * Grab our Virtual Interface resource allocation, extract the
2520 	 * features that we're interested in and do a bit of sanity testing on
2521 	 * what we discover.
2522 	 */
2523 	err = t4vf_get_vfres(adapter);
2524 	if (err) {
2525 		dev_err(adapter->pdev_dev, "unable to get virtual interface"
2526 			" resources: err=%d\n", err);
2527 		return err;
2528 	}
2529 
2530 	/* Check for various parameter sanity issues */
2531 	if (adapter->params.vfres.pmask == 0) {
2532 		dev_err(adapter->pdev_dev, "no port access configured/"
2533 			"usable!\n");
2534 		return -EINVAL;
2535 	}
2536 	if (adapter->params.vfres.nvi == 0) {
2537 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2538 			"usable!\n");
2539 		return -EINVAL;
2540 	}
2541 
2542 	/* Initialize nports and max_ethqsets now that we have our Virtual
2543 	 * Function Resources.
2544 	 */
2545 	size_nports_qsets(adapter);
2546 
2547 	return 0;
2548 }
2549 
2550 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2551 			     u8 pkt_cnt_idx, unsigned int size,
2552 			     unsigned int iqe_size)
2553 {
2554 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2555 			     (pkt_cnt_idx < SGE_NCOUNTERS ?
2556 			      QINTR_CNT_EN_F : 0));
2557 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2558 			    ? pkt_cnt_idx
2559 			    : 0);
2560 	rspq->iqe_len = iqe_size;
2561 	rspq->size = size;
2562 }
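
/*
 * For example, init_rspq(&q, 5, 8, 512, 64) -- hypothetical arguments,
 * and assuming SGE_NCOUNTERS is 4 -- selects holdoff timer index 5,
 * leaves the interrupt packet counter disabled (8 >= SGE_NCOUNTERS, so
 * QINTR_CNT_EN_F stays clear and pktcnt_idx falls back to 0), and sets
 * up a 512-entry Response Queue of 64-byte entries.
 */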
2563 
2564 /*
2565  * Perform default configuration of DMA queues depending on the number and
2566  * type of ports we found and the number of available CPUs.  Most settings can
2567  * be modified by the admin via ethtool and cxgbtool prior to the adapter
2568  * being brought up for the first time.
2569  */
2570 static void cfg_queues(struct adapter *adapter)
2571 {
2572 	struct sge *s = &adapter->sge;
2573 	int q10g, n10g, qidx, pidx, qs;
2574 	size_t iqe_size;
2575 
2576 	/*
2577 	 * We should not be called till we know how many Queue Sets we can
2578 	 * support.  In particular, this means that we need to know what kind
2579 	 * of interrupts we'll be using ...
2580 	 */
2581 	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2582 
2583 	/*
2584 	 * Count the number of 10GbE Virtual Interfaces that we have.
2585 	 */
2586 	n10g = 0;
2587 	for_each_port(adapter, pidx)
2588 		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2589 
2590 	/*
2591 	 * We default to one Queue Set per non-10G port and up to as many
2592 	 * Queue Sets per 10G port as there are online CPU cores.
2593 	 */
2594 	if (n10g == 0)
2595 		q10g = 0;
2596 	else {
2597 		int n1g = (adapter->params.nports - n10g);
2598 		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2599 		if (q10g > num_online_cpus())
2600 			q10g = num_online_cpus();
2601 	}
2602 
2603 	/*
2604 	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2605 	 * The layout will be established in setup_sge_queues() when the
2606 	 * adapter is brought up for the first time.
2607 	 */
2608 	qidx = 0;
2609 	for_each_port(adapter, pidx) {
2610 		struct port_info *pi = adap2pinfo(adapter, pidx);
2611 
2612 		pi->first_qset = qidx;
2613 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2614 		qidx += pi->nqsets;
2615 	}
2616 	s->ethqsets = qidx;
2617 
2618 	/*
2619 	 * The Ingress Queue Entry Size for our various Response Queues needs
2620 	 * to be big enough to accommodate the largest message we can receive
2621 	 * from the chip/firmware; which is 64 bytes ...
2622 	 */
2623 	iqe_size = 64;
2624 
2625 	/*
2626 	 * Set up default Queue Set parameters ...  Start off with the
2627 	 * shortest interrupt holdoff timer.
2628 	 */
2629 	for (qs = 0; qs < s->max_ethqsets; qs++) {
2630 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2631 		struct sge_eth_txq *txq = &s->ethtxq[qs];
2632 
2633 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2634 		rxq->fl.size = 72;
2635 		txq->q.size = 1024;
2636 	}
2637 
2638 	/*
2639 	 * The firmware event queue is used for link state changes and
2640 	 * notifications of TX DMA completions.
2641 	 */
2642 	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2643 
2644 	/*
2645 	 * The forwarded interrupt queue is used when we're in MSI interrupt
2646 	 * mode.  In this mode all interrupts associated with RX queues will
2647 	 * be forwarded to a single queue which we'll associate with our MSI
2648 	 * interrupt vector.  The messages dropped in the forwarded interrupt
2649 	 * queue will indicate which ingress queue needs servicing ...  This
2650 	 * queue needs to be large enough to accommodate all of the ingress
2651 	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2652 	 * from equalling the CIDX if every ingress queue has an outstanding
2653 	 * interrupt).  The queue doesn't need to be any larger because no
2654 	 * ingress queue will ever have more than one outstanding interrupt at
2655 	 * any time ...
2656 	 */
2657 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2658 		  iqe_size);
2659 }
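
/*
 * As a hypothetical sizing example for the above: with two ports (one
 * 10G, one 1G), max_ethqsets = 8 and at least seven online CPUs,
 * cfg_queues() gives the 10G port q10g = (8 - 1) / 1 = 7 Queue Sets and
 * the 1G port a single Queue Set, for s->ethqsets = 8 in total.
 */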
2660 
2661 /*
2662  * Reduce the number of Ethernet queues across all ports to at most n.
2663  * n provides at least one queue per port.
2664  */
2665 static void reduce_ethqs(struct adapter *adapter, int n)
2666 {
2667 	int i;
2668 	struct port_info *pi;
2669 
2670 	/*
2671 	 * While we have too many active Ethernet Queue Sets, iterate across the
2672 	 * "ports" and reduce their individual Queue Set allocations.
2673 	 */
2674 	BUG_ON(n < adapter->params.nports);
2675 	while (n < adapter->sge.ethqsets)
2676 		for_each_port(adapter, i) {
2677 			pi = adap2pinfo(adapter, i);
2678 			if (pi->nqsets > 1) {
2679 				pi->nqsets--;
2680 				adapter->sge.ethqsets--;
2681 				if (adapter->sge.ethqsets <= n)
2682 					break;
2683 			}
2684 		}
2685 
2686 	/*
2687 	 * Reassign the starting Queue Sets for each of the "ports" ...
2688 	 */
2689 	n = 0;
2690 	for_each_port(adapter, i) {
2691 		pi = adap2pinfo(adapter, i);
2692 		pi->first_qset = n;
2693 		n += pi->nqsets;
2694 	}
2695 }
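
/*
 * For example (hypothetical layout): two ports each holding 4 Queue Sets
 * (ethqsets == 8) reduced to n == 5 lose one Queue Set apiece on the
 * first sweep (8 -> 6), and the first port loses one more on the second
 * sweep (6 -> 5), leaving nqsets of 2 and 3 with first_qset 0 and 2.
 */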
2696 
2697 /*
2698  * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
2699  * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2700  * need.  Minimally we need one for every Virtual Interface plus those needed
2701  * for our "extras".  Note that this process may lower the maximum number of
2702  * allowed Queue Sets ...
2703  */
2704 static int enable_msix(struct adapter *adapter)
2705 {
2706 	int i, want, need, nqsets;
2707 	struct msix_entry entries[MSIX_ENTRIES];
2708 	struct sge *s = &adapter->sge;
2709 
2710 	for (i = 0; i < MSIX_ENTRIES; ++i)
2711 		entries[i].entry = i;
2712 
2713 	/*
2714 	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2715 	 * plus those needed for our "extras" (for example, the firmware
2716 	 * message queue).  We _need_ at least one "Queue Set" per Virtual
2717 	 * Interface plus those needed for our "extras".  So now we get to see
2718 	 * if the song is right ...
2719 	 */
2720 	want = s->max_ethqsets + MSIX_EXTRAS;
2721 	need = adapter->params.nports + MSIX_EXTRAS;
2722 
2723 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2724 	if (want < 0)
2725 		return want;
2726 
2727 	nqsets = want - MSIX_EXTRAS;
2728 	if (nqsets < s->max_ethqsets) {
2729 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2730 			 " for %d Queue Sets\n", nqsets);
2731 		s->max_ethqsets = nqsets;
2732 		if (nqsets < s->ethqsets)
2733 			reduce_ethqs(adapter, nqsets);
2734 	}
2735 	for (i = 0; i < want; ++i)
2736 		adapter->msix_info[i].vec = entries[i].vector;
2737 
2738 	return 0;
2739 }
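
/*
 * A sketch of the arithmetic above, assuming -- hypothetically -- that
 * MSIX_EXTRAS is 1 and we have 2 ports with max_ethqsets == 8: we ask
 * for want = 9 vectors but can live with need = 3.  If the kernel only
 * grants us 6, nqsets becomes 5, max_ethqsets is trimmed to 5, and
 * reduce_ethqs() redistributes the remaining Queue Sets across the
 * ports.
 */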
2740 
2741 static const struct net_device_ops cxgb4vf_netdev_ops	= {
2742 	.ndo_open		= cxgb4vf_open,
2743 	.ndo_stop		= cxgb4vf_stop,
2744 	.ndo_start_xmit		= t4vf_eth_xmit,
2745 	.ndo_get_stats		= cxgb4vf_get_stats,
2746 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
2747 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
2748 	.ndo_validate_addr	= eth_validate_addr,
2749 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
2750 	.ndo_change_mtu		= cxgb4vf_change_mtu,
2751 	.ndo_fix_features	= cxgb4vf_fix_features,
2752 	.ndo_set_features	= cxgb4vf_set_features,
2753 #ifdef CONFIG_NET_POLL_CONTROLLER
2754 	.ndo_poll_controller	= cxgb4vf_poll_controller,
2755 #endif
2756 };
2757 
2758 /*
2759  * "Probe" a device: initialize a device and construct all kernel and driver
2760  * state needed to manage the device.  This routine is called "init_one" in
2761  * the PF Driver ...
2762  */
2763 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2764 			     const struct pci_device_id *ent)
2765 {
2766 	int pci_using_dac;
2767 	int err, pidx;
2768 	unsigned int pmask;
2769 	struct adapter *adapter;
2770 	struct port_info *pi;
2771 	struct net_device *netdev;
2772 	unsigned int pf;
2773 
2774 	/*
2775 	 * Print our driver banner the first time we're called to initialize a
2776 	 * device.
2777 	 */
2778 	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
2779 
2780 	/*
2781 	 * Initialize generic PCI device state.
2782 	 */
2783 	err = pci_enable_device(pdev);
2784 	if (err) {
2785 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2786 		return err;
2787 	}
2788 
2789 	/*
2790 	 * Reserve PCI resources for the device.  If we can't get them some
2791 	 * other driver may have already claimed the device ...
2792 	 */
2793 	err = pci_request_regions(pdev, KBUILD_MODNAME);
2794 	if (err) {
2795 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2796 		goto err_disable_device;
2797 	}
2798 
2799 	/*
2800 	 * Set up our DMA mask: try for 64-bit address masking first and
2801 	 * fall back to 32-bit if we can't get 64 bits ...
2802 	 */
2803 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2804 	if (err == 0) {
2805 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2806 		if (err) {
2807 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2808 				" coherent allocations\n");
2809 			goto err_release_regions;
2810 		}
2811 		pci_using_dac = 1;
2812 	} else {
2813 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2814 		if (err != 0) {
2815 			dev_err(&pdev->dev, "no usable DMA configuration\n");
2816 			goto err_release_regions;
2817 		}
2818 		pci_using_dac = 0;
2819 	}
2820 
2821 	/*
2822 	 * Enable bus mastering for the device ...
2823 	 */
2824 	pci_set_master(pdev);
2825 
2826 	/*
2827 	 * Allocate our adapter data structure and attach it to the device.
2828 	 */
2829 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2830 	if (!adapter) {
2831 		err = -ENOMEM;
2832 		goto err_release_regions;
2833 	}
2834 	pci_set_drvdata(pdev, adapter);
2835 	adapter->pdev = pdev;
2836 	adapter->pdev_dev = &pdev->dev;
2837 
2838 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2839 				    (sizeof(struct mbox_cmd) *
2840 				     T4VF_OS_LOG_MBOX_CMDS),
2841 				    GFP_KERNEL);
2842 	if (!adapter->mbox_log) {
2843 		err = -ENOMEM;
2844 		goto err_free_adapter;
2845 	}
2846 	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2847 
2848 	/*
2849 	 * Initialize SMP data synchronization resources.
2850 	 */
2851 	spin_lock_init(&adapter->stats_lock);
2852 	spin_lock_init(&adapter->mbox_lock);
2853 	INIT_LIST_HEAD(&adapter->mlist.list);
2854 
2855 	/*
2856 	 * Map our I/O registers in BAR0.
2857 	 */
2858 	adapter->regs = pci_ioremap_bar(pdev, 0);
2859 	if (!adapter->regs) {
2860 		dev_err(&pdev->dev, "cannot map device registers\n");
2861 		err = -ENOMEM;
2862 		goto err_free_adapter;
2863 	}
2864 
2865 	/* Wait for the device to become ready before proceeding ...
2866 	 */
2867 	err = t4vf_prep_adapter(adapter);
2868 	if (err) {
2869 		dev_err(adapter->pdev_dev, "device didn't become ready:"
2870 			" err=%d\n", err);
2871 		goto err_unmap_bar0;
2872 	}
2873 
2874 	/* For T5 and later we want to use the new BAR-based User Doorbells,
2875 	 * so we need to map BAR2 here ...
2876 	 */
2877 	if (!is_t4(adapter->params.chip)) {
2878 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2879 					   pci_resource_len(pdev, 2));
2880 		if (!adapter->bar2) {
2881 			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2882 			err = -ENOMEM;
2883 			goto err_unmap_bar0;
2884 		}
2885 	}
2886 	/*
2887 	 * Initialize adapter level features.
2888 	 */
2889 	adapter->name = pci_name(pdev);
2890 	adapter->msg_enable = DFLT_MSG_ENABLE;
2891 	err = adap_init0(adapter);
2892 	if (err)
2893 		goto err_unmap_bar;
2894 
2895 	/*
2896 	 * Allocate our "adapter ports" and stitch everything together.
2897 	 */
2898 	pmask = adapter->params.vfres.pmask;
2899 	pf = t4vf_get_pf_from_vf(adapter);
2900 	for_each_port(adapter, pidx) {
2901 		int port_id, viid;
2902 		u8 mac[ETH_ALEN];
2903 		unsigned int naddr = 1;
2904 
2905 		/*
2906 		 * We simplistically allocate our virtual interfaces
2907 		 * sequentially across the port numbers to which we have
2908 		 * access rights.  This should be configurable in some manner
2909 		 * ...
2910 		 */
2911 		if (pmask == 0)
2912 			break;
2913 		port_id = ffs(pmask) - 1;
2914 		pmask &= ~(1 << port_id);
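		/* (With a hypothetical pmask of 0x5, ffs() would select
		 * port 0 on the first pass and port 2 on the second.)
		 */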
2915 		viid = t4vf_alloc_vi(adapter, port_id);
2916 		if (viid < 0) {
2917 			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
2918 				" err=%d\n", port_id, viid);
2919 			err = viid;
2920 			goto err_free_dev;
2921 		}
2922 
2923 		/*
2924 		 * Allocate our network device and stitch things together.
2925 		 */
2926 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
2927 					   MAX_PORT_QSETS);
2928 		if (netdev == NULL) {
2929 			t4vf_free_vi(adapter, viid);
2930 			err = -ENOMEM;
2931 			goto err_free_dev;
2932 		}
2933 		adapter->port[pidx] = netdev;
2934 		SET_NETDEV_DEV(netdev, &pdev->dev);
2935 		pi = netdev_priv(netdev);
2936 		pi->adapter = adapter;
2937 		pi->pidx = pidx;
2938 		pi->port_id = port_id;
2939 		pi->viid = viid;
2940 
2941 		/*
2942 		 * Initialize the starting state of our "port" and register
2943 		 * it.
2944 		 */
2945 		pi->xact_addr_filt = -1;
2946 		netif_carrier_off(netdev);
2947 		netdev->irq = pdev->irq;
2948 
2949 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
2950 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2951 			NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2952 		netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
2953 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2954 			NETIF_F_HIGHDMA;
2955 		netdev->features = netdev->hw_features |
2956 				   NETIF_F_HW_VLAN_CTAG_TX;
2957 		if (pci_using_dac)
2958 			netdev->features |= NETIF_F_HIGHDMA;
2959 
2960 		netdev->priv_flags |= IFF_UNICAST_FLT;
2961 		netdev->min_mtu = 81;
2962 		netdev->max_mtu = ETH_MAX_MTU;
2963 
2964 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
2965 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
2966 		netdev->dev_port = pi->port_id;
2967 
2968 		/*
2969 		 * Initialize the hardware/software state for the port.
2970 		 */
2971 		err = t4vf_port_init(adapter, pidx);
2972 		if (err) {
2973 			dev_err(&pdev->dev, "cannot initialize port %d\n",
2974 				pidx);
2975 			goto err_free_dev;
2976 		}
2977 
2978 		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
2979 		if (err) {
2980 			dev_err(&pdev->dev,
2981 				"unable to determine MAC ACL address, "
2982 				"continuing anyway.. (status %d)\n", err);
2983 		} else if (naddr && adapter->params.vfres.nvi == 1) {
2984 			struct sockaddr addr;
2985 
2986 			ether_addr_copy(addr.sa_data, mac);
2987 			err = cxgb4vf_set_mac_addr(netdev, &addr);
2988 			if (err) {
2989 				dev_err(&pdev->dev,
2990 					"unable to set MAC address %pM\n",
2991 					mac);
2992 				goto err_free_dev;
2993 			}
2994 			dev_info(&pdev->dev,
2995 				 "Using assigned MAC ACL: %pM\n", mac);
2996 		}
2997 	}
2998 
2999 	/* See what interrupts we'll be using.  If we've been configured to
3000 	 * use MSI-X interrupts, try to enable them but fall back to using
3001 	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
3002 	 * get MSI interrupts we bail with the error.
3003 	 */
3004 	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
3005 		adapter->flags |= USING_MSIX;
3006 	else {
3007 		if (msi == MSI_MSIX) {
3008 			dev_info(adapter->pdev_dev,
3009 				 "Unable to use MSI-X Interrupts; falling "
3010 				 "back to MSI Interrupts\n");
3011 
3012 			/* We're going to need a Forwarded Interrupt Queue so
3013 			 * that may cut into how many Queue Sets we can
3014 			 * support.
3015 			 */
3016 			msi = MSI_MSI;
3017 			size_nports_qsets(adapter);
3018 		}
3019 		err = pci_enable_msi(pdev);
3020 		if (err) {
3021 			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3022 				" err=%d\n", err);
3023 			goto err_free_dev;
3024 		}
3025 		adapter->flags |= USING_MSI;
3026 	}
3027 
3028 	/* Now that we know how many "ports" we have and what interrupt
3029 	 * mechanism we're going to use, we can configure our queue resources.
3030 	 */
3031 	cfg_queues(adapter);
3032 
3033 	/*
3034 	 * The "card" is now ready to go.  If any errors occur during device
3035 	 * registration we do not fail the whole "card" but rather proceed
3036 	 * only with the ports we manage to register successfully.  However we
3037 	 * must register at least one net device.
3038 	 */
3039 	for_each_port(adapter, pidx) {
3040 		struct port_info *pi = netdev_priv(adapter->port[pidx]);
3041 		netdev = adapter->port[pidx];
3042 		if (netdev == NULL)
3043 			continue;
3044 
3045 		netif_set_real_num_tx_queues(netdev, pi->nqsets);
3046 		netif_set_real_num_rx_queues(netdev, pi->nqsets);
3047 
3048 		err = register_netdev(netdev);
3049 		if (err) {
3050 			dev_warn(&pdev->dev, "cannot register net device %s,"
3051 				 " skipping\n", netdev->name);
3052 			continue;
3053 		}
3054 
3055 		set_bit(pidx, &adapter->registered_device_map);
3056 	}
3057 	if (adapter->registered_device_map == 0) {
3058 		dev_err(&pdev->dev, "could not register any net devices\n");
3059 		goto err_disable_interrupts;
3060 	}
3061 
3062 	/*
3063 	 * Set up our debugfs entries.
3064 	 */
3065 	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3066 		adapter->debugfs_root =
3067 			debugfs_create_dir(pci_name(pdev),
3068 					   cxgb4vf_debugfs_root);
3069 		if (IS_ERR_OR_NULL(adapter->debugfs_root))
3070 			dev_warn(&pdev->dev, "could not create debugfs"
3071 				 " directory");
3072 		else
3073 			setup_debugfs(adapter);
3074 	}
3075 
3076 	/*
3077 	 * Print a short notice on the existence and configuration of the new
3078 	 * VF network device ...
3079 	 */
3080 	for_each_port(adapter, pidx) {
3081 		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3082 			 adapter->port[pidx]->name,
3083 			 (adapter->flags & USING_MSIX) ? "MSI-X" :
3084 			 (adapter->flags & USING_MSI)  ? "MSI" : "");
3085 	}
3086 
3087 	/*
3088 	 * Return success!
3089 	 */
3090 	return 0;
3091 
3092 	/*
3093 	 * Error recovery and exit code.  Unwind state that's been created
3094 	 * so far and return the error.
3095 	 */
3096 err_disable_interrupts:
3097 	if (adapter->flags & USING_MSIX) {
3098 		pci_disable_msix(adapter->pdev);
3099 		adapter->flags &= ~USING_MSIX;
3100 	} else if (adapter->flags & USING_MSI) {
3101 		pci_disable_msi(adapter->pdev);
3102 		adapter->flags &= ~USING_MSI;
3103 	}
3104 
3105 err_free_dev:
3106 	for_each_port(adapter, pidx) {
3107 		netdev = adapter->port[pidx];
3108 		if (netdev == NULL)
3109 			continue;
3110 		pi = netdev_priv(netdev);
3111 		t4vf_free_vi(adapter, pi->viid);
3112 		if (test_bit(pidx, &adapter->registered_device_map))
3113 			unregister_netdev(netdev);
3114 		free_netdev(netdev);
3115 	}
3116 
3117 err_unmap_bar:
3118 	if (!is_t4(adapter->params.chip))
3119 		iounmap(adapter->bar2);
3120 
3121 err_unmap_bar0:
3122 	iounmap(adapter->regs);
3123 
3124 err_free_adapter:
3125 	kfree(adapter->mbox_log);
3126 	kfree(adapter);
3127 
3128 err_release_regions:
3129 	pci_release_regions(pdev);
3130 	pci_clear_master(pdev);
3131 
3132 err_disable_device:
3133 	pci_disable_device(pdev);
3134 
3135 	return err;
3136 }
3137 
3138 /*
3139  * "Remove" a device: tear down all kernel and driver state created in the
3140  * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
3141  * that this is called "remove_one" in the PF Driver.)
3142  */
3143 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3144 {
3145 	struct adapter *adapter = pci_get_drvdata(pdev);
3146 
3147 	/*
3148 	 * Tear down driver state associated with device.
3149 	 */
3150 	if (adapter) {
3151 		int pidx;
3152 
3153 		/*
3154 		 * Stop all of our activity.  Unregister network port,
3155 		 * disable interrupts, etc.
3156 		 */
3157 		for_each_port(adapter, pidx)
3158 			if (test_bit(pidx, &adapter->registered_device_map))
3159 				unregister_netdev(adapter->port[pidx]);
3160 		t4vf_sge_stop(adapter);
3161 		if (adapter->flags & USING_MSIX) {
3162 			pci_disable_msix(adapter->pdev);
3163 			adapter->flags &= ~USING_MSIX;
3164 		} else if (adapter->flags & USING_MSI) {
3165 			pci_disable_msi(adapter->pdev);
3166 			adapter->flags &= ~USING_MSI;
3167 		}
3168 
3169 		/*
3170 		 * Tear down our debugfs entries.
3171 		 */
3172 		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3173 			cleanup_debugfs(adapter);
3174 			debugfs_remove_recursive(adapter->debugfs_root);
3175 		}
3176 
3177 		/*
3178 		 * Free all of the various resources which we've acquired ...
3179 		 */
3180 		t4vf_free_sge_resources(adapter);
3181 		for_each_port(adapter, pidx) {
3182 			struct net_device *netdev = adapter->port[pidx];
3183 			struct port_info *pi;
3184 
3185 			if (netdev == NULL)
3186 				continue;
3187 
3188 			pi = netdev_priv(netdev);
3189 			t4vf_free_vi(adapter, pi->viid);
3190 			free_netdev(netdev);
3191 		}
3192 		iounmap(adapter->regs);
3193 		if (!is_t4(adapter->params.chip))
3194 			iounmap(adapter->bar2);
3195 		kfree(adapter->mbox_log);
3196 		kfree(adapter);
3197 	}
3198 
3199 	/*
3200 	 * Disable the device and release its PCI resources.
3201 	 */
3202 	pci_disable_device(pdev);
3203 	pci_clear_master(pdev);
3204 	pci_release_regions(pdev);
3205 }
3206 
3207 /*
3208  * "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
3209  * delivery.
3210  */
3211 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3212 {
3213 	struct adapter *adapter;
3214 	int pidx;
3215 
3216 	adapter = pci_get_drvdata(pdev);
3217 	if (!adapter)
3218 		return;
3219 
3220 	/* Disable all Virtual Interfaces.  This will shut down the
3221 	 * delivery of all ingress packets into the chip for these
3222 	 * Virtual Interfaces.
3223 	 */
3224 	for_each_port(adapter, pidx)
3225 		if (test_bit(pidx, &adapter->registered_device_map))
3226 			unregister_netdev(adapter->port[pidx]);
3227 
3228 	/* Stop the SGE and release our interrupt vectors so that no new
3229 	 * ingress or interrupt processing takes place.
3230 	 */
3231 	t4vf_sge_stop(adapter);
3232 	if (adapter->flags & USING_MSIX) {
3233 		pci_disable_msix(adapter->pdev);
3234 		adapter->flags &= ~USING_MSIX;
3235 	} else if (adapter->flags & USING_MSI) {
3236 		pci_disable_msi(adapter->pdev);
3237 		adapter->flags &= ~USING_MSI;
3238 	}
3239 
3240 	/*
3241 	 * Free up all Queues which will prevent further DMA and
3242 	 * Interrupts allowing various internal pathways to drain.
3243 	 */
3244 	t4vf_free_sge_resources(adapter);
3245 	pci_set_drvdata(pdev, NULL);
3246 }
3247 
3248 /* Macros needed to support the PCI Device ID Table ...
3249  */
3250 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3251 	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3252 #define CH_PCI_DEVICE_ID_FUNCTION	0x8
3253 
3254 #define CH_PCI_ID_TABLE_ENTRY(devid) \
3255 		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
3256 
3257 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3258 
3259 #include "../cxgb4/t4_pci_id_tbl.h"
3260 
3261 MODULE_DESCRIPTION(DRV_DESC);
3262 MODULE_AUTHOR("Chelsio Communications");
3263 MODULE_LICENSE("Dual BSD/GPL");
3264 MODULE_VERSION(DRV_VERSION);
3265 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3266 
3267 static struct pci_driver cxgb4vf_driver = {
3268 	.name		= KBUILD_MODNAME,
3269 	.id_table	= cxgb4vf_pci_tbl,
3270 	.probe		= cxgb4vf_pci_probe,
3271 	.remove		= cxgb4vf_pci_remove,
3272 	.shutdown	= cxgb4vf_pci_shutdown,
3273 };
3274 
3275 /*
3276  * Initialize global driver state.
3277  */
3278 static int __init cxgb4vf_module_init(void)
3279 {
3280 	int ret;
3281 
3282 	/*
3283 	 * Vet our module parameters.
3284 	 */
3285 	if (msi != MSI_MSIX && msi != MSI_MSI) {
3286 		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3287 			msi, MSI_MSIX, MSI_MSI);
3288 		return -EINVAL;
3289 	}
3290 
3291 	/* Debugfs support is optional, just warn if this fails */
3292 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3293 	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3294 		pr_warn("could not create debugfs entry, continuing\n");
3295 
3296 	ret = pci_register_driver(&cxgb4vf_driver);
3297 	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
3298 		debugfs_remove(cxgb4vf_debugfs_root);
3299 	return ret;
3300 }
3301 
3302 /*
3303  * Tear down global driver state.
3304  */
3305 static void __exit cxgb4vf_module_exit(void)
3306 {
3307 	pci_unregister_driver(&cxgb4vf_driver);
3308 	debugfs_remove(cxgb4vf_debugfs_root);
3309 }
3310 
3311 module_init(cxgb4vf_module_init);
3312 module_exit(cxgb4vf_module_exit);
3313