xref: /illumos-gate/usr/src/uts/common/io/i40e/core/i40e_adminq.c (revision c94be9439c4f0773ef60e2cec21d548359cfea20)
/******************************************************************************

  Copyright (c) 2013-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This only records the head/tail/len/base register offsets in the local
 *  struct; it touches no hardware and has no allocation prerequisites
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
	/* set head and tail registers in our local struct */
	if (i40e_is_vf(hw)) {
		hw->aq.asq.tail = I40E_VF_ATQT1;
		hw->aq.asq.head = I40E_VF_ATQH1;
		hw->aq.asq.len  = I40E_VF_ATQLEN1;
		hw->aq.asq.bal  = I40E_VF_ATQBAL1;
		hw->aq.asq.bah  = I40E_VF_ATQBAH1;
		hw->aq.arq.tail = I40E_VF_ARQT1;
		hw->aq.arq.head = I40E_VF_ARQH1;
		hw->aq.arq.len  = I40E_VF_ARQLEN1;
		hw->aq.arq.bal  = I40E_VF_ARQBAL1;
		hw->aq.arq.bah  = I40E_VF_ARQBAH1;
	} else {
		hw->aq.asq.tail = I40E_PF_ATQT;
		hw->aq.asq.head = I40E_PF_ATQH;
		hw->aq.asq.len  = I40E_PF_ATQLEN;
		hw->aq.asq.bal  = I40E_PF_ATQBAL;
		hw->aq.asq.bah  = I40E_PF_ATQBAH;
		hw->aq.arq.tail = I40E_PF_ARQT;
		hw->aq.arq.head = I40E_PF_ARQH;
		hw->aq.arq.len  = I40E_PF_ARQLEN;
		hw->aq.arq.bal  = I40E_PF_ARQBAL;
		hw->aq.arq.bah  = I40E_PF_ARQBAH;
	}
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
					 i40e_mem_atq_ring,
					 (hw->aq.num_asq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);
	if (ret_code)
		return ret_code;

	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
					  (hw->aq.num_asq_entries *
					  sizeof(struct i40e_asq_cmd_details)));
	if (ret_code) {
		i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
		return ret_code;
	}

	return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;

	ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
					 i40e_mem_arq_ring,
					 (hw->aq.num_arq_entries *
					 sizeof(struct i40e_aq_desc)),
					 I40E_ADMINQ_DESC_ALIGNMENT);

	return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */

	/* buffer_info structures do not need alignment */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
		(hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_arq_bufs;
	hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++) {
		bi = &hw->aq.arq.r.arq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_arq_buf,
						 hw->aq.arq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_arq_bufs;

		/* now configure the descriptors for use */
		desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

		desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
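		/* buffers above the large-buffer threshold (I40E_AQ_LARGE_BUF,
		 * 512 bytes in i40e_adminq.h) must also advertise the Large
		 * Buffer flag
		 */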
		if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
			desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with the Admin queue design; there is
		 * no register for buffer size configuration
		 */
		desc->datalen = CPU_TO_LE16((u16)bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.external.addr_high =
			CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
		desc->params.external.addr_low =
			CPU_TO_LE32(I40E_LO_DWORD(bi->pa));
		desc->params.external.param0 = 0;
		desc->params.external.param1 = 0;
	}

alloc_arq_bufs:
	return ret_code;

unwind_alloc_arq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

	return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum i40e_status_code i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code;
	struct i40e_dma_mem *bi;
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
		(hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
	if (ret_code)
		goto alloc_asq_bufs;
	hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

	/* allocate the mapped buffers */
	for (i = 0; i < hw->aq.num_asq_entries; i++) {
		bi = &hw->aq.asq.r.asq_bi[i];
		ret_code = i40e_allocate_dma_mem(hw, bi,
						 i40e_mem_asq_buf,
						 hw->aq.asq_buf_size,
						 I40E_ADMINQ_DESC_ALIGNMENT);
		if (ret_code)
			goto unwind_alloc_asq_bufs;
	}
alloc_asq_bufs:
	return ret_code;

unwind_alloc_asq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--)
		i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

	return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
	int i;

	/* free the posted receive buffers */
	for (i = 0; i < hw->aq.num_arq_entries; i++)
		i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
	int i;

	/* only unmap if the address is non-NULL */
	for (i = 0; i < hw->aq.num_asq_entries; i++)
		if (hw->aq.asq.r.asq_bi[i].pa)
			i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

	/* free the buffer info list */
	i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

	/* free the descriptor memory */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

	/* free the dma header */
	i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum i40e_status_code i40e_config_asq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
				  I40E_PF_ATQLEN_ATQENABLE_MASK));
	wr32(hw, hw->aq.asq.bal, I40E_LO_DWORD(hw->aq.asq.desc_buf.pa));
	wr32(hw, hw->aq.asq.bah, I40E_HI_DWORD(hw->aq.asq.desc_buf.pa));

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.asq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.asq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum i40e_status_code i40e_config_arq_regs(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 reg = 0;

	/* Clear Head and Tail */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);

	/* set starting point */
	wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
				  I40E_PF_ARQLEN_ARQENABLE_MASK));
	wr32(hw, hw->aq.arq.bal, I40E_LO_DWORD(hw->aq.arq.desc_buf.pa));
	wr32(hw, hw->aq.arq.bah, I40E_HI_DWORD(hw->aq.arq.desc_buf.pa));

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

	/* Check one register to verify that config was applied */
	reg = rd32(hw, hw->aq.arq.bal);
	if (reg != I40E_LO_DWORD(hw->aq.arq.desc_buf.pa))
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

	return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.asq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_asq_entries == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_asq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_asq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_asq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.asq.count = hw->aq.num_asq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_asq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum i40e_status_code i40e_init_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->aq.arq.count > 0) {
		/* queue already initialized */
		ret_code = I40E_ERR_NOT_READY;
		goto init_adminq_exit;
	}

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = i40e_alloc_adminq_arq_ring(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_exit;

	/* allocate buffers in the rings */
	ret_code = i40e_alloc_arq_bufs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* initialize base registers */
	ret_code = i40e_config_arq_regs(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_rings;

	/* success! */
	hw->aq.arq.count = hw->aq.num_arq_entries;
	goto init_adminq_exit;

init_adminq_free_rings:
	i40e_free_adminq_arq(hw);

init_adminq_exit:
	return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum i40e_status_code i40e_shutdown_asq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	if (hw->aq.asq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_asq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.asq.head, 0);
	wr32(hw, hw->aq.asq.tail, 0);
	wr32(hw, hw->aq.asq.len, 0);
	wr32(hw, hw->aq.asq.bal, 0);
	wr32(hw, hw->aq.asq.bah, 0);

	hw->aq.asq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_asq_bufs(hw);

shutdown_asq_out:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum i40e_status_code i40e_shutdown_arq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		ret_code = I40E_ERR_NOT_READY;
		goto shutdown_arq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, hw->aq.arq.head, 0);
	wr32(hw, hw->aq.arq.tail, 0);
	wr32(hw, hw->aq.arq.len, 0);
	wr32(hw, hw->aq.arq.bal, 0);
	wr32(hw, hw->aq.arq.bah, 0);

	hw->aq.arq.count = 0; /* to indicate uninitialized queue */

	/* free ring buffers */
	i40e_free_arq_bufs(hw);

shutdown_arq_out:
	i40e_release_spinlock(&hw->aq.arq_spinlock);
	return ret_code;
}

/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
	u16 cfg_ptr, oem_hi, oem_lo;
	u16 eetrack_lo, eetrack_hi;
	enum i40e_status_code ret_code;
	int retry = 0;

	/* verify input for valid configuration */
	if ((hw->aq.num_arq_entries == 0) ||
	    (hw->aq.num_asq_entries == 0) ||
	    (hw->aq.arq_buf_size == 0) ||
	    (hw->aq.asq_buf_size == 0)) {
		ret_code = I40E_ERR_CONFIG;
		goto init_adminq_exit;
	}
	i40e_init_spinlock(&hw->aq.asq_spinlock);
	i40e_init_spinlock(&hw->aq.arq_spinlock);

	/* Set up register offsets */
	i40e_adminq_init_regs(hw);

	/* setup ASQ command write back timeout */
	hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

	/* allocate the ASQ */
	ret_code = i40e_init_asq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_destroy_spinlocks;

	/* allocate the ARQ */
	ret_code = i40e_init_arq(hw);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_asq;

	/* There are some cases where the firmware may not be quite ready
	 * for AdminQ operations, so we retry the AdminQ setup a few times
	 * if we see timeouts in this first AQ call.
	 */
	do {
		ret_code = i40e_aq_get_firmware_version(hw,
							&hw->aq.fw_maj_ver,
							&hw->aq.fw_min_ver,
							&hw->aq.fw_build,
							&hw->aq.api_maj_ver,
							&hw->aq.api_min_ver,
							NULL);
		if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
			break;
		retry++;
		i40e_msec_delay(100);
		i40e_resume_aq(hw);
	} while (retry < 10);
	if (ret_code != I40E_SUCCESS)
		goto init_adminq_free_arq;

	/* get the NVM version info */
	i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
			   &hw->nvm.version);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
	i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
	hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
	i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
	i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
			   &oem_hi);
	i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
			   &oem_lo);
	hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

	/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 7)))
		hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

	if (hw->mac.type == I40E_MAC_XL710 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
		hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
	}

	/* Newer versions of firmware require a lock when reading the NVM */
	if ((hw->aq.api_maj_ver > 1) ||
	    ((hw->aq.api_maj_ver == 1) &&
	     (hw->aq.api_min_ver >= 5)))
		hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
		ret_code = I40E_ERR_FIRMWARE_API_VERSION;
		goto init_adminq_free_arq;
	}

	/* pre-emptive resource lock release */
	i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
	hw->nvm_release_on_done = FALSE;
	hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	ret_code = I40E_SUCCESS;

	/* success! */
	goto init_adminq_exit;

init_adminq_free_arq:
	i40e_shutdown_arq(hw);
init_adminq_free_asq:
	i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
	return ret_code;
}
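
/*
 * Example bring-up (hypothetical caller sketch; the entry counts and buffer
 * sizes below are illustrative only, not values mandated by this file):
 *
 *	hw->aq.num_asq_entries = 128;
 *	hw->aq.num_arq_entries = 128;
 *	hw->aq.asq_buf_size = 512;
 *	hw->aq.arq_buf_size = 512;
 *	if (i40e_init_adminq(hw) != I40E_SUCCESS)
 *		(fail the attach)
 */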

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

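	/* if the send queue is still enabled, tell the firmware (best
	 * effort) that the AdminQ is going away before tearing it down
	 */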
	if (i40e_check_asq_alive(hw))
		i40e_aq_queue_shutdown(hw, TRUE);

	i40e_shutdown_asq(hw);
	i40e_shutdown_arq(hw);
	i40e_destroy_spinlock(&hw->aq.asq_spinlock);
	i40e_destroy_spinlock(&hw->aq.arq_spinlock);

	if (hw->nvm_buff.va)
		i40e_free_virt_mem(hw, &hw->nvm_buff);

	return ret_code;
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
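			/* hand the callback a private copy so it cannot
			 * modify the descriptor still owned by the ring
			 */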
			i40e_memcpy(&desc_cb, desc, sizeof(struct i40e_aq_desc),
				    I40E_DMA_TO_DMA);
			cb_func(hw, &desc_cb);
		}
		i40e_memset(desc, 0, sizeof(*desc), I40E_DMA_MEM);
		i40e_memset(details, 0, sizeof(*details), I40E_NONDMA_MEM);
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

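	/* I40E_DESC_UNUSED() (defined in i40e_adminq.h) derives the number
	 * of free slots from next_to_clean, next_to_use and the ring size
	 */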
	return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns TRUE if the firmware has processed all descriptors on the
 *  admin send queue. Returns FALSE if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 **/
enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = FALSE;
	u16  retval = 0;
	u32  val = 0;

	i40e_acquire_spinlock(&hw->aq.asq_spinlock);

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		i40e_memcpy(details,
			    cmd_details,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_TO_NONDMA);

		/* If the cmd_details are defined copy the cookie.  The
		 * CPU_TO_LE32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				CPU_TO_LE32(I40E_HI_DWORD(details->cookie));
			desc->cookie_low =
				CPU_TO_LE32(I40E_LO_DWORD(details->cookie));
		}
	} else {
		i40e_memset(details, 0,
			    sizeof(struct i40e_asq_cmd_details),
			    I40E_NONDMA_MEM);
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~CPU_TO_LE16(details->flags_dis);
	desc->flags |= CPU_TO_LE16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag\n");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	i40e_memcpy(desc_on_ring, desc, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_TO_DMA);

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		i40e_memcpy(dma_buff->va, buff, buff_size,
			    I40E_NONDMA_TO_DMA);
		desc_on_ring->datalen = CPU_TO_LE16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				CPU_TO_LE32(I40E_HI_DWORD(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				CPU_TO_LE32(I40E_LO_DWORD(dma_buff->pa));
	}

	/* bump the tail */
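	/* writing next_to_use to the tail doorbell hands the new descriptor
	 * (and its buffer, if any) over to the firmware
	 */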
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
		      buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40e_asq_done(hw))
				break;
			i40e_usec_delay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40e_asq_done(hw)) {
		i40e_memcpy(desc, desc_on_ring, sizeof(struct i40e_aq_desc),
			    I40E_DMA_TO_NONDMA);
		if (buff != NULL)
			i40e_memcpy(buff, dma_buff->va, buff_size,
				    I40E_DMA_TO_NONDMA);
		retval = LE16_TO_CPU(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = TRUE;
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = I40E_SUCCESS;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

	/* save the writeback descriptor if requested */
	if (details->wb_desc)
		i40e_memcpy(details->wb_desc, desc_on_ring,
			    sizeof(struct i40e_aq_desc), I40E_DMA_TO_NONDMA);

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	i40e_release_spinlock(&hw->aq.asq_spinlock);
	return status;
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
				       u16 opcode)
{
	/* zero out the desc */
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc),
		    I40E_NONDMA_MEM);
	desc->opcode = CPU_TO_LE16(opcode);
	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_SI);
}
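
/*
 * Typical direct-command usage (hypothetical caller sketch; 'opcode' stands
 * in for a real AQ opcode):
 *
 *	struct i40e_aq_desc desc;
 *
 *	i40e_fill_default_direct_cmd_desc(&desc, opcode);
 *	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands additionally pass a buffer and its length;
 * i40e_asq_send_command() copies the buffer into the ring's DMA buffer and
 * fills in the descriptor's address fields.
 */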

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
					     struct i40e_arq_event_info *e,
					     u16 *pending)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 ntc = hw->aq.arq.next_to_clean;
	struct i40e_aq_desc *desc;
	struct i40e_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);

	/* take the lock before we start messing with the ring */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);

	if (hw->aq.arq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Admin queue not initialized.\n");
		ret_code = I40E_ERR_QUEUE_EMPTY;
		goto clean_arq_element_err;
	}

	/* set next_to_use to head */
	if (!i40e_is_vf(hw))
		ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
	else
		ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
		goto clean_arq_element_out;
	}

	/* now clean the next descriptor */
	desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
	desc_idx = ntc;

	hw->aq.arq_last_status =
		(enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
	flags = LE16_TO_CPU(desc->flags);
	if (flags & I40E_AQ_FLAG_ERR) {
		ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQRX: Event received with error 0x%X.\n",
			   hw->aq.arq_last_status);
	}

	i40e_memcpy(&e->desc, desc, sizeof(struct i40e_aq_desc),
		    I40E_DMA_TO_NONDMA);
	datalen = LE16_TO_CPU(desc->datalen);
	e->msg_len = min(datalen, e->buf_len);
	if (e->msg_buf != NULL && (e->msg_len != 0))
		i40e_memcpy(e->msg_buf,
			    hw->aq.arq.r.arq_bi[desc_idx].va,
			    e->msg_len, I40E_DMA_TO_NONDMA);

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
	i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
		      hw->aq.arq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message
	 * size
	 */
	bi = &hw->aq.arq.r.arq_bi[ntc];
	i40e_memset((void *)desc, 0, sizeof(struct i40e_aq_desc), I40E_DMA_MEM);

	desc->flags = CPU_TO_LE16(I40E_AQ_FLAG_BUF);
	if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
		desc->flags |= CPU_TO_LE16(I40E_AQ_FLAG_LB);
	desc->datalen = CPU_TO_LE16((u16)bi->size);
	desc->params.external.addr_high = CPU_TO_LE32(I40E_HI_DWORD(bi->pa));
	desc->params.external.addr_low = CPU_TO_LE32(I40E_LO_DWORD(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, hw->aq.arq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == hw->aq.num_arq_entries)
		ntc = 0;
	hw->aq.arq.next_to_clean = ntc;
	hw->aq.arq.next_to_use = ntu;

	i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc);
clean_arq_element_out:
	/* Set pending if needed, unlock and return */
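	/* 'pending' is the ring distance from next_to_clean (ntc) to the
	 * hardware head (ntu); when ntc has wrapped past ntu, the ring size
	 * is added back in
	 */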
	if (pending != NULL)
		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
	i40e_release_spinlock(&hw->aq.arq_spinlock);

	return ret_code;
}
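
/*
 * Usage sketch (hypothetical caller, e.g. an interrupt handler draining the
 * ARQ; the 'event' setup shown is illustrative):
 *
 *	struct i40e_arq_event_info event;
 *	u16 pending;
 *
 *	event.buf_len = hw->aq.arq_buf_size;
 *	event.msg_buf = <caller-allocated buffer of buf_len bytes>;
 *	do {
 *		if (i40e_clean_arq_element(hw, &event, &pending) !=
 *		    I40E_SUCCESS)
 *			break;
 *		(dispatch on LE16_TO_CPU(event.desc.opcode))
 *	} while (pending != 0);
 */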